source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_unaryop__lnot_int8_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int8_uint8
// op(A') function: GB_tran__lnot_int8_uint8
// C type: int8_t
// A type: uint8_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = !(aij != 0)
// type of A
#define GB_ATYPE \
uint8_t
// type of C
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// access entry p of C
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
int8_t z = (int8_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT8 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = !((int8_t) Ax [p] != 0), for p = 0..anz-1, in parallel.
// Returns GrB_NO_VALUE when this operator/type combination is compiled out
// (GB_DISABLE), so the caller falls back to the generic kernel.
GrB_Info GB_unop__lnot_int8_uint8
(
    int8_t *restrict Cx,            // output array, length anz
    const uint8_t *restrict Ax,     // input array, length anz
    int64_t anz,                    // number of entries
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // aij = Ax [p] ; x = (int8_t) aij ; Cx [p] = !(x != 0)
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the transpose/typecast/apply work itself lives in the
// included template GB_unaryop_transpose.c, specialized by the GB_* macros
// defined above.  Returns GrB_NO_VALUE when compiled out (GB_DISABLE).
GrB_Info GB_tran__lnot_int8_uint8
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix, logically transposed
    int64_t *restrict *Rowcounts,       // per-slice row counts (workspace)
    GBI_single_iterator Iter,           // iterator over A
    const int64_t *restrict A_slice,    // slice boundaries for parallelism
    int naslice                         // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
fft_omp.c | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <complex.h>
#include <omp.h>
#ifndef M_PI
#define M_PI 3.1415926535
#endif
void fftOmp_initWtable(double complex *W, int W_size);
void fftUtils_reverseArray(double complex* array, int array_size);
double fftOmp_randomGen (double *seed);
int reverse(int N, int n);
/* Iterative radix-2 decimation-in-time FFT, computed in place over `array`.
 * array_size must be a power of two.  The result is also written, one sample
 * per line, to "results.txt".  Returns silently on I/O or allocation failure. */
void fft(double complex* array, unsigned long array_size)
{
    FILE *fp;
    double complex *W;
    unsigned long n = 1, i;
    unsigned long a = array_size / 2;

    /* Create the output file; bail out early if that fails. */
    fp = fopen("results.txt", "w");
    if (fp == NULL) {
        fprintf(stderr, " Error: cannot open results.txt for writing\n");
        return;
    }

    /* Twiddle table: W[k] = exp(-2*pi*I*k / array_size), k < array_size/2. */
    W = (double complex*) malloc( array_size/2 * sizeof(complex double));
    if (W == NULL) {
        fprintf(stderr, " Error: out of memory\n");
        fclose(fp);
        return;
    }
    fftOmp_initWtable(W, array_size);

    /* Bit-reversal permutation of the input.
     * BUG FIX: the length was hard-coded to 8, which silently produced wrong
     * results for every other input size; use the actual array size. */
    fftUtils_reverseArray(array, array_size);

    printf(" Executing FFT... \n");
    /* One pass per stage.  `n` is the butterfly half-span and `a` the twiddle
     * stride; both are updated serially between the parallel passes, so they
     * are loop-invariant inside each parallel region. */
    unsigned long stages = (unsigned long) log2((double) array_size);
    for(unsigned long j = 0; j < stages; j++) {
        #pragma omp parallel shared ( array, array_size, W, n, a ) private (i)
        #pragma omp for
        for(i = 0; i < array_size; i++) {
            if(!(i & n)) {
                double complex temp_first_component = array[i];
                double complex temp_second_component = W[(i * a) % (n * a)] * array[i + n];
                array[i] = temp_first_component + temp_second_component;
                array[i + n] = temp_first_component - temp_second_component;
            }
        }
        n *= 2;
        a = a / 2;
    }

    /* Dump results.  Index type/format fixed: unsigned long with %lu
     * (the old `int i` + "%i" pair was wrong for large sizes). */
    for (unsigned long k = 0; k < array_size; k++) {
        fprintf(fp, "Samples[%lu] = %.2f + %.2fi\n", k, creal(array[k]), cimag(array[k]));
    }
    fclose(fp);
    free(W);
}
/* Interactive driver: asks for a thread count and an exponent n, generates
 * 2^n pseudo-random complex samples, and runs the OpenMP FFT on them.
 * Returns 0 on success, 1 on invalid input or allocation failure. */
int main(int argc, char const *argv[]) {
    double complex *input;
    int exponent;
    int nUsedThreads;
    unsigned long n, i;
    static double seed;

    printf(" ********************************* \n");
    printf(" *** FFT - OMP implentation ***** \n");
    printf(" ********************************* \n");
    printf(" *** T.Cejrowski 21.11.2016 *** \n");
    printf(" ********************************* \n");
    printf ( "\n" );
    printf ( " Number of processors available = %d\n", omp_get_num_procs ( ) );
    printf ( " Number of threads = %d\n", omp_get_max_threads ( ) );
    printf ( "\n");

    printf ( " Enter number of threads you want to use: \n" );
    /* BUG FIX: scanf results were ignored; reject bad/absent input. */
    if ( scanf ( "\t %i", &nUsedThreads) != 1 || nUsedThreads < 1 )
    {
        fprintf(stderr, " Invalid number of threads.\n");
        return 1;
    }
    omp_set_num_threads(nUsedThreads);
    #pragma omp parallel
    {
        #pragma omp master
        printf(" Using %i threads.\n", omp_get_num_threads());
    }

    printf ( " Preparing for samples generation... Enter n (2^n) : \n" );
    if ( scanf ( "\t %i", &exponent) != 1 || exponent < 1 || exponent > 30 )
    {
        fprintf(stderr, " Invalid exponent.\n");
        return 1;
    }
    n = pow(2, exponent);
    printf ( " Generating %lu (2^%i) samples... \n\n", n, exponent);

    seed = 321.0;
    input = ( double complex * ) malloc ( n * sizeof ( double complex) );
    if (input == NULL)
    {
        fprintf(stderr, " Out of memory.\n");
        return 1;
    }
    /* BUG FIX: the generator advances one shared seed sequentially, so the
     * old `#pragma omp parallel for` raced on *seed (undefined behavior and
     * nondeterministic samples).  Seed generation is inherently serial. */
    for (i = 0; i < n; i++ )
    {
        input[i] = fftOmp_randomGen(&seed);
        input[i] += fftOmp_randomGen(&seed)*I;
    }
    printf(" Done! \n");

    /* Main FFT function */
    fft(input, n);
    printf(" Success! Results are in results.txt file\n");
    free(input);
    return 0;
}
/******************************************************************************/
double fftOmp_randomGen (double *seed)
{
double d2 = 0.2147483647e10;
double t;
double value;
t = ( double ) *seed;
t = fmod ( 16807.0 * t, d2 );
*seed = ( double ) t;
value = ( double ) ( ( t - 1.0 ) / ( d2 - 1.0 ) );
return value;
}
/******************************************************************************/
void fftOmp_initWtable(double complex *W, int number_of_samples)
{
int i;
W[0] = 1;
W[1] = cexp(-2*M_PI*I/number_of_samples);
#pragma omp parallel shared ( W ) private ( i )
#pragma omp for nowait
for(int i = 2; i < number_of_samples/2; i++) {
W[i] = cpow(W[1], (double complex)i);
}
}
/******************************************************************************/
/* Reverse the lowest log2(N) bits of n; N must be a power of two.
 * Used to build the FFT bit-reversal permutation.
 * IMPROVEMENT: the original recomputed floating-point log2(N) in the loop
 * condition and body on every iteration; the bit width is now derived once
 * with integer arithmetic (same results for power-of-two N, no FP rounding). */
int reverse(int N, int n)
{
    /* bits = log2(N), computed by shifting. */
    int bits = 0;
    while ((1 << (bits + 1)) <= N) {
        bits++;
    }
    int p = 0;
    for (int j = 0; j < bits; j++) {
        /* Bit (bits-1-j) of n becomes bit j of the result. */
        if (n & (1 << (bits - 1 - j)))
            p |= 1 << j;
    }
    return p;
}
/******************************************************************************/
/* Reverse the lowest log2(N) bits of n; N must be a power of two.
 * Local copy so this unit is self-contained. */
static int fftUtils_bitReverse(int N, int n)
{
    int bits = 0;
    while ((1 << (bits + 1)) <= N)
        bits++;
    int p = 0;
    for (int j = 0; j < bits; j++) {
        if (n & (1 << (bits - 1 - j)))
            p |= 1 << j;
    }
    return p;
}

/* In-place bit-reversal permutation of `array` (length a power of two).
 * BUG FIXES:
 *  1. The original looped i < array_size/2 and swapped unconditionally, so
 *     any pair with BOTH indices below array_size/2 (e.g. 2<->4 when
 *     array_size = 16) was swapped twice -- a net no-op, leaving the
 *     permutation wrong for all sizes >= 16.  We now scan the full range
 *     and swap only when i < reversed(i), doing each swap exactly once.
 *  2. `reversion_idx` and `temp` were shared across OpenMP threads (declared
 *     outside the parallel region), a data race; they are now loop-local. */
void fftUtils_reverseArray(double complex* array, int array_size)
{
    int i;
    #pragma omp parallel shared ( array, array_size) private (i)
    #pragma omp for
    for(i = 0; i < array_size; i++)
    {
        int reversion_idx = fftUtils_bitReverse(array_size, i);
        if (i < reversion_idx) {
            double complex temp = array[i];
            array[i] = array[reversion_idx];
            array[reversion_idx] = temp;
        }
    }
}
|
par_csr_matrix.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Member functions for hypre_ParCSRMatrix class.
*
*****************************************************************************/
#include "_hypre_parcsr_mv.h"
#include "../seq_mv/HYPRE_seq_mv.h"
#include "../seq_mv/csr_matrix.h"
/* In addition to publically accessible interface in HYPRE_mv.h, the
implementation in this file uses accessor macros into the sequential matrix
structure, and so includes the .h that defines that structure. Should those
accessor functions become proper functions at some later date, this will not
be necessary. AJC 4/99 */
#ifdef HYPRE_NO_GLOBAL_PARTITION
HYPRE_Int hypre_FillResponseParToCSRMatrix(void*, HYPRE_Int, HYPRE_Int, void*, MPI_Comm, void**, HYPRE_Int*);
#endif
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixCreate
*--------------------------------------------------------------------------*/
/* If create is called for HYPRE_NO_GLOBAL_PARTITION and row_starts and
col_starts are NOT null, then it is assumed that they are array of length 2
containing the start row of the calling processor followed by the start row
of the next processor - AHB 6/05 */
/* Allocate a hypre_ParCSRMatrix shell and its (uninitialized) diag/offd CSR
 * parts.  Data arrays are allocated later by hypre_ParCSRMatrixInitialize.
 *
 *   comm                 - MPI communicator the matrix lives on
 *   global_num_rows/cols - global dimensions
 *   row_starts/col_starts- partitioning arrays; if NULL they are generated
 *                          here and the matrix takes ownership.  With
 *                          HYPRE_NO_GLOBAL_PARTITION they are length 2
 *                          [my first row, next rank's first row], otherwise
 *                          length num_procs+1.
 *   num_cols_offd        - number of columns of the off-diagonal part
 *   num_nonzeros_diag/offd - nonzero counts used to size the CSR parts
 */
hypre_ParCSRMatrix*
hypre_ParCSRMatrixCreate( MPI_Comm comm,
                          HYPRE_BigInt global_num_rows,
                          HYPRE_BigInt global_num_cols,
                          HYPRE_BigInt *row_starts,
                          HYPRE_BigInt *col_starts,
                          HYPRE_Int num_cols_offd,
                          HYPRE_Int num_nonzeros_diag,
                          HYPRE_Int num_nonzeros_offd )
{
   hypre_ParCSRMatrix *matrix;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int local_num_rows, local_num_cols;
   HYPRE_BigInt first_row_index, first_col_diag;

   matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);

   hypre_MPI_Comm_rank(comm,&my_id);
   hypre_MPI_Comm_size(comm,&num_procs);

   /* Generate partitionings if the caller did not supply them. */
   if (!row_starts)
   {
#ifdef HYPRE_NO_GLOBAL_PARTITION
      hypre_GenerateLocalPartitioning(global_num_rows, num_procs, my_id,
                                      &row_starts);
#else
      hypre_GeneratePartitioning(global_num_rows, num_procs, &row_starts);
#endif
   }
   if (!col_starts)
   {
      /* Square matrices share a single partitioning array for rows/cols. */
      if (global_num_rows == global_num_cols)
      {
         col_starts = row_starts;
      }
      else
      {
#ifdef HYPRE_NO_GLOBAL_PARTITION
         hypre_GenerateLocalPartitioning(global_num_cols, num_procs, my_id,
                                         &col_starts);
#else
         hypre_GeneratePartitioning(global_num_cols, num_procs, &col_starts);
#endif
      }
   }

   /* Derive the local extents from the partitioning arrays. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* row_starts[0] is start of local rows. row_starts[1] is start of next
      processor's rows */
   first_row_index = row_starts[0];
   local_num_rows = row_starts[1]-first_row_index ;
   first_col_diag = col_starts[0];
   local_num_cols = col_starts[1]-first_col_diag;
#else
   first_row_index = row_starts[my_id];
   local_num_rows = row_starts[my_id+1]-first_row_index;
   first_col_diag = col_starts[my_id];
   local_num_cols = col_starts[my_id+1]-first_col_diag;
#endif

   hypre_ParCSRMatrixComm(matrix) = comm;
   /* diag holds the square local block; offd holds couplings to other ranks. */
   hypre_ParCSRMatrixDiag(matrix) =
      hypre_CSRMatrixCreate(local_num_rows, local_num_cols,num_nonzeros_diag);
   hypre_ParCSRMatrixOffd(matrix) =
      hypre_CSRMatrixCreate(local_num_rows, num_cols_offd,num_nonzeros_offd);
   hypre_ParCSRMatrixDiagT(matrix) = NULL;
   hypre_ParCSRMatrixOffdT(matrix) = NULL; // JSP: transposed matrices are optional
   hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows;
   hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols;
   hypre_ParCSRMatrixFirstRowIndex(matrix) = first_row_index;
   hypre_ParCSRMatrixFirstColDiag(matrix) = first_col_diag;
   hypre_ParCSRMatrixLastRowIndex(matrix) = first_row_index + local_num_rows - 1;
   hypre_ParCSRMatrixLastColDiag(matrix) = first_col_diag + local_num_cols - 1;
   hypre_ParCSRMatrixColMapOffd(matrix) = NULL;
   hypre_ParCSRMatrixDeviceColMapOffd(matrix) = NULL;
   hypre_ParCSRMatrixProcOrdering(matrix) = NULL;
   hypre_ParCSRMatrixAssumedPartition(matrix) = NULL;
   hypre_ParCSRMatrixOwnsAssumedPartition(matrix) = 1;

   /* When NO_GLOBAL_PARTITION is set we could make these null, instead
      of leaving the range.  If that change is made, then when this create
      is called from functions like the matrix-matrix multiply, be careful
      not to generate a new partition */
   hypre_ParCSRMatrixRowStarts(matrix) = row_starts;
   hypre_ParCSRMatrixColStarts(matrix) = col_starts;

   hypre_ParCSRMatrixCommPkg(matrix) = NULL;
   hypre_ParCSRMatrixCommPkgT(matrix) = NULL;

   /* set defaults: the matrix owns everything it points at, except that a
      col_starts aliased to row_starts is owned only once (via row_starts). */
   hypre_ParCSRMatrixOwnsData(matrix) = 1;
   hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1;
   hypre_ParCSRMatrixOwnsColStarts(matrix) = 1;
   if (row_starts == col_starts)
   {
      hypre_ParCSRMatrixOwnsColStarts(matrix) = 0;
   }
   /* GetRow workspace, created lazily. */
   hypre_ParCSRMatrixRowindices(matrix) = NULL;
   hypre_ParCSRMatrixRowvalues(matrix) = NULL;
   hypre_ParCSRMatrixGetrowactive(matrix) = 0;

   matrix->bdiaginv = NULL;
   matrix->bdiaginv_comm_pkg = NULL;
   matrix->bdiag_size = -1;

   return matrix;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixDestroy
*--------------------------------------------------------------------------*/
/* Free a hypre_ParCSRMatrix and everything it owns.  Sub-objects are released
 * only according to the Owns* flags, so borrowed data and a col_starts array
 * aliased to row_starts are not double-freed.  NULL matrix is a no-op.
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_ParCSRMatrixDestroy( hypre_ParCSRMatrix *matrix )
{
   if (matrix)
   {
      if ( hypre_ParCSRMatrixOwnsData(matrix) )
      {
         /* diag/offd CSR parts and their optional transposes */
         hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(matrix));
         hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(matrix));
         if ( hypre_ParCSRMatrixDiagT(matrix) )
         {
            hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiagT(matrix));
         }
         if ( hypre_ParCSRMatrixOffdT(matrix) )
         {
            hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffdT(matrix));
         }
         /* host and device copies of the offd column map */
         if (hypre_ParCSRMatrixColMapOffd(matrix))
         {
            /*ASSERT_HOST(hypre_ParCSRMatrixColMapOffd(matrix));*/
            hypre_TFree(hypre_ParCSRMatrixColMapOffd(matrix), HYPRE_MEMORY_HOST);
         }
         if (hypre_ParCSRMatrixDeviceColMapOffd(matrix))
         {
            hypre_TFree(hypre_ParCSRMatrixDeviceColMapOffd(matrix), HYPRE_MEMORY_DEVICE);
         }
         /* communication packages (forward and transpose) */
         if (hypre_ParCSRMatrixCommPkg(matrix))
         {
            hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkg(matrix));
         }
         if (hypre_ParCSRMatrixCommPkgT(matrix))
         {
            hypre_MatvecCommPkgDestroy(hypre_ParCSRMatrixCommPkgT(matrix));
         }
      }
      if ( hypre_ParCSRMatrixOwnsRowStarts(matrix) )
      {
         hypre_TFree(hypre_ParCSRMatrixRowStarts(matrix), HYPRE_MEMORY_HOST);
      }
      if ( hypre_ParCSRMatrixOwnsColStarts(matrix) )
      {
         hypre_TFree(hypre_ParCSRMatrixColStarts(matrix), HYPRE_MEMORY_HOST);
      }
      /* GetRow workspace (hypre_TFree of NULL is a no-op) */
      hypre_TFree(hypre_ParCSRMatrixRowindices(matrix), HYPRE_MEMORY_HOST);
      hypre_TFree(hypre_ParCSRMatrixRowvalues(matrix), HYPRE_MEMORY_HOST);
      if ( hypre_ParCSRMatrixAssumedPartition(matrix) && hypre_ParCSRMatrixOwnsAssumedPartition(matrix) )
      {
         hypre_AssumedPartitionDestroy(hypre_ParCSRMatrixAssumedPartition(matrix));
      }
      if ( hypre_ParCSRMatrixProcOrdering(matrix) )
      {
         hypre_TFree(hypre_ParCSRMatrixProcOrdering(matrix), HYPRE_MEMORY_HOST);
      }
      /* block-diagonal-inverse workspace */
      hypre_TFree(matrix->bdiaginv, HYPRE_MEMORY_HOST);
      if (matrix->bdiaginv_comm_pkg)
      {
         hypre_MatvecCommPkgDestroy(matrix->bdiaginv_comm_pkg);
      }
      hypre_TFree(matrix, HYPRE_MEMORY_HOST);
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixInitialize
*--------------------------------------------------------------------------*/
/* Allocate the data arrays of the diag and offd CSR parts in the requested
 * memory location, plus the host-side col_map_offd array (sized by the
 * number of offd columns).  The matrix must have been built first with
 * hypre_ParCSRMatrixCreate.  Returns hypre_error_flag. */
HYPRE_Int
hypre_ParCSRMatrixInitialize_v2( hypre_ParCSRMatrix *matrix, HYPRE_Int memory_location )
{
   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixDiag(matrix), 0, memory_location);
   hypre_CSRMatrixInitialize_v2(hypre_ParCSRMatrixOffd(matrix), 0, memory_location);
   hypre_ParCSRMatrixColMapOffd(matrix) =
      hypre_CTAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix)),
                    HYPRE_MEMORY_HOST);
   return hypre_error_flag;
}
/* Convenience wrapper: initialize in HYPRE_MEMORY_SHARED. */
HYPRE_Int
hypre_ParCSRMatrixInitialize( hypre_ParCSRMatrix *matrix )
{
   return hypre_ParCSRMatrixInitialize_v2(matrix, HYPRE_MEMORY_SHARED);
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixClone
* Creates and returns a new copy S of the argument A
* The following variables are not copied because they will be constructed
* later if needed: CommPkg, CommPkgT, rowindices, rowvalues
*--------------------------------------------------------------------------*/
/* Create a new matrix S with the same communicator, dimensions, partitioning
 * and nonzero counts as A, allocate it in `memory_location`, and copy A into
 * it (values only if copy_data is nonzero).  S borrows A's row/col starts
 * (ownership flags cleared); CommPkg/CommPkgT/rowindices/rowvalues are not
 * copied and will be rebuilt on demand. */
hypre_ParCSRMatrix*
hypre_ParCSRMatrixClone_v2(hypre_ParCSRMatrix *A, HYPRE_Int copy_data, HYPRE_Int memory_location)
{
   hypre_ParCSRMatrix *S;
   S = hypre_ParCSRMatrixCreate( hypre_ParCSRMatrixComm(A),
                                 hypre_ParCSRMatrixGlobalNumRows(A),
                                 hypre_ParCSRMatrixGlobalNumCols(A),
                                 hypre_ParCSRMatrixRowStarts(A),
                                 hypre_ParCSRMatrixColStarts(A),
                                 hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)),
                                 hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixDiag(A)),
                                 hypre_CSRMatrixNumNonzeros(hypre_ParCSRMatrixOffd(A)) );
   /* !!! S does not own Row/Col-Starts */
   hypre_ParCSRMatrixSetRowStartsOwner(S, 0);
   hypre_ParCSRMatrixSetColStartsOwner(S, 0);
   hypre_ParCSRMatrixNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A);
   /* NOTE(review): DNumNonzeros(S) is seeded from the integer NumNonzeros(A)
    * rather than DNumNonzeros(A) — looks like a possible copy-paste slip;
    * confirm against upstream before changing. */
   hypre_ParCSRMatrixDNumNonzeros(S) = hypre_ParCSRMatrixNumNonzeros(A);
   hypre_ParCSRMatrixInitialize_v2(S, memory_location);
   hypre_ParCSRMatrixCopy(A, S, copy_data);
   return S;
}
/* Convenience wrapper: clone into HYPRE_MEMORY_SHARED. */
hypre_ParCSRMatrix*
hypre_ParCSRMatrixClone(hypre_ParCSRMatrix *A, HYPRE_Int copy_data)
{
   return hypre_ParCSRMatrixClone_v2(A, copy_data, HYPRE_MEMORY_SHARED);
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetNumNonzeros
*--------------------------------------------------------------------------*/
/* Compute the global number of nonzeros (diag + offd, summed over all ranks
 * with an MPI Allreduce in HYPRE_BigInt arithmetic) and cache it in the
 * matrix.  Collective over the matrix's communicator.
 * Returns hypre_error_flag. */
HYPRE_Int
hypre_ParCSRMatrixSetNumNonzeros( hypre_ParCSRMatrix *matrix )
{
   MPI_Comm comm;
   hypre_CSRMatrix *diag;
   HYPRE_Int *diag_i;
   hypre_CSRMatrix *offd;
   HYPRE_Int *offd_i;
   HYPRE_Int local_num_rows;
   HYPRE_BigInt total_num_nonzeros;
   HYPRE_BigInt local_num_nonzeros;
   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   comm = hypre_ParCSRMatrixComm(matrix);
   diag = hypre_ParCSRMatrixDiag(matrix);
   diag_i = hypre_CSRMatrixI(diag);
   offd = hypre_ParCSRMatrixOffd(matrix);
   offd_i = hypre_CSRMatrixI(offd);
   local_num_rows = hypre_CSRMatrixNumRows(diag);
   /* CSR row-pointer entry [num_rows] is the local nonzero count. */
   local_num_nonzeros = (HYPRE_BigInt)(diag_i[local_num_rows] + offd_i[local_num_rows]);
   hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1, HYPRE_MPI_BIG_INT,
                       hypre_MPI_SUM, comm);
   hypre_ParCSRMatrixNumNonzeros(matrix) = total_num_nonzeros;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetDNumNonzeros
*--------------------------------------------------------------------------*/
/* Same as hypre_ParCSRMatrixSetNumNonzeros, but accumulates the global count
 * in HYPRE_Real (floating point), caching it in DNumNonzeros — useful when
 * the total could overflow integer types.  Collective over the matrix's
 * communicator.  Returns hypre_error_flag. */
HYPRE_Int
hypre_ParCSRMatrixSetDNumNonzeros( hypre_ParCSRMatrix *matrix )
{
   MPI_Comm comm;
   hypre_CSRMatrix *diag;
   HYPRE_Int *diag_i;
   hypre_CSRMatrix *offd;
   HYPRE_Int *offd_i;
   HYPRE_Int local_num_rows;
   HYPRE_Real total_num_nonzeros;
   HYPRE_Real local_num_nonzeros;
   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   comm = hypre_ParCSRMatrixComm(matrix);
   diag = hypre_ParCSRMatrixDiag(matrix);
   diag_i = hypre_CSRMatrixI(diag);
   offd = hypre_ParCSRMatrixOffd(matrix);
   offd_i = hypre_CSRMatrixI(offd);
   local_num_rows = hypre_CSRMatrixNumRows(diag);
   /* CSR row-pointer entry [num_rows] is the local nonzero count. */
   local_num_nonzeros = diag_i[local_num_rows];
   local_num_nonzeros += offd_i[local_num_rows];
   hypre_MPI_Allreduce(&local_num_nonzeros, &total_num_nonzeros, 1,
                       HYPRE_MPI_REAL, hypre_MPI_SUM, comm);
   hypre_ParCSRMatrixDNumNonzeros(matrix) = total_num_nonzeros;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetDataOwner
*--------------------------------------------------------------------------*/
/* Set whether the matrix owns (and therefore frees) its diag/offd data.
 * Returns hypre_error_flag; a NULL matrix is reported as an argument error. */
HYPRE_Int
hypre_ParCSRMatrixSetDataOwner( hypre_ParCSRMatrix *matrix,
                                HYPRE_Int owns_data )
{
   if (matrix == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_ParCSRMatrixOwnsData(matrix) = owns_data;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetRowStartsOwner
*--------------------------------------------------------------------------*/
/* Set whether the matrix owns (and therefore frees) its row_starts array.
 * Returns hypre_error_flag; a NULL matrix is reported as an argument error. */
HYPRE_Int
hypre_ParCSRMatrixSetRowStartsOwner( hypre_ParCSRMatrix *matrix,
                                     HYPRE_Int owns_row_starts )
{
   if (matrix == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_ParCSRMatrixOwnsRowStarts(matrix) = owns_row_starts;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixSetColStartsOwner
*--------------------------------------------------------------------------*/
/* Set whether the matrix owns (and therefore frees) its col_starts array.
 * Returns hypre_error_flag; a NULL matrix is reported as an argument error. */
HYPRE_Int
hypre_ParCSRMatrixSetColStartsOwner( hypre_ParCSRMatrix *matrix,
                                     HYPRE_Int owns_col_starts )
{
   if (matrix == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_ParCSRMatrixOwnsColStarts(matrix) = owns_col_starts;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixRead
*--------------------------------------------------------------------------*/
/* Read a distributed matrix from the per-rank files written by
 * hypre_ParCSRMatrixPrint: <file_name>.INFO.<id> (sizes, partitioning,
 * offd column map), <file_name>.D.<id> (diag CSR part) and
 * <file_name>.O.<id> (offd CSR part, present only if num_cols_offd > 0).
 * Each rank reads only its own files.  Returns a new matrix that owns its
 * partitioning arrays (col_starts aliased to row_starts when identical). */
hypre_ParCSRMatrix *
hypre_ParCSRMatrixRead( MPI_Comm comm,
                        const char *file_name )
{
   hypre_ParCSRMatrix *matrix;
   hypre_CSRMatrix *diag;
   hypre_CSRMatrix *offd;
   HYPRE_Int my_id, i, num_procs;
   char new_file_d[80], new_file_o[80], new_file_info[80];
   HYPRE_BigInt global_num_rows, global_num_cols;
   HYPRE_Int num_cols_offd;
   HYPRE_Int local_num_rows;
   HYPRE_BigInt *row_starts;
   HYPRE_BigInt *col_starts;
   HYPRE_BigInt *col_map_offd;
   FILE *fp;
   HYPRE_Int equal = 1;
#ifdef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_BigInt row_s, row_e, col_s, col_e;
#endif
   hypre_MPI_Comm_rank(comm,&my_id);
   hypre_MPI_Comm_size(comm,&num_procs);
   /* Partitioning arrays: length 2 (local range only) with
      HYPRE_NO_GLOBAL_PARTITION, otherwise length num_procs+1. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   row_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
   col_starts = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);
#else
   row_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
   col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
#endif
   hypre_sprintf(new_file_d,"%s.D.%d",file_name,my_id);
   hypre_sprintf(new_file_o,"%s.O.%d",file_name,my_id);
   hypre_sprintf(new_file_info,"%s.INFO.%d",file_name,my_id);
   /* NOTE(review): fopen result is not checked; a missing .INFO file will
      crash inside hypre_fscanf — confirm callers guarantee the file exists. */
   fp = fopen(new_file_info, "r");
   hypre_fscanf(fp, "%b", &global_num_rows);
   hypre_fscanf(fp, "%b", &global_num_cols);
   hypre_fscanf(fp, "%d", &num_cols_offd);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* the bgl input file should only contain the EXACT range for local processor */
   hypre_fscanf(fp, "%d %d %d %d", &row_s, &row_e, &col_s, &col_e);
   row_starts[0] = row_s;
   row_starts[1] = row_e;
   col_starts[0] = col_s;
   col_starts[1] = col_e;
#else
   for (i=0; i < num_procs; i++)
      hypre_fscanf(fp, "%b %b", &row_starts[i], &col_starts[i]);
   row_starts[num_procs] = global_num_rows;
   col_starts[num_procs] = global_num_cols;
#endif
   /* Column map of the offd part: global column index for each offd column. */
   col_map_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
   for (i=0; i < num_cols_offd; i++)
      hypre_fscanf(fp, "%b", &col_map_offd[i]);
   fclose(fp);
   /* If row and column partitionings coincide, keep a single shared array. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   for (i=1; i >= 0; i--)
   {
      if (row_starts[i] != col_starts[i])
      {
         equal = 0;
         break;
      }
   }
#else
   for (i=num_procs; i >= 0; i--)
   {
      if (row_starts[i] != col_starts[i])
      {
         equal = 0;
         break;
      }
   }
#endif
   if (equal)
   {
      hypre_TFree(col_starts, HYPRE_MEMORY_HOST);
      col_starts = row_starts;
   }
   diag = hypre_CSRMatrixRead(new_file_d);
   local_num_rows = hypre_CSRMatrixNumRows(diag);
   /* The offd file exists only when there are offd columns; otherwise build
      an empty placeholder. */
   if (num_cols_offd)
   {
      offd = hypre_CSRMatrixRead(new_file_o);
   }
   else
   {
      offd = hypre_CSRMatrixCreate(local_num_rows,0,0);
      hypre_CSRMatrixInitialize(offd);
   }
   /* Assemble the parallel matrix shell around the two CSR parts. */
   matrix = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixComm(matrix) = comm;
   hypre_ParCSRMatrixGlobalNumRows(matrix) = global_num_rows;
   hypre_ParCSRMatrixGlobalNumCols(matrix) = global_num_cols;
#ifdef HYPRE_NO_GLOBAL_PARTITION
   hypre_ParCSRMatrixFirstRowIndex(matrix) = row_s;
   hypre_ParCSRMatrixFirstColDiag(matrix) = col_s;
   hypre_ParCSRMatrixLastRowIndex(matrix) = row_e - 1;
   hypre_ParCSRMatrixLastColDiag(matrix) = col_e - 1;
#else
   hypre_ParCSRMatrixFirstRowIndex(matrix) = row_starts[my_id];
   hypre_ParCSRMatrixFirstColDiag(matrix) = col_starts[my_id];
   hypre_ParCSRMatrixLastRowIndex(matrix) = row_starts[my_id+1]-1;
   hypre_ParCSRMatrixLastColDiag(matrix) = col_starts[my_id+1]-1;
#endif
   hypre_ParCSRMatrixRowStarts(matrix) = row_starts;
   hypre_ParCSRMatrixColStarts(matrix) = col_starts;
   hypre_ParCSRMatrixCommPkg(matrix) = NULL;
   /* set defaults: owns everything, except an aliased col_starts. */
   hypre_ParCSRMatrixOwnsData(matrix) = 1;
   hypre_ParCSRMatrixOwnsRowStarts(matrix) = 1;
   hypre_ParCSRMatrixOwnsColStarts(matrix) = 1;
   if (row_starts == col_starts)
      hypre_ParCSRMatrixOwnsColStarts(matrix) = 0;
   hypre_ParCSRMatrixDiag(matrix) = diag;
   hypre_ParCSRMatrixOffd(matrix) = offd;
   if (num_cols_offd)
      hypre_ParCSRMatrixColMapOffd(matrix) = col_map_offd;
   else
      hypre_ParCSRMatrixColMapOffd(matrix) = NULL;
   return matrix;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixPrint
*--------------------------------------------------------------------------*/
/* Write the matrix to per-rank files readable by hypre_ParCSRMatrixRead:
 * <file_name>.INFO.<id> (sizes, partitioning, offd column map),
 * <file_name>.D.<id> (diag part) and <file_name>.O.<id> (offd part, only if
 * it has columns).  Returns hypre_error_flag. */
HYPRE_Int
hypre_ParCSRMatrixPrint( hypre_ParCSRMatrix *matrix,
                         const char *file_name )
{
   MPI_Comm comm;
   HYPRE_BigInt global_num_rows;
   HYPRE_BigInt global_num_cols;
   HYPRE_BigInt *col_map_offd;
#ifndef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_BigInt *row_starts;
   HYPRE_BigInt *col_starts;
#endif
   HYPRE_Int my_id, i, num_procs;
   char new_file_d[80], new_file_o[80], new_file_info[80];
   FILE *fp;
   HYPRE_Int num_cols_offd = 0;
#ifdef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_BigInt row_s, row_e, col_s, col_e;
#endif
   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   comm = hypre_ParCSRMatrixComm(matrix);
   global_num_rows = hypre_ParCSRMatrixGlobalNumRows(matrix);
   global_num_cols = hypre_ParCSRMatrixGlobalNumCols(matrix);
   col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);
#ifndef HYPRE_NO_GLOBAL_PARTITION
   row_starts = hypre_ParCSRMatrixRowStarts(matrix);
   col_starts = hypre_ParCSRMatrixColStarts(matrix);
#endif
   if (hypre_ParCSRMatrixOffd(matrix))
      num_cols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(matrix));
   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_sprintf(new_file_d,"%s.D.%d",file_name,my_id);
   hypre_sprintf(new_file_o,"%s.O.%d",file_name,my_id);
   hypre_sprintf(new_file_info,"%s.INFO.%d",file_name,my_id);
   /* Write the CSR parts; the offd file is skipped when it has no columns. */
   hypre_CSRMatrixPrint(hypre_ParCSRMatrixDiag(matrix),new_file_d);
   if (num_cols_offd != 0)
      hypre_CSRMatrixPrint(hypre_ParCSRMatrixOffd(matrix),new_file_o);
   /* NOTE(review): fopen result is not checked before writing. */
   fp = fopen(new_file_info, "w");
   hypre_fprintf(fp, "%b\n", global_num_rows);
   hypre_fprintf(fp, "%b\n", global_num_cols);
   hypre_fprintf(fp, "%d\n", num_cols_offd);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   row_s = hypre_ParCSRMatrixFirstRowIndex(matrix);
   row_e = hypre_ParCSRMatrixLastRowIndex(matrix);
   col_s = hypre_ParCSRMatrixFirstColDiag(matrix);
   col_e = hypre_ParCSRMatrixLastColDiag(matrix);
   /* add 1 to the ends because this is a starts partition */
   hypre_fprintf(fp, "%b %b %b %b\n", row_s, row_e + 1, col_s, col_e + 1);
#else
   for (i=0; i < num_procs; i++)
      hypre_fprintf(fp, "%b %b\n", row_starts[i], col_starts[i]);
#endif
   /* Global column index of each offd column. */
   for (i=0; i < num_cols_offd; i++)
      hypre_fprintf(fp, "%b\n", col_map_offd[i]);
   fclose(fp);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixPrintIJ
*--------------------------------------------------------------------------*/
/* Write the local part of the matrix in IJ (coordinate) format to
 * <filename>.<myid>, one rank per file.  The first line holds the local
 * global index ranges (ilower iupper jlower jupper); each following line is
 * "I J value" with indices offset by base_i/base_j (e.g. 1 for 1-based
 * output).  Returns hypre_error_flag. */
HYPRE_Int
hypre_ParCSRMatrixPrintIJ( const hypre_ParCSRMatrix *matrix,
                           const HYPRE_Int base_i,
                           const HYPRE_Int base_j,
                           const char *filename )
{
   MPI_Comm comm;
   HYPRE_BigInt first_row_index;
   HYPRE_BigInt first_col_diag;
   hypre_CSRMatrix *diag;
   hypre_CSRMatrix *offd;
   HYPRE_BigInt *col_map_offd;
   HYPRE_Int num_rows;
   HYPRE_BigInt *row_starts;
   HYPRE_BigInt *col_starts;
   HYPRE_Complex *diag_data;
   HYPRE_Int *diag_i;
   HYPRE_Int *diag_j;
   HYPRE_Complex *offd_data;
   HYPRE_Int *offd_i;
   HYPRE_Int *offd_j;
   HYPRE_Int myid, num_procs, i, j;
   HYPRE_BigInt I, J;
   char new_filename[255];
   FILE *file;
   HYPRE_Int num_nonzeros_offd;
   HYPRE_BigInt ilower, iupper, jlower, jupper;
   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   comm = hypre_ParCSRMatrixComm(matrix);
   first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix);
   first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix);
   diag = hypre_ParCSRMatrixDiag(matrix);
   offd = hypre_ParCSRMatrixOffd(matrix);
   col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);
   num_rows = hypre_ParCSRMatrixNumRows(matrix);
   row_starts = hypre_ParCSRMatrixRowStarts(matrix);
   col_starts = hypre_ParCSRMatrixColStarts(matrix);
   hypre_MPI_Comm_rank(comm, &myid);
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_sprintf(new_filename,"%s.%05d", filename, myid);
   if ((file = fopen(new_filename, "w")) == NULL)
   {
      /* NOTE(review): the message contains "%s" but no filename argument is
         passed to hypre_error_w_msg — confirm whether the macro formats it. */
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n");
      return hypre_error_flag;
   }
   num_nonzeros_offd = hypre_CSRMatrixNumNonzeros(offd);
   diag_data = hypre_CSRMatrixData(diag);
   diag_i = hypre_CSRMatrixI(diag);
   diag_j = hypre_CSRMatrixJ(diag);
   offd_i = hypre_CSRMatrixI(offd);
   /* offd_data/offd_j are read only when num_nonzeros_offd > 0 below. */
   if (num_nonzeros_offd)
   {
      offd_data = hypre_CSRMatrixData(offd);
      offd_j = hypre_CSRMatrixJ(offd);
   }
   /* Local row/column ranges, shifted by the requested index bases. */
#ifdef HYPRE_NO_GLOBAL_PARTITION
   ilower = row_starts[0]+(HYPRE_BigInt)base_i;
   iupper = row_starts[1]+(HYPRE_BigInt)base_i - 1;
   jlower = col_starts[0]+(HYPRE_BigInt)base_j;
   jupper = col_starts[1]+(HYPRE_BigInt)base_j - 1;
#else
   ilower = row_starts[myid] +(HYPRE_BigInt)base_i;
   iupper = row_starts[myid+1]+(HYPRE_BigInt)base_i - 1;
   jlower = col_starts[myid] +(HYPRE_BigInt)base_j;
   jupper = col_starts[myid+1]+(HYPRE_BigInt)base_j - 1;
#endif
   hypre_fprintf(file, "%b %b %b %b\n", ilower, iupper, jlower, jupper);
   for (i = 0; i < num_rows; i++)
   {
      I = first_row_index + (HYPRE_BigInt)(i + base_i);
      /* print diag columns */
      for (j = diag_i[i]; j < diag_i[i+1]; j++)
      {
         J = first_col_diag + (HYPRE_BigInt)(diag_j[j] + base_j);
         if ( diag_data )
         {
#ifdef HYPRE_COMPLEX
            hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J,
                          hypre_creal(diag_data[j]), hypre_cimag(diag_data[j]));
#else
            hypre_fprintf(file, "%b %b %.14e\n", I, J, diag_data[j]);
#endif
         }
         else
            hypre_fprintf(file, "%b %b\n", I, J);
      }
      /* print offd columns: local offd column indices are mapped back to
         global columns through col_map_offd */
      if ( num_nonzeros_offd )
      {
         for (j = offd_i[i]; j < offd_i[i+1]; j++)
         {
            J = col_map_offd[offd_j[j]] + (HYPRE_BigInt)base_j;
            if ( offd_data )
            {
#ifdef HYPRE_COMPLEX
               hypre_fprintf(file, "%b %b %.14e , %.14e\n", I, J,
                             hypre_creal(offd_data[j]), hypre_cimag(offd_data[j]));
#else
               hypre_fprintf(file, "%b %b %.14e\n", I, J, offd_data[j]);
#endif
            }
            else
               hypre_fprintf(file, "%b %b\n", I, J );
         }
      }
   }
   fclose(file);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixReadIJ
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixReadIJ( MPI_Comm             comm,
                          const char          *filename,
                          HYPRE_Int           *base_i_ptr,
                          HYPRE_Int           *base_j_ptr,
                          hypre_ParCSRMatrix **matrix_ptr)
{
   /* Reads a ParCSRMatrix in IJ (triplet) format, one file per rank
    * ("<filename>.<myid>"), in the layout written by the matching PrintIJ
    * routine.  The detected row/column index bases (0 or 1) are returned
    * through base_i_ptr / base_j_ptr, the assembled matrix through
    * matrix_ptr.  Returns hypre_error_flag. */
   HYPRE_BigInt        global_num_rows;
   HYPRE_BigInt        global_num_cols;
   HYPRE_BigInt        first_row_index;
   HYPRE_BigInt        first_col_diag;
   HYPRE_BigInt        last_col_diag;
   hypre_ParCSRMatrix *matrix;
   hypre_CSRMatrix    *diag;
   hypre_CSRMatrix    *offd;
   HYPRE_BigInt       *col_map_offd;
   HYPRE_BigInt       *row_starts;
   HYPRE_BigInt       *col_starts;
   HYPRE_Int           num_rows;
   HYPRE_BigInt        big_base_i, big_base_j;
   HYPRE_Int           base_i, base_j;
   HYPRE_Complex      *diag_data;
   HYPRE_Int          *diag_i;
   HYPRE_Int          *diag_j;
   HYPRE_Complex      *offd_data = NULL;   /* only set when offd nonzeros exist */
   HYPRE_Int          *offd_i;
   HYPRE_Int          *offd_j = NULL;
   HYPRE_BigInt       *tmp_j = NULL;       /* global offd column indices */
   HYPRE_BigInt       *aux_offd_j;
   HYPRE_BigInt        I, J;
   HYPRE_Int           myid, num_procs, i, i2, j;
   char                new_filename[255];
   FILE               *file;
   HYPRE_Int           num_cols_offd, num_nonzeros_diag, num_nonzeros_offd;
   HYPRE_Int           equal, i_col, num_cols;
   HYPRE_Int           diag_cnt, offd_cnt, row_cnt;
   HYPRE_Complex       data;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &myid);

   hypre_sprintf(new_filename,"%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "r")) == NULL)
   {
      /* NOTE: hypre_error_w_msg takes a plain string; no argument
       * substitution happens */
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open input file\n");
      return hypre_error_flag;
   }

   /* header: global sizes, local sizes, local nonzero counts */
   hypre_fscanf(file, "%b %b", &global_num_rows, &global_num_cols);
   hypre_fscanf(file, "%d %d %d", &num_rows, &num_cols, &num_cols_offd);
   hypre_fscanf(file, "%d %d", &num_nonzeros_diag, &num_nonzeros_offd);

   row_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
   col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);

   for (i = 0; i <= num_procs; i++)
      hypre_fscanf(file, "%b %b", &row_starts[i], &col_starts[i]);

   /* the first starts carry the file's index base (0 or 1); shift to 0-based */
   big_base_i = row_starts[0];
   big_base_j = col_starts[0];
   base_i = (HYPRE_Int)row_starts[0];
   base_j = (HYPRE_Int)col_starts[0];

   equal = 1;
   for (i = 0; i <= num_procs; i++)
   {
      row_starts[i] -= big_base_i;
      col_starts[i] -= big_base_j;
      if (row_starts[i] != col_starts[i]) equal = 0;
   }
   if (equal)
   {
      /* square partitioning: share a single starts array */
      hypre_TFree(col_starts, HYPRE_MEMORY_HOST);
      col_starts = row_starts;
   }

   matrix = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
                                     row_starts, col_starts, num_cols_offd,
                                     num_nonzeros_diag, num_nonzeros_offd);
   hypre_ParCSRMatrixInitialize(matrix);

   diag = hypre_ParCSRMatrixDiag(matrix);
   offd = hypre_ParCSRMatrixOffd(matrix);

   diag_data = hypre_CSRMatrixData(diag);
   diag_i = hypre_CSRMatrixI(diag);
   diag_j = hypre_CSRMatrixJ(diag);

   offd_i = hypre_CSRMatrixI(offd);
   if (num_nonzeros_offd)
   {
      offd_data = hypre_CSRMatrixData(offd);
      offd_j = hypre_CSRMatrixJ(offd);
      /* tmp_j collects *global* off-diag column indices; the local
       * offd_j indices are generated only after col_map_offd is known */
      tmp_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST);
   }

   first_row_index = hypre_ParCSRMatrixFirstRowIndex(matrix);
   first_col_diag = hypre_ParCSRMatrixFirstColDiag(matrix);
   last_col_diag = first_col_diag+(HYPRE_BigInt)num_cols-1;

   diag_cnt = 0;
   offd_cnt = 0;
   row_cnt = 0;
   for (i = 0; i < num_nonzeros_diag+num_nonzeros_offd; i++)
   {
      /* read values */
      hypre_fscanf(file, "%b %b %le", &I, &J, &data);
      i2 = (HYPRE_Int)(I-big_base_i-first_row_index);
      J -= big_base_j;
      if (i2 > row_cnt)
      {
         /* new row started: record current fill counts as row offsets.
          * NOTE(review): assumes rows appear in order and none is empty;
          * an empty row would leave its offset at 0 -- TODO confirm the
          * writer guarantees this. */
         diag_i[i2] = diag_cnt;
         offd_i[i2] = offd_cnt;
         row_cnt++;
      }
      if (J < first_col_diag || J > last_col_diag)
      {
         tmp_j[offd_cnt] = J;
         offd_data[offd_cnt++] = data;
      }
      else
      {
         diag_j[diag_cnt] = (HYPRE_Int)(J - first_col_diag);
         diag_data[diag_cnt++] = data;
      }
   }
   diag_i[num_rows] = diag_cnt;
   offd_i[num_rows] = offd_cnt;
   fclose(file);

   /* generate col_map_offd */
   if (num_nonzeros_offd)
   {
      aux_offd_j = hypre_CTAlloc(HYPRE_BigInt, num_nonzeros_offd, HYPRE_MEMORY_HOST);
      for (i=0; i < num_nonzeros_offd; i++)
         aux_offd_j[i] = tmp_j[i];   /* FIX: was offd_j[i], which is unset here */
      hypre_BigQsort0(aux_offd_j,0,num_nonzeros_offd-1);
      col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);
      /* compress the sorted global indices into the distinct-column map */
      col_map_offd[0] = aux_offd_j[0];
      offd_cnt = 0;
      for (i=1; i < num_nonzeros_offd; i++)
      {
         if (aux_offd_j[i] > col_map_offd[offd_cnt])
            col_map_offd[++offd_cnt] = aux_offd_j[i];
      }
      /* translate each global offd column index into its local index */
      for (i=0; i < num_nonzeros_offd; i++)
      {
         offd_j[i] = hypre_BigBinarySearch(col_map_offd, tmp_j[i], num_cols_offd);
      }
      hypre_TFree(aux_offd_j, HYPRE_MEMORY_HOST);
      hypre_TFree(tmp_j, HYPRE_MEMORY_HOST);
   }

   /* move diagonal element in first position in each row */
   for (i=0; i < num_rows; i++)
   {
      i_col = diag_i[i];
      for (j=i_col; j < diag_i[i+1]; j++)
      {
         if (diag_j[j] == i)
         {
            diag_j[j] = diag_j[i_col];
            data = diag_data[j];
            diag_data[j] = diag_data[i_col];
            diag_data[i_col] = data;
            diag_j[i_col] = i;
            break;
         }
      }
   }

   *base_i_ptr = base_i;
   *base_j_ptr = base_j;
   *matrix_ptr = matrix;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixGetLocalRange
* returns the row numbers of the rows stored on this processor.
* "End" is actually the row number of the last row on this processor.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixGetLocalRange( hypre_ParCSRMatrix *matrix,
                                 HYPRE_BigInt       *row_start,
                                 HYPRE_BigInt       *row_end,
                                 HYPRE_BigInt       *col_start,
                                 HYPRE_BigInt       *col_end )
{
   /* Reports the global row/column index ranges owned by this processor.
    * "End" values are inclusive: they name the last owned row/column. */
   HYPRE_Int my_id;

   if (matrix == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_MPI_Comm_rank( hypre_ParCSRMatrixComm(matrix), &my_id );

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* assumed-partition build: the local range is cached on the matrix */
   *row_start = hypre_ParCSRMatrixFirstRowIndex(matrix);
   *row_end   = hypre_ParCSRMatrixLastRowIndex(matrix);
   *col_start = hypre_ParCSRMatrixFirstColDiag(matrix);
   *col_end   = hypre_ParCSRMatrixLastColDiag(matrix);
#else
   /* global-partition build: read this rank's slice of the starts arrays;
    * the -1 converts the exclusive upper bound into an inclusive "end" */
   {
      HYPRE_BigInt *rows = hypre_ParCSRMatrixRowStarts(matrix);
      HYPRE_BigInt *cols = hypre_ParCSRMatrixColStarts(matrix);

      *row_start = rows[my_id];
      *row_end   = rows[my_id + 1] - 1;
      *col_start = cols[my_id];
      *col_end   = cols[my_id + 1] - 1;
   }
#endif

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixGetRow
* Returns global column indices and/or values for a given row in the global
* matrix. Global row number is used, but the row must be stored locally or
* an error is returned. This implementation copies from the two matrices that
* store the local data, storing them in the hypre_ParCSRMatrix structure.
* Only a single row can be accessed via this function at any one time; the
* corresponding RestoreRow function must be called, to avoid bleeding memory,
* and to be able to look at another row.
* Either one of col_ind and values can be left null, and those values will
* not be returned.
* All indices are returned in 0-based indexing, no matter what is used under
* the hood. EXCEPTION: currently this only works if the local CSR matrices
* use 0-based indexing.
* This code, semantics, implementation, etc., are all based on PETSc's hypre_MPI_AIJ
* matrix code, adjusted for our data and software structures.
* AJC 4/99.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixGetRow( hypre_ParCSRMatrix *mat,
                          HYPRE_BigInt row,
                          HYPRE_Int *size,
                          HYPRE_BigInt **col_ind,
                          HYPRE_Complex **values )
{
   /* Copies one locally-owned global row into per-matrix scratch buffers,
    * returning its length in *size and (optionally) global column indices
    * and values.  Caller must call hypre_ParCSRMatrixRestoreRow before
    * fetching another row.  Returns -1 (not hypre_error_flag) when a row
    * is already active or 'row' is not owned locally. */
   HYPRE_Int my_id;
   HYPRE_BigInt row_start, row_end;
   hypre_CSRMatrix *Aa;   /* local diag block */
   hypre_CSRMatrix *Ba;   /* local offd block */
   if (!mat)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat);
   Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat);
   /* only one row may be active at a time */
   if (hypre_ParCSRMatrixGetrowactive(mat)) return(-1);
   hypre_MPI_Comm_rank( hypre_ParCSRMatrixComm(mat), &my_id );
   hypre_ParCSRMatrixGetrowactive(mat) = 1;
#ifdef HYPRE_NO_GLOBAL_PARTITION
   row_start = hypre_ParCSRMatrixFirstRowIndex(mat);
   row_end = hypre_ParCSRMatrixLastRowIndex(mat) + 1;   /* exclusive */
#else
   row_end = hypre_ParCSRMatrixRowStarts(mat)[ my_id + 1 ];
   row_start = hypre_ParCSRMatrixRowStarts(mat)[ my_id ];
#endif
   /* NOTE(review): this early return leaves Getrowactive set to 1, so a
    * subsequent GetRow would also fail until RestoreRow is called --
    * confirm callers expect that. */
   if (row < row_start || row >= row_end) return(-1);
   /* if buffer is not allocated and some information is requested,
      allocate buffer */
   if (!hypre_ParCSRMatrixRowvalues(mat) && ( col_ind || values ))
   {
      /*
        allocate enough space to hold information from the longest row.
      */
      HYPRE_Int max = 1,tmp;
      HYPRE_Int i;
      HYPRE_Int m = row_end-row_start;
      /* longest row = max over rows of (diag nnz + offd nnz) */
      for ( i=0; i<m; i++ ) {
         tmp = hypre_CSRMatrixI(Aa)[i+1] - hypre_CSRMatrixI(Aa)[i] +
               hypre_CSRMatrixI(Ba)[i+1] - hypre_CSRMatrixI(Ba)[i];
         if (max < tmp) { max = tmp; }
      }
      hypre_ParCSRMatrixRowvalues(mat) = (HYPRE_Complex *) hypre_CTAlloc( HYPRE_Complex, max , HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixRowindices(mat) = (HYPRE_BigInt *) hypre_CTAlloc( HYPRE_BigInt, max , HYPRE_MEMORY_HOST);
   }
   /* Copy from dual sequential matrices into buffer */
   {
      HYPRE_Complex *vworkA, *vworkB, *v_p;
      HYPRE_Int i, *cworkA, *cworkB;
      HYPRE_BigInt cstart = hypre_ParCSRMatrixFirstColDiag(mat);
      HYPRE_Int nztot, nzA, nzB, lrow=(HYPRE_Int)(row-row_start);
      HYPRE_BigInt *cmap, *idx_p;
      /* nzA/nzB: entry counts of this row in diag and offd;
       * cwork*/ /*vwork*: pointers to this row's index/value segments */
      nzA = hypre_CSRMatrixI(Aa)[lrow+1]-hypre_CSRMatrixI(Aa)[lrow];
      cworkA = &( hypre_CSRMatrixJ(Aa)[ hypre_CSRMatrixI(Aa)[lrow] ] );
      vworkA = &( hypre_CSRMatrixData(Aa)[ hypre_CSRMatrixI(Aa)[lrow] ] );
      nzB = hypre_CSRMatrixI(Ba)[lrow+1]-hypre_CSRMatrixI(Ba)[lrow];
      cworkB = &( hypre_CSRMatrixJ(Ba)[ hypre_CSRMatrixI(Ba)[lrow] ] );
      vworkB = &( hypre_CSRMatrixData(Ba)[ hypre_CSRMatrixI(Ba)[lrow] ] );
      nztot = nzA + nzB;
      cmap = hypre_ParCSRMatrixColMapOffd(mat);
      if (values || col_ind) {
         if (nztot) {
            /* Sort by increasing column numbers, assuming A and B already sorted */
            /* imark = number of offd entries whose global column precedes
             * the diag range; layout is [offd<cstart | diag | offd>=cstart] */
            HYPRE_Int imark = -1;
            if (values) {
               *values = v_p = hypre_ParCSRMatrixRowvalues(mat);
               for ( i=0; i<nzB; i++ ) {
                  if (cmap[cworkB[i]] < cstart) v_p[i] = vworkB[i];
                  else break;
               }
               imark = i;
               for ( i=0; i<nzA; i++ ) v_p[imark+i] = vworkA[i];
               for ( i=imark; i<nzB; i++ ) v_p[nzA+i] = vworkB[i];
            }
            if (col_ind) {
               *col_ind = idx_p = hypre_ParCSRMatrixRowindices(mat);
               if (imark > -1) {
                  /* imark already computed in the values pass above */
                  for ( i=0; i<imark; i++ ) {
                     idx_p[i] = cmap[cworkB[i]];
                  }
               } else {
                  for ( i=0; i<nzB; i++ ) {
                     if (cmap[cworkB[i]] < cstart) idx_p[i] = cmap[cworkB[i]];
                     else break;
                  }
                  imark = i;
               }
               /* diag columns are local: shift back to global with cstart */
               for ( i=0; i<nzA; i++ ) idx_p[imark+i] = cstart + cworkA[i];
               for ( i=imark; i<nzB; i++ ) idx_p[nzA+i] = cmap[cworkB[i]];
            }
         }
         else {
            /* empty row: hand back null pointers */
            if (col_ind) *col_ind = 0;
            if (values) *values = 0;
         }
      }
      *size = nztot;
   } /* End of copy */
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixRestoreRow
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixRestoreRow( hypre_ParCSRMatrix *matrix,
                              HYPRE_BigInt row,
                              HYPRE_Int *size,
                              HYPRE_BigInt **col_ind,
                              HYPRE_Complex **values )
{
   /* Releases the row fetched by hypre_ParCSRMatrixGetRow so that another
    * row may be accessed.  It is an error to call this when no row is
    * currently active. */
   if (hypre_ParCSRMatrixGetrowactive(matrix))
   {
      /* clear the single-row "lock" */
      hypre_ParCSRMatrixGetrowactive(matrix) = 0;
   }
   else
   {
      hypre_error(HYPRE_ERROR_GENERIC);
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixToParCSRMatrix:
* generates a ParCSRMatrix distributed across the processors in comm
* from a CSRMatrix on proc 0 .
*
* This shouldn't be used with the HYPRE_NO_GLOBAL_PARTITON option
*
*--------------------------------------------------------------------------*/
hypre_ParCSRMatrix *
hypre_CSRMatrixToParCSRMatrix( MPI_Comm comm,
                               hypre_CSRMatrix *A,
                               HYPRE_BigInt *row_starts,
                               HYPRE_BigInt *col_starts )
{
   /* Distributes a CSR matrix living on rank 0 across all ranks of comm.
    * Rank 0 packs sizes and (optionally) the row/col partitions into
    * global_data and broadcasts; each rank then receives its row slab via
    * an MPI derived datatype and splits it into diag/offd blocks. */
   HYPRE_BigInt *global_data;      /* [0]=rows [1]=cols [2]=payload size
                                      [3]=partition code [4..]=starts */
   HYPRE_BigInt global_size;
   HYPRE_BigInt global_num_rows;
   HYPRE_BigInt global_num_cols;
   HYPRE_Int *local_num_rows;
   HYPRE_Int num_procs, my_id;
   HYPRE_Int *local_num_nonzeros=NULL;
   HYPRE_Int num_nonzeros;
   HYPRE_Complex *a_data;
   HYPRE_Int *a_i;
   HYPRE_Int *a_j;
   hypre_CSRMatrix *local_A;
   hypre_MPI_Request *requests;
   hypre_MPI_Status *status, status0;
   hypre_MPI_Datatype *csr_matrix_datatypes;
   hypre_ParCSRMatrix *par_matrix;
   HYPRE_BigInt first_col_diag;
   HYPRE_BigInt last_col_diag;
   HYPRE_Int i, j, ind;
   hypre_MPI_Comm_rank(comm, &my_id);
   hypre_MPI_Comm_size(comm, &num_procs);
   global_data = hypre_CTAlloc(HYPRE_BigInt, 2*num_procs+6, HYPRE_MEMORY_HOST);
   if (my_id == 0)
   {
      /* default payload: just the 3 size entries (no partitions given) */
      global_size = 3;
      if (row_starts)
      {
         if (col_starts)
         {
            if (col_starts != row_starts)
            {
               /* contains code for what to expect,
                  if 0: row_starts = col_starts, only row_starts given
                  if 1: only row_starts given, col_starts = NULL
                  if 2: both row_starts and col_starts given
                  if 3: only col_starts given, row_starts = NULL */
               global_data[3] = 2;
               global_size = (HYPRE_BigInt)(2*num_procs+6);
               for (i=0; i < num_procs+1; i++)
                  global_data[i+4] = row_starts[i];
               for (i=0; i < num_procs+1; i++)
                  global_data[i+num_procs+5] = col_starts[i];
            }
            else
            {
               global_data[3] = 0;
               global_size = (HYPRE_BigInt)num_procs+5;
               for (i=0; i < num_procs+1; i++)
                  global_data[i+4] = row_starts[i];
            }
         }
         else
         {
            global_data[3] = 1;
            global_size = (HYPRE_BigInt)num_procs+5;
            for (i=0; i < num_procs+1; i++)
               global_data[i+4] = row_starts[i];
         }
      }
      else
      {
         if (col_starts)
         {
            global_data[3] = 3;
            global_size = (HYPRE_BigInt)num_procs+5;
            for (i=0; i < num_procs+1; i++)
               global_data[i+4] = col_starts[i];
         }
      }
      global_data[0] = (HYPRE_BigInt)hypre_CSRMatrixNumRows(A);
      global_data[1] = (HYPRE_BigInt)hypre_CSRMatrixNumCols(A);
      global_data[2] = global_size;
      a_data = hypre_CSRMatrixData(A);
      a_i = hypre_CSRMatrixI(A);
      a_j = hypre_CSRMatrixJ(A);
   }
   /* first broadcast only the fixed-size header ... */
   hypre_MPI_Bcast(global_data,3,HYPRE_MPI_BIG_INT,0,comm);
   global_num_rows = global_data[0];
   global_num_cols = global_data[1];
   global_size = global_data[2];
   /* ... then, if partitions were supplied, the variable-size tail */
   if (global_size > 3)
   {
      hypre_MPI_Bcast(&global_data[3],global_size-3,HYPRE_MPI_BIG_INT,0,comm);
      if (my_id > 0)
      {
         /* non-root ranks reconstruct row_starts/col_starts per the code */
         if (global_data[3] < 3)
         {
            row_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
            for (i=0; i< num_procs+1; i++)
            {
               row_starts[i] = global_data[i+4];
            }
            if (global_data[3] == 0)
               col_starts = row_starts;
            if (global_data[3] == 2)
            {
               col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
               for (i=0; i < num_procs+1; i++)
               {
                  col_starts[i] = global_data[i+num_procs+5];
               }
            }
         }
         else
         {
            /* code 3: only col_starts were given */
            col_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
            for (i=0; i< num_procs+1; i++)
            {
               col_starts[i] = global_data[i+4];
            }
         }
      }
   }
   hypre_TFree(global_data, HYPRE_MEMORY_HOST);
   local_num_rows = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);
   csr_matrix_datatypes = hypre_CTAlloc(hypre_MPI_Datatype, num_procs, HYPRE_MEMORY_HOST);
   par_matrix = hypre_ParCSRMatrixCreate(
      comm, global_num_rows, global_num_cols,row_starts,col_starts,0,0,0);
   /* Create may have generated default partitions; use its copies */
   row_starts = hypre_ParCSRMatrixRowStarts(par_matrix);
   col_starts = hypre_ParCSRMatrixColStarts(par_matrix);
   for (i=0; i < num_procs; i++)
      local_num_rows[i] = (HYPRE_Int)(row_starts[i+1] - row_starts[i]);
   if (my_id == 0)
   {
      /* per-rank nonzero counts from the row-pointer differences */
      local_num_nonzeros = hypre_CTAlloc(HYPRE_Int, num_procs, HYPRE_MEMORY_HOST);
      for (i=0; i < num_procs-1; i++)
         local_num_nonzeros[i] = a_i[(HYPRE_Int)row_starts[i+1]]
                                 - a_i[(HYPRE_Int)row_starts[i]];
      local_num_nonzeros[num_procs-1] = a_i[(HYPRE_Int)global_num_rows]
                                        - a_i[(HYPRE_Int)row_starts[num_procs-1]];
   }
   hypre_MPI_Scatter(local_num_nonzeros,1,HYPRE_MPI_INT,&num_nonzeros,1,
                     HYPRE_MPI_INT,0,comm);
   if (my_id == 0) num_nonzeros = local_num_nonzeros[0];
   local_A = hypre_CSRMatrixCreate(local_num_rows[my_id], (HYPRE_Int)global_num_cols,
                                   num_nonzeros);
   if (my_id == 0)
   {
      /* rank 0 sends each slab with a derived datatype that gathers the
       * data/i/j segments straight out of A (no packing buffer) */
      requests = hypre_CTAlloc(hypre_MPI_Request, num_procs-1, HYPRE_MEMORY_HOST);
      status = hypre_CTAlloc(hypre_MPI_Status, num_procs-1, HYPRE_MEMORY_HOST);
      j=0;
      for (i=1; i < num_procs; i++)
      {
         ind = a_i[(HYPRE_Int)row_starts[i]];
         hypre_BuildCSRMatrixMPIDataType(local_num_nonzeros[i],
                                         local_num_rows[i],
                                         &a_data[ind],
                                         &a_i[(HYPRE_Int)row_starts[i]],
                                         &a_j[ind],
                                         &csr_matrix_datatypes[i]);
         hypre_MPI_Isend(hypre_MPI_BOTTOM, 1, csr_matrix_datatypes[i], i, 0, comm,
                         &requests[j++]);
         hypre_MPI_Type_free(&csr_matrix_datatypes[i]);
      }
      /* rank 0's own slab: alias A's arrays instead of copying */
      hypre_CSRMatrixData(local_A) = a_data;
      hypre_CSRMatrixI(local_A) = a_i;
      hypre_CSRMatrixJ(local_A) = a_j;
      hypre_CSRMatrixOwnsData(local_A) = 0;
      hypre_MPI_Waitall(num_procs-1,requests,status);
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
      hypre_TFree(local_num_nonzeros, HYPRE_MEMORY_HOST);
   }
   else
   {
      hypre_CSRMatrixInitialize(local_A);
      hypre_BuildCSRMatrixMPIDataType(num_nonzeros,
                                      local_num_rows[my_id],
                                      hypre_CSRMatrixData(local_A),
                                      hypre_CSRMatrixI(local_A),
                                      hypre_CSRMatrixJ(local_A),
                                      csr_matrix_datatypes);
      hypre_MPI_Recv(hypre_MPI_BOTTOM,1,csr_matrix_datatypes[0],0,0,comm,&status0);
      hypre_MPI_Type_free(csr_matrix_datatypes);
   }
   first_col_diag = col_starts[my_id];
   last_col_diag = col_starts[my_id+1]-1;
   /* split the local slab into diag/offd blocks of par_matrix */
   GenerateDiagAndOffd(local_A, par_matrix, first_col_diag, last_col_diag);
   /* set pointers back to NULL before destroying */
   if (my_id == 0)
   {
      hypre_CSRMatrixData(local_A) = NULL;
      hypre_CSRMatrixI(local_A) = NULL;
      hypre_CSRMatrixJ(local_A) = NULL;
   }
   hypre_CSRMatrixDestroy(local_A);
   hypre_TFree(local_num_rows, HYPRE_MEMORY_HOST);
   hypre_TFree(csr_matrix_datatypes, HYPRE_MEMORY_HOST);
   return par_matrix;
}
HYPRE_Int
GenerateDiagAndOffd(hypre_CSRMatrix *A,
                    hypre_ParCSRMatrix *matrix,
                    HYPRE_BigInt first_col_diag,
                    HYPRE_BigInt last_col_diag)
{
   /* Splits a local CSR matrix A (with global column indices) into the
    * diag block (columns in [first_col_diag, last_col_diag]) and the offd
    * block (all other columns) of 'matrix', building col_map_offd.
    * Two passes: pass 1 counts per-row diag/offd entries and marks which
    * off-range columns occur; pass 2 copies values and remaps indices. */
   HYPRE_Int  i, j;
   HYPRE_Int  jo, jd;
   HYPRE_Int  num_rows = hypre_CSRMatrixNumRows(A);
   HYPRE_Int  num_cols = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex *a_data = hypre_CSRMatrixData(A);
   HYPRE_Int *a_i = hypre_CSRMatrixI(A);
   HYPRE_Int *a_j = hypre_CSRMatrixJ(A);
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(matrix);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(matrix);
   HYPRE_BigInt *col_map_offd;
   HYPRE_Complex *diag_data, *offd_data;
   HYPRE_Int *diag_i, *offd_i;
   HYPRE_Int *diag_j, *offd_j;
   HYPRE_Int *marker;   /* per global column: seen flag, later local offd index */
   HYPRE_Int num_cols_diag, num_cols_offd;
   HYPRE_Int first_elmt = a_i[0];   /* a_i may not start at 0 */
   HYPRE_Int num_nonzeros = a_i[num_rows]-first_elmt;
   HYPRE_Int counter;
   num_cols_diag = (HYPRE_Int)(last_col_diag - first_col_diag +1);
   num_cols_offd = 0;
   if (num_cols - num_cols_diag)
   {
      /* some columns fall outside the diag range */
      hypre_CSRMatrixInitialize(diag);
      diag_i = hypre_CSRMatrixI(diag);
      hypre_CSRMatrixInitialize(offd);
      offd_i = hypre_CSRMatrixI(offd);
      marker = hypre_CTAlloc(HYPRE_Int, num_cols, HYPRE_MEMORY_HOST);
      for (i=0; i < num_cols; i++)
         marker[i] = 0;
      jo = 0;
      jd = 0;
      /* pass 1: count entries per row and flag distinct offd columns */
      for (i=0; i < num_rows; i++)
      {
         offd_i[i] = jo;
         diag_i[i] = jd;
         for (j=a_i[i]-first_elmt; j < a_i[i+1]-first_elmt; j++)
            if (a_j[j] < first_col_diag || a_j[j] > last_col_diag)
            {
               if (!marker[a_j[j]])
               {
                  marker[a_j[j]] = 1;
                  num_cols_offd++;
               }
               jo++;
            }
            else
            {
               jd++;
            }
      }
      offd_i[num_rows] = jo;
      diag_i[num_rows] = jd;
      hypre_ParCSRMatrixColMapOffd(matrix) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
      col_map_offd = hypre_ParCSRMatrixColMapOffd(matrix);
      /* compress marked columns in ascending order; reuse marker to store
       * each column's local offd index for the remap in pass 2 */
      counter = 0;
      for (i=0; i < num_cols; i++)
         if (marker[i])
         {
            col_map_offd[counter] = (HYPRE_BigInt)i;
            marker[i] = counter;
            counter++;
         }
      /* re-Initialize with the now-known nonzero counts to size data/j */
      hypre_CSRMatrixNumNonzeros(diag) = jd;
      hypre_CSRMatrixInitialize(diag);
      diag_data = hypre_CSRMatrixData(diag);
      diag_j = hypre_CSRMatrixJ(diag);
      hypre_CSRMatrixNumNonzeros(offd) = jo;
      hypre_CSRMatrixNumCols(offd) = num_cols_offd;
      hypre_CSRMatrixInitialize(offd);
      offd_data = hypre_CSRMatrixData(offd);
      offd_j = hypre_CSRMatrixJ(offd);
      jo = 0;
      jd = 0;
      /* pass 2: copy values; diag indices shifted local, offd via marker */
      for (i=0; i < num_rows; i++)
      {
         for (j=a_i[i]-first_elmt; j < a_i[i+1]-first_elmt; j++)
            if (a_j[j] < (HYPRE_Int)first_col_diag || a_j[j] > (HYPRE_Int)last_col_diag)
            {
               offd_data[jo] = a_data[j];
               offd_j[jo++] = marker[a_j[j]];
            }
            else
            {
               diag_data[jd] = a_data[j];
               diag_j[jd++] = (HYPRE_Int)(a_j[j]-first_col_diag);
            }
      }
      hypre_TFree(marker, HYPRE_MEMORY_HOST);
   }
   else
   {
      /* all columns are in the diag range: copy A wholesale, empty offd */
      hypre_CSRMatrixNumNonzeros(diag) = num_nonzeros;
      hypre_CSRMatrixInitialize(diag);
      diag_data = hypre_CSRMatrixData(diag);
      diag_i = hypre_CSRMatrixI(diag);
      diag_j = hypre_CSRMatrixJ(diag);
      for (i=0; i < num_nonzeros; i++)
      {
         diag_data[i] = a_data[i];
         diag_j[i] = a_j[i];
      }
      offd_i = hypre_CTAlloc(HYPRE_Int, num_rows+1, HYPRE_MEMORY_HOST);
      for (i=0; i < num_rows+1; i++)
      {
         diag_i[i] = a_i[i];
         offd_i[i] = 0;
      }
      hypre_CSRMatrixNumCols(offd) = 0;
      hypre_CSRMatrixI(offd) = offd_i;
   }
   return hypre_error_flag;
}
hypre_CSRMatrix *
hypre_MergeDiagAndOffd(hypre_ParCSRMatrix *par_matrix)
{
   /* Merges the local diag and offd blocks of a ParCSRMatrix into one
    * local CSR matrix whose (big) column indices are global.  Within each
    * row, diag entries are emitted first, then offd entries (no column
    * sort).  Row count is local; column count is the global total. */
   hypre_CSRMatrix  *diag = hypre_ParCSRMatrixDiag(par_matrix);
   hypre_CSRMatrix  *offd = hypre_ParCSRMatrixOffd(par_matrix);
   hypre_CSRMatrix  *matrix;
   HYPRE_BigInt      num_cols = hypre_ParCSRMatrixGlobalNumCols(par_matrix);
   HYPRE_BigInt      first_col_diag = hypre_ParCSRMatrixFirstColDiag(par_matrix);
   HYPRE_BigInt     *col_map_offd = hypre_ParCSRMatrixColMapOffd(par_matrix);
   HYPRE_Int         num_rows = hypre_CSRMatrixNumRows(diag);
   HYPRE_Int        *diag_i = hypre_CSRMatrixI(diag);
   HYPRE_Int        *diag_j = hypre_CSRMatrixJ(diag);
   HYPRE_Complex    *diag_data = hypre_CSRMatrixData(diag);
   HYPRE_Int        *offd_i = hypre_CSRMatrixI(offd);
   HYPRE_Int        *offd_j = hypre_CSRMatrixJ(offd);
   HYPRE_Complex    *offd_data = hypre_CSRMatrixData(offd);
   HYPRE_Int        *matrix_i;
   HYPRE_BigInt     *matrix_j;
   HYPRE_Complex    *matrix_data;
   HYPRE_Int         num_nonzeros, i, j;
   HYPRE_Int         count;
   HYPRE_Int         size, rest, num_threads, ii;

   num_nonzeros = diag_i[num_rows] + offd_i[num_rows];
   matrix = hypre_CSRMatrixCreate(num_rows,num_cols,num_nonzeros);
   hypre_CSRMatrixBigInitialize(matrix);   /* allocates big_j for global indices */
   matrix_i = hypre_CSRMatrixI(matrix);
   matrix_j = hypre_CSRMatrixBigJ(matrix);
   matrix_data = hypre_CSRMatrixData(matrix);
   num_threads = hypre_NumThreads();
   size = num_rows/num_threads;
   rest = num_rows - size*num_threads;   /* first 'rest' threads get one extra row */

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii, i, j, count) HYPRE_SMP_SCHEDULE
#endif
   for (ii=0; ii < num_threads; ii++)
   {
      HYPRE_Int ns, ne;   /* this thread's row range [ns, ne) */
      if (ii < rest)
      {
         ns = ii*size+ii;
         ne = (ii+1)*size+ii+1;
      }
      else
      {
         ns = ii*size+rest;
         ne = (ii+1)*size+rest;
      }
      /* merged row offset at ns is independent of other threads because
       * diag_i/offd_i already hold full prefix sums */
      count = diag_i[ns]+offd_i[ns];   /* fixed: stray ';;' removed */
      for (i=ns; i < ne; i++)
      {
         matrix_i[i] = count;
         /* diag entries: shift local column indices back to global */
         for (j=diag_i[i]; j < diag_i[i+1]; j++)
         {
            matrix_data[count] = diag_data[j];
            matrix_j[count++] = (HYPRE_BigInt)diag_j[j]+first_col_diag;
         }
         /* offd entries: translate through col_map_offd */
         for (j=offd_i[i]; j < offd_i[i+1]; j++)
         {
            matrix_data[count] = offd_data[j];
            matrix_j[count++] = col_map_offd[offd_j[j]];
         }
      }
   } /* end parallel region */
   matrix_i[num_rows] = num_nonzeros;

   return matrix;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixToCSRMatrixAll:
* generates a CSRMatrix from a ParCSRMatrix on all processors that have
* parts of the ParCSRMatrix
* Warning: this only works for a ParCSRMatrix that is smaller than 2^31-1
*--------------------------------------------------------------------------*/
hypre_CSRMatrix *
hypre_ParCSRMatrixToCSRMatrixAll(hypre_ParCSRMatrix *par_matrix)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(par_matrix);
hypre_CSRMatrix *matrix;
hypre_CSRMatrix *local_matrix;
HYPRE_Int num_rows = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumRows(par_matrix);
HYPRE_Int num_cols = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumCols(par_matrix);
#ifndef HYPRE_NO_GLOBAL_PARTITION
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(par_matrix);
#endif
HYPRE_Int *matrix_i;
HYPRE_Int *matrix_j;
HYPRE_Complex *matrix_data;
HYPRE_Int *local_matrix_i;
HYPRE_Int *local_matrix_j;
HYPRE_Complex *local_matrix_data;
HYPRE_Int i, j;
HYPRE_Int local_num_rows;
HYPRE_Int local_num_nonzeros;
HYPRE_Int num_nonzeros;
HYPRE_Int num_data;
HYPRE_Int num_requests;
HYPRE_Int vec_len, offset;
HYPRE_Int start_index;
HYPRE_Int proc_id;
HYPRE_Int num_procs, my_id;
HYPRE_Int num_types;
HYPRE_Int *used_procs;
hypre_MPI_Request *requests;
hypre_MPI_Status *status;
#ifdef HYPRE_NO_GLOBAL_PARTITION
HYPRE_Int *new_vec_starts;
HYPRE_Int num_contacts;
HYPRE_Int contact_proc_list[1];
HYPRE_Int contact_send_buf[1];
HYPRE_Int contact_send_buf_starts[2];
HYPRE_Int max_response_size;
HYPRE_Int *response_recv_buf=NULL;
HYPRE_Int *response_recv_buf_starts = NULL;
hypre_DataExchangeResponse response_obj;
hypre_ProcListElements send_proc_obj;
HYPRE_Int *send_info = NULL;
hypre_MPI_Status status1;
HYPRE_Int count, tag1 = 11112, tag2 = 22223, tag3 = 33334;
HYPRE_Int start;
#endif
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
local_num_rows = (HYPRE_Int)(hypre_ParCSRMatrixLastRowIndex(par_matrix) -
hypre_ParCSRMatrixFirstRowIndex(par_matrix) + 1);
local_matrix = hypre_MergeDiagAndOffd(par_matrix); /* creates matrix */
hypre_CSRMatrixBigJtoJ(local_matrix); /* copies big_j to j */
local_matrix_i = hypre_CSRMatrixI(local_matrix);
local_matrix_j = hypre_CSRMatrixJ(local_matrix);
local_matrix_data = hypre_CSRMatrixData(local_matrix);
/* determine procs that have vector data and store their ids in used_procs */
/* we need to do an exchange data for this. If I own row then I will contact
processor 0 with the endpoint of my local range */
if (local_num_rows > 0)
{
num_contacts = 1;
contact_proc_list[0] = 0;
contact_send_buf[0] = (HYPRE_Int)hypre_ParCSRMatrixLastRowIndex(par_matrix);
contact_send_buf_starts[0] = 0;
contact_send_buf_starts[1] = 1;
}
else
{
num_contacts = 0;
contact_send_buf_starts[0] = 0;
contact_send_buf_starts[1] = 0;
}
/*build the response object*/
/*send_proc_obj will be for saving info from contacts */
send_proc_obj.length = 0;
send_proc_obj.storage_length = 10;
send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts =
hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
send_proc_obj.vec_starts[0] = 0;
send_proc_obj.element_storage_length = 10;
send_proc_obj.elements =
hypre_CTAlloc(HYPRE_BigInt, send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST);
max_response_size = 0; /* each response is null */
response_obj.fill_response = hypre_FillResponseParToCSRMatrix;
response_obj.data1 = NULL;
response_obj.data2 = &send_proc_obj; /*this is where we keep info from contacts*/
hypre_DataExchangeList(num_contacts,
contact_proc_list, contact_send_buf,
contact_send_buf_starts, sizeof(HYPRE_Int),
sizeof(HYPRE_Int), &response_obj,
max_response_size, 1,
comm, (void**) &response_recv_buf,
&response_recv_buf_starts);
/* now processor 0 should have a list of ranges for processors that have rows -
these are in send_proc_obj - it needs to create the new list of processors
and also an array of vec starts - and send to those who own row*/
if (my_id)
{
if (local_num_rows)
{
/* look for a message from processor 0 */
hypre_MPI_Probe(0, tag1, comm, &status1);
hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count);
send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1);
/* now unpack */
num_types = send_info[0];
used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST);
for (i=1; i<= num_types; i++)
{
used_procs[i-1] = send_info[i];
}
for (i=num_types+1; i< count; i++)
{
new_vec_starts[i-num_types-1] = send_info[i] ;
}
}
else /* clean up and exit */
{
hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST);
if(response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST);
if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST);
if (hypre_CSRMatrixOwnsData(local_matrix))
hypre_CSRMatrixDestroy(local_matrix);
else
hypre_TFree(local_matrix, HYPRE_MEMORY_HOST);
return NULL;
}
}
else /* my_id ==0 */
{
num_types = send_proc_obj.length;
used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST);
new_vec_starts[0] = 0;
for (i=0; i< num_types; i++)
{
used_procs[i] = send_proc_obj.id[i];
new_vec_starts[i+1] = send_proc_obj.elements[i]+1;
}
hypre_qsort0(used_procs, 0, num_types-1);
hypre_qsort0(new_vec_starts, 0, num_types);
/*now we need to put into an array to send */
count = 2*num_types+2;
send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
send_info[0] = num_types;
for (i=1; i<= num_types; i++)
{
send_info[i] = (HYPRE_BigInt)used_procs[i-1];
}
for (i=num_types+1; i< count; i++)
{
send_info[i] = new_vec_starts[i-num_types-1];
}
requests = hypre_CTAlloc(hypre_MPI_Request, num_types, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_types, HYPRE_MEMORY_HOST);
/* don't send to myself - these are sorted so my id would be first*/
start = 0;
if (num_types && used_procs[0] == 0)
{
start = 1;
}
for (i=start; i < num_types; i++)
{
hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i], tag1,
comm, &requests[i-start]);
}
hypre_MPI_Waitall(num_types-start, requests, status);
hypre_TFree(status, HYPRE_MEMORY_HOST);
hypre_TFree(requests, HYPRE_MEMORY_HOST);
}
/* clean up */
hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST);
hypre_TFree(send_info, HYPRE_MEMORY_HOST);
if(response_recv_buf) hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST);
if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST);
/* now proc 0 can exit if it has no rows */
if (!local_num_rows)
{
if (hypre_CSRMatrixOwnsData(local_matrix))
hypre_CSRMatrixDestroy(local_matrix);
else
hypre_TFree(local_matrix, HYPRE_MEMORY_HOST);
hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(used_procs, HYPRE_MEMORY_HOST);
return NULL;
}
/* everyone left has rows and knows: new_vec_starts, num_types, and used_procs */
/* this matrix should be rather small */
matrix_i = hypre_CTAlloc(HYPRE_Int, num_rows+1, HYPRE_MEMORY_HOST);
num_requests = 4*num_types;
requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);
/* exchange contents of local_matrix_i - here we are sending to ourself also*/
j = 0;
for (i = 0; i < num_types; i++)
{
proc_id = used_procs[i];
vec_len = (HYPRE_Int)(new_vec_starts[i+1] - new_vec_starts[i]);
hypre_MPI_Irecv(&matrix_i[new_vec_starts[i]+1], vec_len, HYPRE_MPI_INT,
proc_id, tag2, comm, &requests[j++]);
}
for (i = 0; i < num_types; i++)
{
proc_id = used_procs[i];
hypre_MPI_Isend(&local_matrix_i[1], local_num_rows, HYPRE_MPI_INT,
proc_id, tag2, comm, &requests[j++]);
}
hypre_MPI_Waitall(j, requests, status);
/* generate matrix_i from received data */
/* global numbering?*/
offset = matrix_i[new_vec_starts[1]];
for (i=1; i < num_types; i++)
{
for (j = new_vec_starts[i]; j < new_vec_starts[i+1]; j++)
matrix_i[j+1] += offset;
offset = matrix_i[new_vec_starts[i+1]];
}
num_nonzeros = matrix_i[num_rows];
matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros);
hypre_CSRMatrixMemoryLocation(matrix) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixI(matrix) = matrix_i;
hypre_CSRMatrixInitialize(matrix);
matrix_j = hypre_CSRMatrixJ(matrix);
matrix_data = hypre_CSRMatrixData(matrix);
/* generate datatypes for further data exchange and exchange remaining
data, i.e. column info and actual data */
j = 0;
for (i = 0; i < num_types; i++)
{
proc_id = used_procs[i];
start_index = matrix_i[(HYPRE_Int)new_vec_starts[i]];
num_data = matrix_i[(HYPRE_Int)new_vec_starts[i+1]] - start_index;
hypre_MPI_Irecv(&matrix_data[start_index], num_data, HYPRE_MPI_COMPLEX,
used_procs[i], tag1, comm, &requests[j++]);
hypre_MPI_Irecv(&matrix_j[start_index], num_data, HYPRE_MPI_INT,
used_procs[i], tag3, comm, &requests[j++]);
}
local_num_nonzeros = local_matrix_i[local_num_rows];
for (i=0; i < num_types; i++)
{
hypre_MPI_Isend(local_matrix_data, local_num_nonzeros, HYPRE_MPI_COMPLEX,
used_procs[i], tag1, comm, &requests[j++]);
hypre_MPI_Isend(local_matrix_j, local_num_nonzeros, HYPRE_MPI_INT,
used_procs[i], tag3, comm, &requests[j++]);
}
hypre_MPI_Waitall(num_requests, requests, status);
hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST);
#else
local_num_rows = (HYPRE_Int)(row_starts[my_id+1] - row_starts[my_id]);
/* if my_id contains no data, return NULL */
if (!local_num_rows)
return NULL;
local_matrix = hypre_MergeDiagAndOffd(par_matrix);
hypre_CSRMatrixBigJtoJ(local_matrix); /* copies big_j to j */
local_matrix_i = hypre_CSRMatrixI(local_matrix);
local_matrix_j = hypre_CSRMatrixJ(local_matrix);
local_matrix_data = hypre_CSRMatrixData(local_matrix);
matrix_i = hypre_CTAlloc(HYPRE_Int, num_rows+1, HYPRE_MEMORY_HOST);
/* determine procs that have vector data and store their ids in used_procs */
num_types = 0;
for (i=0; i < num_procs; i++)
if (row_starts[i+1]-row_starts[i] && i-my_id)
num_types++;
num_requests = 4*num_types;
used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
j = 0;
for (i=0; i < num_procs; i++)
if (row_starts[i+1]-row_starts[i] && i-my_id)
used_procs[j++] = i;
requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);
/* data_type = hypre_CTAlloc(hypre_MPI_Datatype, num_types+1); */
/* exchange contents of local_matrix_i */
j = 0;
for (i = 0; i < num_types; i++)
{
proc_id = used_procs[i];
vec_len = (HYPRE_Int)(row_starts[proc_id+1] - row_starts[proc_id]);
hypre_MPI_Irecv(&matrix_i[(HYPRE_Int)row_starts[proc_id]+1], vec_len, HYPRE_MPI_INT,
proc_id, 0, comm, &requests[j++]);
}
for (i = 0; i < num_types; i++)
{
proc_id = used_procs[i];
hypre_MPI_Isend(&local_matrix_i[1], local_num_rows, HYPRE_MPI_INT,
proc_id, 0, comm, &requests[j++]);
}
vec_len = (HYPRE_Int)(row_starts[my_id+1] - row_starts[my_id]);
for (i=1; i <= vec_len; i++)
matrix_i[(HYPRE_Int)row_starts[my_id]+i] = local_matrix_i[i];
hypre_MPI_Waitall(j, requests, status);
/* generate matrix_i from received data */
offset = matrix_i[(HYPRE_Int)row_starts[1]];
for (i=1; i < num_procs; i++)
{
for (j = (HYPRE_Int)row_starts[i]; j < (HYPRE_Int)row_starts[i+1]; j++)
matrix_i[j+1] += offset;
offset = matrix_i[(HYPRE_Int)row_starts[i+1]];
}
num_nonzeros = matrix_i[num_rows];
matrix = hypre_CSRMatrixCreate(num_rows, num_cols, num_nonzeros);
hypre_CSRMatrixMemoryLocation(matrix) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixI(matrix) = matrix_i;
hypre_CSRMatrixInitialize(matrix);
matrix_j = hypre_CSRMatrixJ(matrix);
matrix_data = hypre_CSRMatrixData(matrix);
/* generate datatypes for further data exchange and exchange remaining
data, i.e. column info and actual data */
j = 0;
for (i = 0; i < num_types; i++)
{
proc_id = used_procs[i];
start_index = matrix_i[(HYPRE_Int)row_starts[proc_id]];
num_data = matrix_i[(HYPRE_Int)row_starts[proc_id+1]] - start_index;
hypre_MPI_Irecv(&matrix_data[start_index], num_data, HYPRE_MPI_COMPLEX,
used_procs[i], 0, comm, &requests[j++]);
hypre_MPI_Irecv(&matrix_j[start_index], num_data, HYPRE_MPI_INT,
used_procs[i], 0, comm, &requests[j++]);
}
local_num_nonzeros = local_matrix_i[local_num_rows];
for (i=0; i < num_types; i++)
{
hypre_MPI_Isend(local_matrix_data, local_num_nonzeros, HYPRE_MPI_COMPLEX,
used_procs[i], 0, comm, &requests[j++]);
hypre_MPI_Isend(local_matrix_j, local_num_nonzeros, HYPRE_MPI_INT,
used_procs[i], 0, comm, &requests[j++]);
}
start_index = matrix_i[(HYPRE_Int)row_starts[my_id]];
for (i=0; i < local_num_nonzeros; i++)
{
matrix_j[start_index+i] = local_matrix_j[i];
matrix_data[start_index+i] = local_matrix_data[i];
}
hypre_MPI_Waitall(num_requests, requests, status);
start_index = matrix_i[(HYPRE_Int)row_starts[my_id]];
for (i=0; i < local_num_nonzeros; i++)
{
matrix_j[start_index+i] = local_matrix_j[i];
matrix_data[start_index+i] = local_matrix_data[i];
}
hypre_MPI_Waitall(num_requests, requests, status);
#endif
if (hypre_CSRMatrixOwnsData(local_matrix))
hypre_CSRMatrixDestroy(local_matrix);
else
hypre_TFree(local_matrix, HYPRE_MEMORY_HOST);
if (num_requests)
{
hypre_TFree(requests, HYPRE_MEMORY_HOST);
hypre_TFree(status, HYPRE_MEMORY_HOST);
hypre_TFree(used_procs, HYPRE_MEMORY_HOST);
}
return matrix;
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixCopy,
 * copies A to B,
 * if copy_data = 0, only the structure of A is copied to B
 * the routine does not check whether the dimensions of A and B are compatible
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixCopy( hypre_ParCSRMatrix *A,
                        hypre_ParCSRMatrix *B,
                        HYPRE_Int copy_data )
{
   hypre_CSRMatrix *A_diag;
   hypre_CSRMatrix *A_offd;
   HYPRE_BigInt *col_map_offd_A;
   hypre_CSRMatrix *B_diag;
   hypre_CSRMatrix *B_offd;
   HYPRE_BigInt *col_map_offd_B;
   HYPRE_Int num_cols_offd_A;
   HYPRE_Int num_cols_offd_B;

   /* Validate arguments */
   if (!A)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (!B)
   {
      /* Fix: B is argument 2; the original flagged argument 1 here */
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   A_diag = hypre_ParCSRMatrixDiag(A);
   A_offd = hypre_ParCSRMatrixOffd(A);
   B_diag = hypre_ParCSRMatrixDiag(B);
   B_offd = hypre_ParCSRMatrixOffd(B);

   num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
   num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
   hypre_assert(num_cols_offd_A == num_cols_offd_B);

   col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
   col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);

   /* Copy diag and offd parts (structure only when copy_data == 0) */
   hypre_CSRMatrixCopy(A_diag, B_diag, copy_data);
   hypre_CSRMatrixCopy(A_offd, B_offd, copy_data);

   /* should not happen if B has been initialized */
   if (num_cols_offd_B && col_map_offd_B == NULL)
   {
      col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B;
   }

   hypre_TMemcpy(col_map_offd_B, col_map_offd_A, HYPRE_BigInt, num_cols_offd_B,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------
 * hypre_FillResponseParToCSRMatrix
 * Fill response function for determining the send processors
 * data exchange
 *--------------------------------------------------------------------*/
/* Data-exchange callback: records the contacting processor's id and
   appends its contact_size received HYPRE_BigInt elements to the
   hypre_ProcListElements object carried in response_obj->data2, growing
   the id / vec_starts / elements arrays on demand. No response message
   is sent back (*response_message_size = 0). */
HYPRE_Int
hypre_FillResponseParToCSRMatrix( void *p_recv_contact_buf,
HYPRE_Int contact_size,
HYPRE_Int contact_proc,
void *ro,
MPI_Comm comm,
void **p_send_response_buf,
HYPRE_Int *response_message_size )
{
HYPRE_Int myid;
HYPRE_Int i, index, count, elength;
HYPRE_BigInt *recv_contact_buf = (HYPRE_BigInt * ) p_recv_contact_buf;
hypre_DataExchangeResponse *response_obj = (hypre_DataExchangeResponse*)ro;
hypre_ProcListElements *send_proc_obj = (hypre_ProcListElements*)response_obj->data2;
/* NOTE(review): myid is obtained but not used below */
hypre_MPI_Comm_rank(comm, &myid );
/*check to see if we need to allocate more space in send_proc_obj for ids*/
if (send_proc_obj->length == send_proc_obj->storage_length)
{
send_proc_obj->storage_length +=10; /*add space for 10 more processors*/
send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int,
send_proc_obj->storage_length, HYPRE_MEMORY_HOST);
/* vec_starts needs one extra slot (CSR-style offsets) */
send_proc_obj->vec_starts =
hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int,
send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
}
/*initialize*/
count = send_proc_obj->length;
index = send_proc_obj->vec_starts[count]; /*this is the number of elements*/
/*send proc*/
send_proc_obj->id[count] = contact_proc;
/*do we need more storage for the elements?*/
if (send_proc_obj->element_storage_length < index + contact_size)
{
/* grow by at least 10 beyond the current element count */
elength = hypre_max(contact_size, 10);
elength += index;
send_proc_obj->elements = hypre_TReAlloc(send_proc_obj->elements,
HYPRE_BigInt, elength, HYPRE_MEMORY_HOST);
send_proc_obj->element_storage_length = elength;
}
/*populate send_proc_obj*/
for (i=0; i< contact_size; i++)
{
send_proc_obj->elements[index++] = recv_contact_buf[i];
}
/* close the offset range for this contact and advance the list */
send_proc_obj->vec_starts[count+1] = index;
send_proc_obj->length++;
/*output - no message to return (confirmation) */
*response_message_size = 0;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
 * hypre_ParCSRMatrixUnion
 * Creates and returns a new matrix whose elements are the union of A and B.
 * Data is not copied, only structural information is created.
 * A and B must have the same communicator, numbers and distributions of rows
 * and columns (they can differ in which row-column pairs are nonzero, thus
 * in which columns are in a offd block)
 *--------------------------------------------------------------------------*/
hypre_ParCSRMatrix * hypre_ParCSRMatrixUnion( hypre_ParCSRMatrix * A,
                                              hypre_ParCSRMatrix * B )
{
   hypre_ParCSRMatrix * C;
   HYPRE_BigInt * col_map_offd_C = NULL;
   HYPRE_Int num_procs, my_id, p;

   MPI_Comm comm = hypre_ParCSRMatrixComm( A );
   hypre_MPI_Comm_rank(comm,&my_id);
   hypre_MPI_Comm_size(comm,&num_procs);

   /* C borrows A's distribution metadata (row/col starts not owned) */
   C = hypre_CTAlloc( hypre_ParCSRMatrix, 1 , HYPRE_MEMORY_HOST);
   hypre_ParCSRMatrixComm( C ) = hypre_ParCSRMatrixComm( A );
   hypre_ParCSRMatrixGlobalNumRows( C ) = hypre_ParCSRMatrixGlobalNumRows( A );
   hypre_ParCSRMatrixGlobalNumCols( C ) = hypre_ParCSRMatrixGlobalNumCols( A );
   hypre_ParCSRMatrixFirstRowIndex( C ) = hypre_ParCSRMatrixFirstRowIndex( A );
   hypre_assert( hypre_ParCSRMatrixFirstRowIndex( B )
                 == hypre_ParCSRMatrixFirstRowIndex( A ) );
   hypre_ParCSRMatrixRowStarts( C ) = hypre_ParCSRMatrixRowStarts( A );
   hypre_ParCSRMatrixOwnsRowStarts( C ) = 0;
   hypre_ParCSRMatrixColStarts( C ) = hypre_ParCSRMatrixColStarts( A );
   hypre_ParCSRMatrixOwnsColStarts( C ) = 0;
   /* Fix: the original asserted pointer equality of the col_starts
      arrays (num_procs+1 times), which never verifies the actual
      distribution; compare the partition entries element-wise. */
   for ( p=0; p<=num_procs; ++p )
      hypre_assert( hypre_ParCSRMatrixColStarts(A)[p]
                    == hypre_ParCSRMatrixColStarts(B)[p] );
   hypre_ParCSRMatrixFirstColDiag( C ) = hypre_ParCSRMatrixFirstColDiag( A );
   hypre_ParCSRMatrixLastRowIndex( C ) = hypre_ParCSRMatrixLastRowIndex( A );
   hypre_ParCSRMatrixLastColDiag( C ) = hypre_ParCSRMatrixLastColDiag( A );

   /* Structural union of the diag and offd parts (no data copied) */
   hypre_ParCSRMatrixDiag( C ) =
      hypre_CSRMatrixUnion( hypre_ParCSRMatrixDiag(A), hypre_ParCSRMatrixDiag(B),
                            0, 0, 0 );
   hypre_ParCSRMatrixOffd( C ) =
      hypre_CSRMatrixUnion( hypre_ParCSRMatrixOffd(A), hypre_ParCSRMatrixOffd(B),
                            hypre_ParCSRMatrixColMapOffd(A),
                            hypre_ParCSRMatrixColMapOffd(B), &col_map_offd_C );
   hypre_ParCSRMatrixColMapOffd( C ) = col_map_offd_C;
   hypre_ParCSRMatrixCommPkg( C ) = NULL;
   hypre_ParCSRMatrixCommPkgT( C ) = NULL;
   hypre_ParCSRMatrixOwnsData( C ) = 1;
   /* SetNumNonzeros, SetDNumNonzeros are global, need hypre_MPI_Allreduce.
      I suspect, but don't know, that other parts of hypre do not assume that
      the correct values have been set.
      hypre_ParCSRMatrixSetNumNonzeros( C );
      hypre_ParCSRMatrixSetDNumNonzeros( C );*/
   hypre_ParCSRMatrixNumNonzeros( C ) = 0;
   hypre_ParCSRMatrixDNumNonzeros( C ) = 0.0;
   hypre_ParCSRMatrixRowindices( C ) = NULL;
   hypre_ParCSRMatrixRowvalues( C ) = NULL;
   hypre_ParCSRMatrixGetrowactive( C ) = 0;
   return C;
}
/* drop the entries that are not on the diagonal and smaller than
 * its row norm: type 1: 1-norm, 2: 2-norm, -1: infinity norm */
/* In-place: compacts A_diag / A_offd, removing off-diagonal entries with
   |value| < tol * row_norm. Also compacts col_map_offd_A to the columns
   still referenced, and rebuilds the matvec communication package.
   Returns immediately (no-op) when tol <= 0. */
HYPRE_Int
hypre_ParCSRMatrixDropSmallEntries( hypre_ParCSRMatrix *A,
HYPRE_Real tol,
HYPRE_Int type)
{
HYPRE_Int i, j, k, nnz_diag, nnz_offd, A_diag_i_i, A_offd_i_i;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
/* diag part of A */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
/* off-diag part of A */
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int *marker_offd = NULL;
HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int my_id, num_procs;
/* MPI size and rank*/
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
if (tol <= 0.0)
{
return hypre_error_flag;
}
/* marker_offd[c] != 0 <=> offd column c is still referenced after dropping */
marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
/* A_diag_i_i / A_offd_i_i track the OLD row starts, because A_diag_i /
   A_offd_i are overwritten below with the compacted offsets */
nnz_diag = nnz_offd = A_diag_i_i = A_offd_i_i = 0;
for (i = 0; i < nrow_local; i++)
{
/* compute row norm */
HYPRE_Real row_nrm = 0.0;
for (j = A_diag_i_i; j < A_diag_i[i+1]; j++)
{
/* NOTE(review): fabs() on HYPRE_Complex - assumes real build; a
   complex build would need hypre_cabs - confirm */
HYPRE_Complex v = A_diag_a[j];
if (type == 1)
{
row_nrm += fabs(v);
}
else if (type == 2)
{
row_nrm += v*v;
}
else
{
row_nrm = hypre_max(row_nrm, fabs(v));
}
}
if (num_procs > 1)
{
for (j = A_offd_i_i; j < A_offd_i[i+1]; j++)
{
HYPRE_Complex v = A_offd_a[j];
if (type == 1)
{
row_nrm += fabs(v);
}
else if (type == 2)
{
row_nrm += v*v;
}
else
{
row_nrm = hypre_max(row_nrm, fabs(v));
}
}
}
if (type == 2)
{
row_nrm = sqrt(row_nrm);
}
/* drop small entries based on tol and row norm */
for (j = A_diag_i_i; j < A_diag_i[i+1]; j++)
{
HYPRE_Int col = A_diag_j[j];
HYPRE_Complex val = A_diag_a[j];
/* keep diagonal entries unconditionally */
if (i == col || fabs(val) >= tol * row_nrm)
{
A_diag_j[nnz_diag] = col;
A_diag_a[nnz_diag] = val;
nnz_diag ++;
}
}
if (num_procs > 1)
{
for (j = A_offd_i_i; j < A_offd_i[i+1]; j++)
{
HYPRE_Int col = A_offd_j[j];
HYPRE_Complex val = A_offd_a[j];
/* in normal cases: diagonal entry should not
* appear in A_offd (but this can still be possible) */
if (i + first_row == col_map_offd_A[col] || fabs(val) >= tol * row_nrm)
{
if (0 == marker_offd[col])
{
marker_offd[col] = 1;
}
A_offd_j[nnz_offd] = col;
A_offd_a[nnz_offd] = val;
nnz_offd ++;
}
}
}
/* save OLD next-row starts, then store the compacted offsets */
A_diag_i_i = A_diag_i[i+1];
A_offd_i_i = A_offd_i[i+1];
A_diag_i[i+1] = nnz_diag;
A_offd_i[i+1] = nnz_offd;
}
hypre_CSRMatrixNumNonzeros(A_diag) = nnz_diag;
hypre_CSRMatrixNumNonzeros(A_offd) = nnz_offd;
hypre_ParCSRMatrixSetNumNonzeros(A);
hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A);
/* compact col_map_offd_A; marker_offd becomes the old->new column map */
for (i = 0, k = 0; i < num_cols_A_offd; i++)
{
if (marker_offd[i])
{
col_map_offd_A[k] = col_map_offd_A[i];
marker_offd[i] = k++;
}
}
/* num_cols_A_offd = k; */
hypre_CSRMatrixNumCols(A_offd) = k;
/* renumber surviving offd column indices */
for (i = 0; i < nnz_offd; i++)
{
A_offd_j[i] = marker_offd[A_offd_j[i]];
}
if ( hypre_ParCSRMatrixCommPkg(A) )
{
hypre_MatvecCommPkgDestroy( hypre_ParCSRMatrixCommPkg(A) );
}
hypre_MatvecCommPkgCreate(A);
hypre_TFree(marker_offd, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/*
#ifdef HYPRE_USING_UNIFIED_MEMORY
hypre_int hypre_ParCSRMatrixIsManaged(hypre_ParCSRMatrix *a){
if (hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(a)))
return ((hypre_CSRMatrixIsManaged(hypre_ParCSRMatrixDiag(a))) && (hypre_CSRMatrixIsManaged(hypre_ParCSRMatrixOffd(a))));
else
return hypre_CSRMatrixIsManaged(hypre_ParCSRMatrixDiag(a));
}
#endif
*/
|
config.c |
#include "grid.h"
#include "config.h"
#include <hdf5.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <errno.h>
/* Subgrid bins holding more than WORK_SPLIT_THRESHOLD times the average
   chunk count are split into multiple work items (see
   generate_subgrid_work_assignment). */
const int WORK_SPLIT_THRESHOLD = 3;
/* Current wall-clock time (CLOCK_REALTIME) in seconds, with nanosecond
 * resolution carried in the fractional part. */
double get_time_ns()
{
    struct timespec now;
    clock_gettime(CLOCK_REALTIME, &now);
    return (double)now.tv_sec + (double)now.tv_nsec / 1000000000;
}
/* Compute a uvw bounding box (in wavelengths) for baseline (a1,a2) over
 * the given inclusive time-step and frequency-step ranges. Only the
 * endpoints are evaluated (TODO from original: simplifying quite a bit). */
void bl_bounding_box(struct vis_spec *spec,
                     int a1, int a2,
                     int tstep0, int tstep1,
                     int fstep0, int fstep1,
                     double *uvw_l_min, double *uvw_l_max)
{
    struct ant_config *cfg = spec->cfg;

    /* uvw (metres) at the two hour-angle endpoints */
    double uvw_start[3], uvw_end[3];
    ha_to_uvw_sc(cfg, a1, a2,
                 spec->ha_sin[tstep0], spec->ha_cos[tstep0],
                 spec->dec_sin, spec->dec_cos,
                 uvw_start);
    ha_to_uvw_sc(cfg, a1, a2,
                 spec->ha_sin[tstep1], spec->ha_cos[tstep1],
                 spec->dec_sin, spec->dec_cos,
                 uvw_end);

    /* metre -> lambda conversion factors at the two frequency endpoints */
    double freq_lo = spec->freq_start + spec->freq_step * fstep0;
    double freq_hi = spec->freq_start + spec->freq_step * fstep1;
    double scale_lo = uvw_m_to_l(1, freq_lo);
    double scale_hi = uvw_m_to_l(1, freq_hi);

    /* Bounding box over the four time/frequency corner combinations */
    int axis;
    for (axis = 0; axis < 3; axis++) {
        double c00 = uvw_start[axis] * scale_lo, c01 = uvw_start[axis] * scale_hi;
        double c10 = uvw_end[axis] * scale_lo, c11 = uvw_end[axis] * scale_hi;
        uvw_l_min[axis] = fmin(fmin(c00, c01), fmin(c10, c11));
        uvw_l_max[axis] = fmax(fmax(c00, c01), fmax(c10, c11));
    }
}
/* Determine the (signed) range of subgrid indices a baseline can touch,
 * over all configured times and frequencies. */
void bl_bounding_subgrids(struct vis_spec *spec,
                          double lam, double xA, int a1, int a2,
                          int *sg_min, int *sg_max)
{
    /* Bounding box of the entire baseline track */
    double lmin[3], lmax[3];
    bl_bounding_box(spec, a1, a2,
                    0, spec->time_count-1,
                    0, spec->freq_count-1,
                    lmin, lmax);
    //printf("BL u %g-%g v %g-%g\n", lmin[0], lmax[0], lmin[1], lmax[1]);

    /* Convert u/v extents into subgrid index space */
    int axis;
    for (axis = 0; axis < 2; axis++) {
        sg_min[axis] = (int)round(lmin[axis]/lam/xA);
        sg_max[axis] = (int)round(lmax[axis]/lam/xA);
    }
}
/* Per-worker book-keeping for balancing subgrid work assignment */
struct worker_prio
{
    int worker; /* worker index */
    int nbl;    /* number of baseline chunks currently assigned */
};

/* qsort comparator ordering workers by ascending chunk count.
 *
 * Bug fix: the original returned (w1->nbl > w2->nbl), i.e. only 0 or 1,
 * never a negative value. qsort requires a consistent three-way result
 * (C11 7.22.5); a comparator that cannot return negative yields an
 * inconsistent ordering. */
static int compare_prio_nbl(const void *_w1, const void *_w2)
{
    const struct worker_prio *w1 = (const struct worker_prio *)_w1;
    const struct worker_prio *w2 = (const struct worker_prio *)_w2;
    return (w1->nbl > w2->nbl) - (w1->nbl < w2->nbl);
}
/* Record that baseline (a1,a2) contributes to subgrid bin (iu,iv):
   counts how many of its time/frequency chunks have a uvw bounding box
   overlapping the subgrid (directly or mirrored through the origin),
   accumulates that count in nbl[] and prepends a subgrid_work_bl entry
   to bls[]. Does nothing if no chunk overlaps. */
static void bin_baseline(struct vis_spec *spec, double lam, double xA,
int *nbl, struct subgrid_work_bl **bls, int nsubgrid,
int a1, int a2, int iu, int iv)
{
assert (iu >= 0 && iu < nsubgrid);
assert (iv >= 0 && iv < nsubgrid);
int chunks = 0, tchunk, fchunk;
/* Subgrid bounds in wavelengths; bin index nsubgrid/2 is centred on
   the uv origin */
double sg_min_u = lam * (xA*(iu-nsubgrid/2) - xA/2);
double sg_min_v = lam * (xA*(iv-nsubgrid/2) - xA/2);
double sg_max_u = lam * (xA*(iu-nsubgrid/2) + xA/2);
double sg_max_v = lam * (xA*(iv-nsubgrid/2) + xA/2);
int ntchunk = (spec->time_count + spec->time_chunk - 1) / spec->time_chunk;
int nfchunk = (spec->freq_count + spec->freq_chunk - 1) / spec->freq_chunk;
// Count number of overlapping chunks
for (tchunk = 0; tchunk < ntchunk; tchunk++) {
// Check frequencies. We adjust step length exponentially so
// we can jump over non-matching space quicker, see
// below. This bit of code is likely a bit too smart for its
// own good!
int fstep = 1;
for (fchunk = 0; fchunk < nfchunk; fchunk+=fstep) {
// Determine chunk bounding box
double uvw_l_min[3], uvw_l_max[3];
bl_bounding_box(spec, a1, a2,
tchunk * spec->time_chunk,
fmin(spec->time_count, (tchunk+1) * spec->time_chunk) - 1,
fchunk * spec->freq_chunk,
fmin(spec->freq_count, (fchunk+fstep) * spec->freq_chunk) - 1,
uvw_l_min, uvw_l_max);
//printf("u: sg %g-%g chunk %g-%g\n", sg_min_u, sg_max_u, uvw_l_min[0], uvw_l_max[0]);
//printf("v: sg %g-%g chunk %g-%g\n", sg_min_v, sg_max_v, uvw_l_min[1], uvw_l_max[1]);
/* Overlap test, including the hermitian mirror (-u,-v) */
if ((uvw_l_min[0] < sg_max_u && uvw_l_max[0] > sg_min_u &&
uvw_l_min[1] < sg_max_v && uvw_l_max[1] > sg_min_v) ||
(-uvw_l_max[0] < sg_max_u && -uvw_l_min[0] > sg_min_u &&
-uvw_l_max[1] < sg_max_v && -uvw_l_min[1] > sg_min_v)) {
if (fstep == 1) {
// Found a chunk
chunks++;
} else {
// Went too fast. Decrease step length, recheck.
fstep /= 2;
fchunk -= fstep;
}
} else {
// Speed up. Increase step length.
fchunk -= fstep;
fstep *= 2;
}
}
}
if (!chunks)
return;
// Count
nbl[iv*nsubgrid + iu]+=chunks;
// Make sure we don't add a baseline twice
if (bls[iv * nsubgrid + iu]) {
assert(bls[iv * nsubgrid + iu]->a1 != a1 ||
bls[iv * nsubgrid + iu]->a2 != a2);
}
// Add work structure
/* NOTE(review): malloc result unchecked - OOM would crash here */
struct subgrid_work_bl *wbl = (struct subgrid_work_bl *)
malloc(sizeof(struct subgrid_work_bl));
wbl->a1 = a1; wbl->a2 = a2; wbl->chunks=chunks;
wbl->next = bls[iv * nsubgrid + iu];
bls[iv * nsubgrid + iu] = wbl;
}
// Bin baselines per overlapping subgrid
/* Builds, for every subgrid bin, a chunk count (*pnbl) and a linked
   list of contributing baselines (*pbls). Only the u >= 0 half plane
   (iu >= nsubgrid/2) is populated. Returns the bin grid dimension
   nsubgrid; the caller owns the returned arrays. */
static int collect_baselines(struct vis_spec *spec,
double lam, double xA,
int **pnbl, struct subgrid_work_bl ***pbls,
bool dump_baselines)
{
// Determine number of subgrid bins we need
int nsubgrid = 2 * (int)ceil(1. / 2 / xA) + 1;
int *nbl = (int *)calloc(sizeof(int), nsubgrid * nsubgrid);
struct subgrid_work_bl **bls = (struct subgrid_work_bl **)
calloc(sizeof(struct subgrid_work_bl *), nsubgrid * nsubgrid);
// Determine baseline bounding boxes
int nbl_total = spec->cfg->ant_count * (spec->cfg->ant_count - 1) / 2;
int *sg_mins = (int *)malloc(sizeof(int) * 2 * nbl_total),
*sg_maxs = (int *)malloc(sizeof(int) * 2 * nbl_total);
int a1, a2, bl = 0;
for (a1 = 0; a1 < spec->cfg->ant_count; a1++) {
for (a2 = a1+1; a2 < spec->cfg->ant_count; a2++, bl++) {
bl_bounding_subgrids(spec, lam, xA, a1, a2, sg_mins + bl * 2, sg_maxs + bl * 2);
}
}
/* Each (iv,iu) iteration only writes its own bin, so the parallel
   loop is race-free */
int iv, iu;
#pragma omp parallel for collapse(2) schedule(dynamic,8)
for (iv = 0; iv < nsubgrid; iv++) {
for (iu = nsubgrid/2; iu < nsubgrid; iu++) {
int a1, a2, bl=0;
for (a1 = 0; a1 < spec->cfg->ant_count; a1++) {
for (a2 = a1+1; a2 < spec->cfg->ant_count; a2++, bl++) {
/* Bin if either the baseline or its mirror image falls
   into this subgrid's index range */
int *sg_min = sg_mins + bl * 2, *sg_max = sg_maxs + bl * 2;
if (iv >= nsubgrid/2+sg_min[1] && iv <= nsubgrid/2+sg_max[1] &&
iu >= nsubgrid/2+sg_min[0] && iu <= nsubgrid/2+sg_max[0]) {
bin_baseline(spec, lam, xA, nbl, bls, nsubgrid, a1, a2, iu, iv);
} else if(iv >= nsubgrid/2-sg_max[1] && iv <= nsubgrid/2-sg_min[1] &&
iu >= nsubgrid/2-sg_max[0] && iu <= nsubgrid/2-sg_min[0]) {
bin_baseline(spec, lam, xA, nbl, bls, nsubgrid, a1, a2, iu, iv);
}
}
}
}
}
free(sg_mins); free(sg_maxs);
// Produce dump if requested
if (dump_baselines) {
printf("Baseline bins:\n---\niu,iv,chunks\n");
for (iv = 0; iv < nsubgrid; iv++) {
for (iu = nsubgrid/2; iu < nsubgrid; iu++) {
int chunks = 0; struct subgrid_work_bl *bl;
for (bl = bls[nsubgrid*iv+iu]; bl; bl=bl->next) {
chunks += bl->chunks;
}
if (chunks) {
printf("%d,%d,%d\n", iu, iv, chunks);
}
}
}
printf("---\n");
}
*pnbl = nbl;
*pbls = bls;
return nsubgrid;
}
/* Detach baselines from the head of the list until at least n chunks
 * have been collected (or the list runs out). Returns the detached
 * sub-list; *bls is advanced past it and *nchunks receives the total
 * number of chunks detached. Returns NULL on an empty list. */
static struct subgrid_work_bl *pop_chunks(struct subgrid_work_bl **bls, int n, int *nchunks)
{
    *nchunks = 0;
    assert(n >= 1);

    struct subgrid_work_bl *head = *bls;
    if (!head)
        return NULL;

    /* Walk forward while we still need chunks and entries remain */
    struct subgrid_work_bl *last = head;
    while (n > last->chunks && last->next) {
        *nchunks += last->chunks;
        n -= last->chunks;
        last = last->next;
    }
    *nchunks += last->chunks;

    /* Cut the list after "last" and hand back the detached head */
    *bls = last->next;
    last->next = NULL;
    return head;
}
static bool generate_subgrid_work_assignment(struct work_config *cfg)
{
struct vis_spec *spec = &cfg->spec;
// Count visibilities per sub-grid
double xA = (double)cfg->recombine.xA_size / cfg->recombine.image_size;
int *nbl; struct subgrid_work_bl **bls;
printf("Covering %d time steps, %d channels, %d baselines\n",
cfg->spec.time_count, cfg->spec.freq_count,
cfg->spec.cfg->ant_count * (cfg->spec.cfg->ant_count - 1) / 2);
printf("Binning chunks...\n");
double start = get_time_ns();
int nsubgrid = collect_baselines(spec, cfg->recombine.image_size / cfg->theta,
xA, &nbl, &bls, cfg->config_dump_baseline_bins);
printf(" %g s\n", get_time_ns() - start);
// Count how many sub-grids actually have visibilities
int npop = 0, nbl_total = 0, nbl_max = 0;
int iu, iv;
for (iv = 0; iv < nsubgrid; iv++) {
for (iu = nsubgrid/2; iu < nsubgrid; iu++) {
if (nbl[iv * nsubgrid + iu]) {
npop++;
nbl_total+=nbl[iv * nsubgrid + iu];
if (nbl[iv * nsubgrid + iu] > nbl_max)
nbl_max = nbl[iv * nsubgrid + iu];
}
}
}
double coverage = (double)npop * cfg->recombine.xA_size * cfg->recombine.xA_size
/ cfg->recombine.image_size / cfg->recombine.image_size;
// We don't want bins that are too full compared to the average -
// determine at what point we're going to split them.
int work_max_nbl = (int)fmax(WORK_SPLIT_THRESHOLD * nbl_total / npop,
(nbl_max + cfg->subgrid_workers - 1) / cfg->subgrid_workers);
printf("%d subgrid baseline bins (%.3g%% coverage), %.5g average chunks per subgrid, "
"splitting above %d\n",
npop, coverage*100, (double)nbl_total / npop, work_max_nbl);
// Now count again how much work we have total, and per
// column. Note that we ignore grid data at u < 0, as transferring
// half the grid is enough to reconstruct a real-valued image.
int nwork = 0, max_work_column = 0;
for (iu = nsubgrid/2; iu < nsubgrid; iu++) {
int nwork_start = nwork;
for (iv = 0; iv < nsubgrid; iv++) {
int nv = nbl[iv * nsubgrid + iu];
nwork += (nv + work_max_nbl - 1) / work_max_nbl;
}
// How much work in this column?
if (nwork - nwork_start > max_work_column)
max_work_column = nwork-nwork_start;
}
// Allocate work description
cfg->subgrid_max_work = (nwork + cfg->subgrid_workers - 1) / cfg->subgrid_workers;
cfg->subgrid_work = (struct subgrid_work *)
calloc(sizeof(struct subgrid_work), cfg->subgrid_workers * cfg->subgrid_max_work);
printf("%d split subgrid baseline bins, %d per worker\n", nwork, cfg->subgrid_max_work);
// Worker priority order for acquiring new work
struct worker_prio *worker_prio = malloc(sizeof(worker_prio) * cfg->subgrid_workers);
int i;
for (i = 0; i < cfg->subgrid_workers; i++) {
worker_prio[i].worker = i;
worker_prio[i].nbl = 0;
}
// Go through columns and assign work
int iworker = 0, iwork = 0;
for (iu = nsubgrid/2; iu < nsubgrid; iu++) {
// Generate column of work
int start_bl;
for (iv = 0; iv < nsubgrid; iv++) {
int nv = nbl[iv * nsubgrid + iu];
for (start_bl = 0; start_bl < nv; start_bl += work_max_nbl) {
// Assign work to next worker
struct subgrid_work *work =
cfg->subgrid_work + iworker * cfg->subgrid_max_work + iwork;
work->iu = iu - nsubgrid/2;
work->iv = iv - nsubgrid/2;
work->subgrid_off_u = cfg->recombine.xA_size * work->iu;
work->subgrid_off_v = cfg->recombine.xA_size * work->iv;
work->bls = pop_chunks(&bls[iv * nsubgrid + iu], work_max_nbl,
&work->nbl);
// Save back how many chunks were assigned
worker_prio[iworker].nbl += work->nbl;
iworker++;
if (iworker >= cfg->subgrid_workers) {
iworker = 0;
iwork++;
}
}
}
}
// Determine average
int64_t sum = 0;
for (i = 0; i < cfg->subgrid_workers; i++) {
sum += worker_prio[i].nbl;
}
int average = sum / cfg->subgrid_workers;
// Swap work to even out profile
bool improvement; int nswaps = 0;
do {
improvement = false;
// Sort worker priority
qsort(worker_prio, cfg->subgrid_workers, sizeof(void *), compare_prio_nbl);
// Walk through worker pairs
int prio1 = 0, prio2 = cfg->subgrid_workers - 1;
while(prio1 < prio2) {
int diff = worker_prio[prio2].nbl - worker_prio[prio1].nbl;
int worker1 = worker_prio[prio1].worker;
int worker2 = worker_prio[prio2].worker;
// Find a work item to switch
int iwork;
struct subgrid_work *work1 = cfg->subgrid_work + worker1 * cfg->subgrid_max_work;
struct subgrid_work *work2 = cfg->subgrid_work + worker2 * cfg->subgrid_max_work;
int best = -1, best_diff = diff;
for (iwork = 0; iwork < cfg->subgrid_max_work; iwork++) {
int wdiff = work2[iwork].nbl - work1[iwork].nbl;
if (abs(diff - 2*wdiff) < best_diff) {
best = iwork; best_diff = abs(diff - 2*wdiff);
}
}
// Found a swap?
if (best != -1) {
struct subgrid_work w = work1[best];
work1[best] = work2[best];
work2[best] = w;
worker_prio[prio1].nbl += work1[best].nbl - work2[best].nbl;
worker_prio[prio2].nbl += work2[best].nbl - work1[best].nbl;
improvement = true;
nswaps++;
break;
}
// Step workers. Keep the one that is further away from the
// average.
if (abs(worker_prio[prio2].nbl - average) >
abs(worker_prio[prio1].nbl - average)) {
prio1++;
} else {
prio2--;
}
}
} while(improvement);
// Statistics
int min_vis = INT_MAX, max_vis = 0;
cfg->iu_min = INT_MAX; cfg->iu_max = INT_MIN;
cfg->iv_min = INT_MAX; cfg->iv_max = INT_MIN;
for (i = 0; i < cfg->subgrid_workers; i++) {
int j; int vis = 0;
for (j = 0; j < cfg->subgrid_max_work; j++) {
struct subgrid_work *work = cfg->subgrid_work + i* cfg->subgrid_max_work+j;
if (work->iu < cfg->iu_min) cfg->iu_min = work->iu;
if (work->iu > cfg->iu_max) cfg->iu_max = work->iu;
if (work->iv < cfg->iv_min) cfg->iv_min = work->iv;
if (work->iv > cfg->iv_max) cfg->iv_max = work->iv;
vis += work->nbl;
//printf("%d ", work->nbl);
}
//printf(" -> %d %d\n", vis, worker_prio[i].nbl);
min_vis = fmin(vis, min_vis);
max_vis = fmax(vis, max_vis);
}
printf("Assigned workers %d chunks min, %d chunks max (after %d swaps)\n", min_vis, max_vis, nswaps);
if (cfg->config_dump_subgrid_work) {
printf("Subgrid work (after swaps):\n---\nworker,work,chunks\n");
for (i = 0; i < cfg->subgrid_workers; i++) {
int j;
for (j = 0; j < cfg->subgrid_max_work; j++) {
struct subgrid_work *work = cfg->subgrid_work + i* cfg->subgrid_max_work+j;
if (work->nbl > 0) {
printf("%d,%d,%d\n", i,j, work->nbl);
}
}
}
puts("---");
}
return true;
}
/* Assign facet work items round-robin across facet workers. All facets
 * falling within the field of view are marked as set; theta is
 * generally larger than the FoV, so the whole image is not necessarily
 * covered. */
static bool generate_facet_work_assignment(struct work_config *cfg)
{
    if (cfg->facet_workers == 0) return true;

    /* Number of facets per axis needed to cover the field of view */
    double yB = (double)cfg->recombine.yB_size / cfg->recombine.image_size;
    int nfacet = 2 * ceil(cfg->spec.fov / cfg->theta / yB / 2 - 0.5) + 1;
    printf("%dx%d facets covering %g FoV (facet %g, grid theta %g)\n",
           nfacet, nfacet, cfg->spec.fov, cfg->theta * yB, cfg->theta);

    /* Allocate the (workers x max_work) facet work array */
    cfg->facet_max_work = (nfacet * nfacet + cfg->facet_workers - 1) / cfg->facet_workers;
    cfg->facet_count = nfacet * nfacet;
    cfg->facet_work = (struct facet_work *)
        calloc(sizeof(struct facet_work), cfg->facet_workers * cfg->facet_max_work);

    /* Distribute facets round-robin, centring indices on the image */
    int ifacet;
    for (ifacet = 0; ifacet < cfg->facet_count; ifacet++) {
        struct facet_work *work = cfg->facet_work
            + cfg->facet_max_work * (ifacet % cfg->facet_workers)
            + ifacet / cfg->facet_workers;
        work->il = (ifacet / nfacet) - nfacet/2;
        work->im = (ifacet % nfacet) - nfacet/2;
        work->facet_off_l = work->il * cfg->recombine.yB_size;
        work->facet_off_m = work->im * cfg->recombine.yB_size;
        work->set = true;
    }
    return true;
}
/* Generate a work assignment that simply redistributes all data: no
 * visibilities are involved, so every subgrid and every facet of the
 * full image becomes exactly one work item. */
static bool generate_full_redistribute_assignment(struct work_config *cfg)
{
    /* Only valid when no visibility time steps are configured */
    assert(!cfg->spec.time_count);

    /* One work item per subgrid, assigned sequentially */
    int nsubgrid = cfg->recombine.image_size / cfg->recombine.xA_size;
    int total_sg = nsubgrid * nsubgrid;
    cfg->subgrid_max_work = (total_sg + cfg->subgrid_workers - 1) / cfg->subgrid_workers;
    cfg->subgrid_work = (struct subgrid_work *)
        calloc(sizeof(struct subgrid_work), cfg->subgrid_max_work * cfg->subgrid_workers);
    int item;
    for (item = 0; item < total_sg; item++) {
        struct subgrid_work *sg = cfg->subgrid_work + item;
        sg->iu = item / nsubgrid;
        sg->iv = item % nsubgrid;
        sg->subgrid_off_u = sg->iu * cfg->recombine.xA_size;
        sg->subgrid_off_v = sg->iv * cfg->recombine.xA_size;
        sg->nbl = 1;
        // Dummy 0-0 baseline
        sg->bls = (struct subgrid_work_bl *)calloc(sizeof(struct subgrid_work_bl), 1);
    }
    cfg->iu_min = cfg->iv_min = 0;
    cfg->iu_max = cfg->iv_max = nsubgrid-1;

    if (cfg->facet_workers == 0) return true;

    /* One work item per facet, distributed round-robin over workers */
    int nfacet = cfg->recombine.image_size / cfg->recombine.yB_size;
    cfg->facet_max_work = (nfacet * nfacet + cfg->facet_workers - 1) / cfg->facet_workers;
    cfg->facet_count = nfacet * nfacet;
    cfg->facet_work = (struct facet_work *)
        calloc(sizeof(struct facet_work), cfg->facet_max_work * cfg->facet_workers);
    for (item = 0; item < cfg->facet_count; item++) {
        struct facet_work *fw = cfg->facet_work
            + cfg->facet_max_work * (item % cfg->facet_workers)
            + item / cfg->facet_workers;
        fw->il = item / nfacet;
        fw->im = item % nfacet;
        fw->facet_off_l = fw->il * cfg->recombine.yB_size;
        fw->facet_off_m = fw->im * cfg->recombine.yB_size;
        fw->set = true;
    }
    return true;
}
/* Reset *cfg to all-zero and install default tuning parameters.
   Must be called before any other config_* routine. */
void config_init(struct work_config *cfg)
{
// Initialise structure
memset(cfg, 0, sizeof(*cfg));
cfg->gridder_x0 = 0.5;
cfg->config_dump_baseline_bins = false;
cfg->config_dump_subgrid_work = false;
cfg->produce_parallel_cols = false;
cfg->produce_retain_bf = true;
cfg->produce_source_count = 0;
cfg->produce_source_checks = 16384;
cfg->produce_batch_rows = 16;
cfg->produce_queue_length = 4;
cfg->vis_skip_metadata = true;
cfg->vis_bls_per_task = 256;
cfg->vis_subgrid_queue_length = 4;
cfg->vis_task_queue_length = 96;
cfg->vis_chunk_queue_length = 32768;
/* -1 marks the statsd socket as closed (checked in config_free) */
cfg->statsd_socket = -1;
cfg->statsd_rate = 1;
}
/* Print an integer compactly on stdout, using a "k" or "M" suffix when
 * it is an exact multiple of 1024 or 1024^2 respectively. */
static void print_power2(int x)
{
    const int MEG = 1024 * 1024;
    if (x >= MEG && x % MEG == 0)
        printf("%dM", x / MEG);
    else if (x >= 1024 && x % 1024 == 0)
        printf("%dk", x / 1024);
    else
        printf("%d", x);
}
/* Announce and install the recombination configuration.
 * Returns false when recombine2d_set_config rejects the parameters. */
bool config_set(struct work_config *cfg,
                int image_size, int subgrid_spacing,
                char *pswf_file,
                int yB_size, int yN_size, int yP_size,
                int xA_size, int xM_size, int xMxN_yP_size)
{
    printf("\nInitialising recombination (image size "); print_power2(image_size);
    printf(", facet FFT "); print_power2(yP_size);
    printf(", subgrid FFT "); print_power2(xM_size);
    printf(")...\n");

    /* Delegate to the recombination module; propagate success/failure */
    return recombine2d_set_config(&cfg->recombine, image_size, subgrid_spacing, pswf_file,
                                  yB_size, yN_size, yP_size,
                                  xA_size, xM_size, xMxN_yP_size);
}
/* Release all heap memory owned by *cfg and close the statsd socket
   (if open). Safe to call right after config_init (counts are zero,
   free(NULL) is a no-op). */
void config_free(struct work_config *cfg)
{
free(cfg->vis_path);
free(cfg->facet_work);
free(cfg->gridder_path);
free(cfg->grid_correction);
/* Free each work item's baseline list before the work array itself */
int i;
for (i = 0; i < cfg->subgrid_workers * cfg->subgrid_max_work; i++) {
while (cfg->subgrid_work[i].bls) {
struct subgrid_work_bl *bl = cfg->subgrid_work[i].bls;
cfg->subgrid_work[i].bls = cfg->subgrid_work[i].bls->next;
free(bl);
}
}
free(cfg->subgrid_work);
free(cfg->spec.ha_sin);
free(cfg->spec.ha_cos);
/* -1 = never opened (see config_init) */
if (cfg->statsd_socket != -1) close(cfg->statsd_socket);
cfg->statsd_socket = -1;
}
/* Attach a visibility specification to the configuration.
 *
 * Copies the specification, stores the field of view (theta) and an
 * optional output path, and pre-computes sine/cosine tables for the
 * hour angle (per time step) and declination.
 * NOTE(review): malloc results are not checked — assumes small tables.
 */
void config_set_visibilities(struct work_config *cfg,
                             struct vis_spec *spec, double theta,
                             const char *vis_path)
{
    // Take over the specification
    cfg->spec = *spec;
    cfg->theta = theta;
    if (vis_path)
        cfg->vis_path = strdup(vis_path);

    // Pre-compute sine/cosine of the hour angle for every time step.
    // The hour angle is in hours, hence the pi/12 conversion to radians.
    cfg->spec.ha_sin = (double *)malloc(sizeof(double) * cfg->spec.time_count);
    cfg->spec.ha_cos = (double *)malloc(sizeof(double) * cfg->spec.time_count);
    int t;
    for (t = 0; t < cfg->spec.time_count; t++) {
        double ha = spec->time_start + spec->time_step * t;
        cfg->spec.ha_sin[t] = sin(ha * M_PI / 12);
        cfg->spec.ha_cos[t] = cos(ha * M_PI / 12);
    }

    // ... and of the (constant) declination
    cfg->spec.dec_sin = sin(cfg->spec.dec);
    cfg->spec.dec_cos = cos(cfg->spec.dec);
}
/* Configure degridding from a separable-kernel gridder file (HDF5).
 *
 * Reads the gridder's accuracy limit (x0) and its grid correction
 * table. If the table length does not match the configured image size,
 * it is rescaled by linear interpolation (with a warning when the ratio
 * is not integral). A NULL gridder_path leaves the configuration as-is.
 *
 * Fix: previously `px0` was leaked when reading the grid correction
 * failed.
 *
 * Returns true on success.
 */
bool config_set_degrid(struct work_config *cfg, const char *gridder_path)
{
    if (gridder_path) {

        // Clear existing data, if any
        cfg->gridder_x0 = 0.5;
        free(cfg->gridder_path); cfg->gridder_path = 0;
        free(cfg->grid_correction); cfg->grid_correction = 0;

        // Get gridder's accuracy limit
        double *px0 = (double *)read_hdf5(sizeof(double), gridder_path, "sepkern/x0");
        if (!px0) return false;
        printf("Gridder %s with x0=%g\n", gridder_path, *px0);

        // Get grid correction dimensions
        int ncorr = get_npoints_hdf5(gridder_path, "sepkern/corr");

        // Read grid correction
        double *grid_corr = read_hdf5(sizeof(double) * ncorr, gridder_path, "sepkern/corr");
        if (!grid_corr) {
            fprintf(stderr, "ERROR: Could not read grid correction from %s!\n", gridder_path);
            free(px0);  // was leaked on this error path
            return false;
        }

        // Need to rescale? This is linear, therefore worth a
        // warning. Might want to do a "sinc" interpolation instead at
        // some point? Could be more appropriate.
        if (ncorr != cfg->recombine.image_size) {
            if (ncorr % cfg->recombine.image_size != 0) {
                fprintf(stderr, "WARNING: Rescaling grid correction from %d to %d points!\n",
                        ncorr, cfg->recombine.image_size);
            }
            int i;
            cfg->grid_correction = (double *)malloc(sizeof(double) * cfg->recombine.image_size);
            for (i = 0; i < cfg->recombine.image_size; i++) {
                // Linear interpolation between the two surrounding samples
                double j = (double)i * ncorr / cfg->recombine.image_size;
                int j0 = (int)floor(j), j1 = (j0 + 1) % ncorr;
                double w = j - j0;
                cfg->grid_correction[i] = (1 - w) * grid_corr[j0] + w * grid_corr[j1];
            }
            free(grid_corr);
        } else {
            // Exact size match - take ownership of the buffer directly
            cfg->grid_correction = grid_corr;
        }

        // Commit the new gridder settings
        cfg->gridder_x0 = *px0;
        cfg->gridder_path = strdup(gridder_path);
        free(px0);
    }

    return true;
}
/* Open a UDP connection to a statsd server.
 *
 * Resolves node/service, tries each returned address until a socket can
 * be created and connected, and stores the socket in cfg->statsd_socket.
 * Any previously open connection is closed first.
 *
 * Fix: the two error messages were missing their trailing newline
 * (inconsistent with the other diagnostics in this file).
 *
 * Returns true on success.
 */
bool config_set_statsd(struct work_config *cfg,
                       const char *node, const char *service)
{
    // Close any previous connection
    if (cfg->statsd_socket != -1) close(cfg->statsd_socket);
    cfg->statsd_socket = -1;

    // Resolve statsd address (datagram socket, IPv4 or IPv6)
    struct addrinfo hints, *result;
    memset(&hints, 0, sizeof(hints));
    hints.ai_family = AF_UNSPEC;
    hints.ai_socktype = SOCK_DGRAM;
    int ret = getaddrinfo(node, service, &hints, &result);
    if (ret != 0) {
        fprintf(stderr, "ERROR: Could not resolve statsd address (%s)\n", gai_strerror(ret));
        return false;
    }

    // Create socket - try each candidate address in turn
    struct addrinfo *addr = NULL;
    for (addr = result; addr; addr = addr->ai_next) {

        // Attempt to create socket
        cfg->statsd_socket = socket(addr->ai_family, addr->ai_socktype, addr->ai_protocol);
        if (cfg->statsd_socket == -1)
            continue;

        // And connect
        if (connect(cfg->statsd_socket, addr->ai_addr, addr->ai_addrlen) != -1)
            break;

        close(cfg->statsd_socket); cfg->statsd_socket = -1;
    }
    if (cfg->statsd_socket == -1) {
        fprintf(stderr, "ERROR: Could not create statsd socket (%s)\n", strerror(errno));
        freeaddrinfo(result);
        return false;
    }
    freeaddrinfo(result);

    // Initialise stats
    printf("Opened statsd connection to %s:%s\n", node, service);
    return true;
}
/* Send a single statsd datagram (best effort).
 *
 * Silently does nothing when no connection is open. On a failed or
 * short write the connection is closed and disabled so later calls
 * become no-ops.
 *
 * Fix: capture write()'s ssize_t return value explicitly instead of
 * comparing it directly against size_t strlen() (signed/unsigned
 * comparison), and call strlen() only once.
 */
void config_send_statsd(struct work_config *cfg, const char *stat)
{
    if (cfg->statsd_socket == -1)
        return;

    size_t len = strlen(stat);
    ssize_t written = write(cfg->statsd_socket, stat, len);
    if (written < 0 || (size_t)written != len) {
        fprintf(stderr, "ERROR: Failed to send to statsd (%s)\n", strerror(errno));
        close(cfg->statsd_socket);
        cfg->statsd_socket = -1;
    }
}
/* Assign input file locations to every facet work item in use.
 *
 * path_fmt is a printf-style format receiving the facet's (im, il)
 * indices; hdf5 optionally names a dataset within that file.
 * NOTE(review): path_fmt is a non-literal format string - callers must
 * pass a trusted format with exactly two %d-style conversions.
 */
void config_load_facets(struct work_config *cfg,
                        const char *path_fmt,
                        const char *hdf5)
{
    const int nwork = cfg->facet_workers * cfg->facet_max_work;
    int iw;
    for (iw = 0; iw < nwork; iw++) {
        struct facet_work *fw = cfg->facet_work + iw;
        if (!fw->set) continue;

        // Expand the format for this facet and attach the paths
        char buf[256];
        snprintf(buf, sizeof(buf), path_fmt, fw->im, fw->il);
        fw->path = strdup(buf);
        fw->hdf5 = hdf5 ? strdup(hdf5) : NULL;
    }
}
/* Configure optional consistency checks for subgrid work items.
 *
 * Each non-empty work item gets reference-data paths expanded from the
 * printf-style formats (receiving the subgrid's (iv, iu) indices) plus
 * the error thresholds to check against. NULL formats skip that check.
 */
void config_check_subgrids(struct work_config *cfg,
                           double threshold, double fct_threshold,
                           double degrid_threshold,
                           const char *check_fmt,
                           const char *check_fct_fmt,
                           const char *check_degrid_fmt,
                           const char *hdf5)
{
    const int nwork = cfg->subgrid_workers * cfg->subgrid_max_work;
    int iw;
    for (iw = 0; iw < nwork; iw++) {
        struct subgrid_work *sw = cfg->subgrid_work + iw;
        if (!sw->nbl) continue;

        char buf[256];
        if (check_fmt) {
            snprintf(buf, sizeof(buf), check_fmt, sw->iv, sw->iu);
            sw->check_path = strdup(buf);
        }
        if (check_fct_fmt) {
            snprintf(buf, sizeof(buf), check_fct_fmt, sw->iv, sw->iu);
            sw->check_fct_path = strdup(buf);
        }
        if (check_degrid_fmt) {
            snprintf(buf, sizeof(buf), check_degrid_fmt, sw->iv, sw->iu);
            sw->check_degrid_path = strdup(buf);
        }
        sw->check_hdf5 = hdf5 ? strdup(hdf5) : NULL;

        // Error thresholds for the individual checks
        sw->check_threshold = threshold;
        sw->check_fct_threshold = fct_threshold;
        sw->check_degrid_threshold = degrid_threshold;
    }
}
/* Distribute facet and subgrid work across the given worker counts.
 *
 * When a visibility specification is present, work is derived from it;
 * otherwise a full-redistribute assignment is generated. Warns when
 * more than one facet ends up on a single worker.
 * Returns true on success.
 */
bool config_assign_work(struct work_config *cfg,
                        int facet_workers, int subgrid_workers)
{
    cfg->facet_workers = facet_workers;
    cfg->subgrid_workers = subgrid_workers;

    // Generate work assignments
    if (cfg->spec.time_count) {
        printf("\nGenerating work assignments...\n");
        if (!generate_facet_work_assignment(cfg) ||
            !generate_subgrid_work_assignment(cfg))
            return false;
    } else if (!generate_full_redistribute_assignment(cfg)) {
        return false;
    }

    // Warn if we have multiple facets per worker
    if (cfg->facet_max_work > 1) {
        printf("WARNING: %d facets, but only %d workers. Consider more MPI ranks.\n",
               cfg->facet_count, cfg->facet_workers);
    }
    return true;
}
// Make baseline specification. Right now this is the same for every
// baseline, but this will change for baseline dependent averaging.
// Fills bl with freshly allocated time, uvw and frequency arrays for
// the antenna pair (a1, a2); the caller owns (and frees) the arrays.
void vis_spec_to_bl_data(struct bl_data *bl, struct vis_spec *spec,
                         int a1, int a2)
{
    int t, f;

    // Sample times
    bl->time_count = spec->time_count;
    bl->time = (double *)malloc(sizeof(double) * spec->time_count);
    for (t = 0; t < spec->time_count; t++) {
        bl->time[t] = spec->time_start + spec->time_step * t;
    }

    // UVW coordinates (metres), from the cached hour-angle sin/cos
    bl->uvw_m = (double *)malloc(sizeof(double) * spec->time_count * 3);
    for (t = 0; t < spec->time_count; t++) {
        ha_to_uvw_sc(spec->cfg, a1, a2,
                     spec->ha_sin[t], spec->ha_cos[t],
                     spec->dec_sin, spec->dec_cos,
                     bl->uvw_m + t*3);
    }

    // Frequencies
    bl->freq_count = spec->freq_count;
    bl->freq = (double *)malloc(sizeof(double) * spec->freq_count);
    for (f = 0; f < spec->freq_count; f++) {
        bl->freq[f] = spec->freq_start + spec->freq_step * f;
    }

    bl->antenna1 = a1;
    bl->antenna2 = a2;
}
/* Create the per-baseline HDF5 group hierarchy (a1/a2) under vis_group.
 *
 * When worker >= 0 only baselines covered by that worker's subgrid work
 * are created; worker < 0 creates all baselines. Returns true on
 * success, false on any HDF5 failure.
 *
 * Fixes: (1) the create_vis_group error path returned 1 (== true),
 * signalling success despite the failure; (2) the bl_work map was
 * leaked on every path; (3) calloc() arguments were in (size, count)
 * order instead of (count, size); (4) uint64_t printed with %ld.
 */
bool create_bl_groups(hid_t vis_group, struct work_config *work_cfg, int worker)
{
    struct vis_spec *spec = &work_cfg->spec;
    struct ant_config *cfg = spec->cfg;

    // Map baselines to work. For each (a1, a2) remember one piece of
    // subgrid work covering it - any one will do (later overwrites are
    // fine), we only need to know whether this worker handles it.
    struct subgrid_work **bl_work = NULL;
    if (worker >= 0) {
        bl_work = (struct subgrid_work **)
            calloc(cfg->ant_count * cfg->ant_count, sizeof(struct subgrid_work *));
        struct subgrid_work *work = work_cfg->subgrid_work + worker * work_cfg->subgrid_max_work;
        int iwork;
        for (iwork = 0; iwork < work_cfg->subgrid_max_work; iwork++) {
            if (work[iwork].nbl == 0) continue;
            struct subgrid_work_bl *bl;
            for (bl = work[iwork].bls; bl; bl = bl->next) {
                bl_work[bl->a1 * cfg->ant_count + bl->a2] = &work[iwork];
            }
        }
    }

    int a1, a2;
    int ncreated = 0;
    uint64_t nvis = 0;
    double create_start = get_time_ns();
    for (a1 = 0; a1 < cfg->ant_count; a1++) {

        // Progress message
        if (a1 % 32 == 0) { printf("%d ", a1); fflush(stdout); }

        hid_t a1_g = 0;
        for (a2 = a1+1; a2 < cfg->ant_count; a2++) {

            // Skip baselines this worker does not handle
            if (bl_work) {
                struct subgrid_work *bw = bl_work[a1 * cfg->ant_count + a2];
                if (!bw) continue;
            }

            // Create outer antenna group, if not already done so
            if (!a1_g) {
                char a1name[12];
                sprintf(a1name, "%d", a1);
                a1_g = H5Gcreate(vis_group, a1name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
                if (a1_g < 0) {
                    fprintf(stderr, "Could not open '%s' antenna group!\n", a1name);
                    free(bl_work);
                    return false;
                }
            }

            // Create inner antenna group
            char a2name[12];
            sprintf(a2name, "%d", a2);
            hid_t a2_g = H5Gcreate(a1_g, a2name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
            if (a2_g < 0) {
                fprintf(stderr, "Could not open '%s' antenna group!\n", a2name);
                H5Gclose(a1_g);
                free(bl_work);
                return false;
            }

            // Create baseline structure (TODO: baseline-dependent averaging...)
            struct bl_data bl;
            vis_spec_to_bl_data(&bl, spec, a1, a2);

            // Write to visibility group
            if (!create_vis_group(a2_g, spec->freq_chunk, spec->time_chunk,
                                  work_cfg->vis_skip_metadata, &bl)) {
                H5Gclose(a2_g); H5Gclose(a1_g);
                free(bl_work);
                return false;  // was "return 1" (i.e. true) despite failure
            }

            // Statistics & cleanups
            ncreated++;
            nvis += bl.time_count * bl.freq_count;
            free(bl.time); free(bl.uvw_m); free(bl.freq);
            H5Gclose(a2_g);
        }
        if (a1_g) H5Gclose(a1_g);
    }
    free(bl_work);
    printf("\ndone in %.2fs, %d groups for up to %ld visibilities (~%.3f GB) created\n",
           get_time_ns() -create_start, ncreated, (long)nvis, 16. * nvis / 1000000000);
    return true;
}
|
vector-product_gpu.c | /*
This program calculates result of vector product.
It generates a vector with the results of two vectors multiplication.
This program create a csv file with the time execution results for each
function(CPU,GPU) in this format: size of vector,cpu time,gpu time.
Author: Kezia Andrade
Date: 04-06-2015
version 1.0
Run:
folder_ipmacc/ipmacc folder_archive/mat-mul-sun-acc.c
./a.out
*/
#include "BenchmarksUtil.h"
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#ifdef RUN_TEST
#define SIZE 1100
#elif RUN_BENCHMARK
#define SIZE 9600
#else
#define SIZE 1000
#endif
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
// Fill both input vectors with deterministic test data:
// A[i] = i + 3i, B[i] = i + 2i (the int product is converted to float
// on addition, exactly as before).
void init_arrays(float *A, float *B) {
  for (int idx = 0; idx < SIZE; ++idx) {
    A[idx] = (float)idx + 3 * idx;
    B[idx] = (float)idx + 2 * idx;
  }
}
// Element-wise vector product computed on the accelerator.
// Copies A and B to the device, runs the multiply there, and maps the
// result vector C back to the host.
// NOTE(review): DEVICE_ID is presumably defined by BenchmarksUtil.h -
// confirm against that header.
void product_GPU(float *A, float *B, float *C) {
  int i;
#pragma omp target map(to : A[0 : SIZE], B[0 : SIZE]) map(from : C[0 : SIZE]) \
    device(DEVICE_ID)
  {
// The parallel-for loop variable is implicitly private per OpenMP rules.
#pragma omp parallel for
    for (i = 0; i < SIZE; ++i) {
      C[i] = A[i] * B[i];
    }
  }
}
// Reference implementation: element-wise vector product on the host.
void product_CPU(float *A, float *B, float *C) {
  for (int k = 0; k < SIZE; ++k)
    C[k] = A[k] * B[k];
}
// Compare CPU and GPU result vectors element-wise.
// Counts (and reports) the entries whose relative difference exceeds
// PERCENT_DIFF_ERROR_THRESHOLD; returns that count (0 == match).
// Fix: removed the unused local `j` (compiler warning) and dead
// commented-out debug code.
int compareResults(float *A, float *A_outputFromGpu) {
  int i, fail = 0;
  for (i = 0; i < SIZE; i++) {
    if (percentDiff(A[i], A_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) {
      fail++;
    }
  }

  // Print results
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         PERCENT_DIFF_ERROR_THRESHOLD, fail);
  return fail;
}
// Driver: allocate vectors, time the GPU kernel (and, under RUN_TEST,
// the CPU reference) and return the number of mismatches (0 == pass).
// Fix: allocation results are now checked before use.
int main(int argc, char *argv[]) {
  double t_start, t_end;
  float *A, *B, *C_CPU, *C_GPU;
  int fail = 0;

  A = (float *)malloc(sizeof(float) * SIZE);
  B = (float *)malloc(sizeof(float) * SIZE);
  C_CPU = (float *)malloc(sizeof(float) * SIZE);
  C_GPU = (float *)malloc(sizeof(float) * SIZE);
  if (!A || !B || !C_CPU || !C_GPU) {
    fprintf(stderr, "ERROR: out of memory\n");
    free(A); free(B); free(C_CPU); free(C_GPU);
    return 1;
  }

  fprintf(stdout, "<< Vector Product >>\n");
  init_arrays(A, B);

  t_start = rtclock();
  product_GPU(A, B, C_GPU);
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);

#ifdef RUN_TEST
  t_start = rtclock();
  product_CPU(A, B, C_CPU);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
  fail = compareResults(C_CPU, C_GPU);
#endif

  free(A);
  free(B);
  free(C_CPU);
  free(C_GPU);
  return fail;
}
|
DRB004-antidep2-var-yes.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it andor
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http:www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISOIEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https:github.comLLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two nested loops with loop-carried anti-dependence on the outer level.
This is a variable-length array version in C99.
Data race pair: a[i][j]@70:7 vs. a[i+1][j]@70:18
*/
#include <stdio.h>
#include <stdlib.h>
/* DataRaceBench kernel: two nested loops with a loop-carried
 * anti-dependence, VLA (C99) version. The race documented in the file
 * header is INTENTIONAL benchmark behaviour - do not "fix" it. */
int main(int argc, char * argv[])
{
    int i, j;
    int len = 20;
    /* NOTE(review): the VLA is sized with len == 20 BEFORE the optional
     * command-line override below, so argv[1] > 20 would index out of
     * bounds - kept as-is, this is reference benchmark code. */
    double a[len][len];
    int _ret_val_0;
    if (argc>1)
    {
        len=atoi(argv[1]);
    }
    /* Initialisation: a[i][j] = 0.5 everywhere (fully parallel). */
    #pragma cetus private(i, j)
    #pragma loop name main#0
    #pragma cetus parallel
    #pragma omp parallel for private(i, j)
    for (i=0; i<len; i ++ )
    {
        #pragma cetus private(j)
        #pragma loop name main#0#0
        #pragma cetus parallel
        #pragma omp parallel for private(j)
        for (j=0; j<len; j ++ )
        {
            a[i][j]=0.5;
        }
    }
    /* Kernel with the anti-dependence a[i][j] += a[i+1][j] between
     * consecutive outer iterations (see file header for the reported
     * race pair). Only the inner loop carries an OpenMP pragma here. */
    #pragma cetus private(i, j)
    #pragma loop name main#1
    for (i=0; i<(len-1); i+=1)
    {
        #pragma cetus private(j)
        #pragma loop name main#1#0
        #pragma cetus parallel
        #pragma omp parallel for private(j)
        for (j=0; j<len; j+=1)
        {
            a[i][j]+=a[i+1][j];
        }
    }
    /* Print the result matrix (sequential). */
    #pragma cetus private(i, j)
    #pragma loop name main#2
    for (i=0; i<len; i ++ )
    {
        #pragma cetus private(j)
        #pragma loop name main#2#0
        for (j=0; j<len; j ++ )
        {
            printf("%lf\n", a[i][j]);
        }
    }
    _ret_val_0=0;
    return _ret_val_0;
}
|
gemm.c | /**
* gemm.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
#define BENCHMARK_NAME "GEMM"
// define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
/* Problem size. */
#ifdef RUN_POLYBENCH_SIZE
#define SIZE 512
#elif RUN_TEST
#define SIZE 1100
#elif RUN_BENCHMARK
#define SIZE 9600
#else
#define SIZE 1000
#endif
#define NI SIZE
#define NJ SIZE
#define NK SIZE
/* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0)
*/
#define ALPHA 32412.0f
#define BETA 2123.0f
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
// Reference (host) GEMM: C = ALPHA * A * B + BETA * C, all matrices
// SIZE x SIZE in row-major layout.
void gemm(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C) {
  for (int i = 0; i < NI; i++) {
    for (int j = 0; j < NJ; j++) {
      // Accumulate in the same order as before: scale, then add terms
      DATA_TYPE acc = C[i * NJ + j] * BETA;
      for (int k = 0; k < NK; ++k) {
        acc += ALPHA * A[i * NK + k] * B[k * NJ + j];
      }
      C[i * NJ + j] = acc;
    }
  }
}
// Offloaded GEMM: C = ALPHA * A * B + BETA * Cinit, computed on the
// device. Cinit supplies the original C values so the kernel never
// reads the (map(from:)) output buffer before writing it.
// NOTE(review): DEVICE_ID is presumably provided by BenchmarksUtil.h.
void gemm_OMP(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *Cinit) {
#pragma omp target map(to : A[ : NI *NK], B[ : NK *NJ], Cinit[ : NI *NJ]) map(from : C[ : NI *NJ]) device(DEVICE_ID)
#pragma omp teams distribute parallel for
  for (int i = 0; i < NI; i++) {
    for (int j = 0; j < NJ; j++) {
      C[i * NJ + j] = Cinit[i * NJ + j] * BETA;
      for (int k = 0; k < NK; ++k) {
        C[i * NJ + j] += ALPHA * A[i * NK + k] * B[k * NJ + j];
      }
    }
  }
}
// Fill A, B and both C buffers with deterministic test data.
// C and C_OMP receive identical values so CPU and GPU paths start from
// the same state.
void init(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *C_OMP) {
  int r, c;

  // A is NI x NK
  for (r = 0; r < NI; r++) {
    for (c = 0; c < NK; c++) {
      A[r * NK + c] = ((DATA_TYPE)r * c) / NI;
    }
  }

  // B is NK x NJ
  for (r = 0; r < NK; r++) {
    for (c = 0; c < NJ; c++) {
      B[r * NJ + c] = ((DATA_TYPE)r * c + 1) / NJ;
    }
  }

  // C / C_OMP are NI x NJ
  for (r = 0; r < NI; r++) {
    for (c = 0; c < NJ; c++) {
      C[r * NJ + c] = ((DATA_TYPE)r * c + 2) / NJ;
      C_OMP[r * NJ + c] = ((DATA_TYPE)r * c + 2) / NJ;
    }
  }
}
// Compare the CPU result C against the GPU result element-wise and
// report every entry whose relative difference exceeds the threshold.
// Returns the mismatch count (0 == results match).
int compareResults(DATA_TYPE *C, DATA_TYPE *C_outputFromGpu) {
  int fail = 0;

  // Compare C1 and C2
  for (int r = 0; r < NI; r++) {
    for (int c = 0; c < NJ; c++) {
      DATA_TYPE ref = C[r * NJ + c];
      DATA_TYPE gpu = C_outputFromGpu[r * NJ + c];
      if (percentDiff(ref, gpu) > PERCENT_DIFF_ERROR_THRESHOLD) {
        fail++;
        fprintf(stdout, "%f != %f \n", C[r * NJ + c],
                C_outputFromGpu[r * NJ + c]);
      }
    }
  }

  // Print results
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         PERCENT_DIFF_ERROR_THRESHOLD, fail);
  return fail;
}
// Driver: allocate matrices, time the offloaded GEMM (and, under
// RUN_TEST, the CPU reference) and return the mismatch count.
// Fixes: Cinit_outputFromGpu was never freed (leak); allocation
// results are now checked before use.
int main(int argc, char *argv[]) {
  double t_start, t_end;
  int fail = 0;

  DATA_TYPE *A;
  DATA_TYPE *B;
  DATA_TYPE *C;
  DATA_TYPE *C_outputFromGpu;
  DATA_TYPE *Cinit_outputFromGpu;

  A = (DATA_TYPE *)malloc(NI * NK * sizeof(DATA_TYPE));
  B = (DATA_TYPE *)malloc(NK * NJ * sizeof(DATA_TYPE));
  C = (DATA_TYPE *)malloc(NI * NJ * sizeof(DATA_TYPE));
  C_outputFromGpu = (DATA_TYPE *)calloc(NI * NJ, sizeof(DATA_TYPE));
  Cinit_outputFromGpu = (DATA_TYPE *)malloc(NI * NJ * sizeof(DATA_TYPE));
  if (!A || !B || !C || !C_outputFromGpu || !Cinit_outputFromGpu) {
    fprintf(stderr, "ERROR: out of memory\n");
    free(A); free(B); free(C);
    free(C_outputFromGpu); free(Cinit_outputFromGpu);
    return 1;
  }

  printBenchmarkInfo(BENCHMARK_NAME, SIZE);
  init(A, B, C, Cinit_outputFromGpu);

  t_start = rtclock();
  gemm_OMP(A, B, C_outputFromGpu, Cinit_outputFromGpu);
  t_end = rtclock();
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);

#ifdef RUN_TEST
  t_start = rtclock();
  gemm(A, B, C);
  t_end = rtclock();
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
  fail = compareResults(C, C_outputFromGpu);
#endif

  free(A);
  free(B);
  free(C);
  free(C_outputFromGpu);
  free(Cinit_outputFromGpu);  // was leaked
  return fail;
}
|
integrator.c | #define _USE_MATH_DEFINES
#include <string.h>
#include <assert.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "io.h"
#include "storage.h"
#include "integrator.h"
#include "cmontecarlo.h"
#include "omp_helper.h"
#define NULEN 0
#define LINELEN 1
#define PLEN 2
#define SHELLEN 3
#define C_INV 3.33564e-11
#define M_PI acos (-1)
#define KB_CGS 1.3806488e-16
#define H_CGS 6.62606957e-27
/**
 * Planck black-body specific intensity in CGS units:
 * .. math::
 *   I(\\nu, T) = \\frac{2h\\nu^3}{c^2}\\frac{1}{e^{h\\nu \\beta_\\textrm{rad}} - 1}
 * where beta_rad = 1/(kT).
 */
double
intensity_black_body (double nu, double T)
{
    const double beta_rad = 1 / (KB_CGS * T);            // 1/(kT)
    const double prefactor = 2 * H_CGS * C_INV * C_INV;  // 2h/c^2
    double nu3 = nu * nu * nu;
    return prefactor * nu3 / (exp(H_CGS * nu * beta_rad) - 1 );
}
/*! @brief Integrate a uniformly sampled array with the trapezoid rule
 *
 * Endpoints contribute half weight, interior samples full weight; the
 * accumulated sum is scaled by the (constant) step width h.
 */
double
trapezoid_integration (const double* array, const double h, int N)
{
    double acc = (array[0] + array[N-1]) / 2;
    for (int k = 1; k + 1 < N; ++k)
    {
        acc += array[k];
    }
    return acc * h;
}
/*! @brief Calculate distance to p line
 *
 * Calculate half of the length of the p-line inside a shell
 * of radius r in terms of unit length (c * t_exp).
 * If shell and p-line do not intersect, return 0.
 *
 * @param r radius of the shell
 * @param p distance of the p-line to the center of the supernova
 * @param inv_t inverse time_explosion, needed to norm to unit length
 * @return half the length inside the shell, or zero
 */
static inline double
calculate_z(double r, double p, double inv_t)
{
    if (r > p)
    {
        return sqrt(r * r - p * p) * C_INV * inv_t;
    }
    return 0;
}
/*!
 * @brief Calculate p line intersections
 *
 * This function calculates the intersection points of the p-line with each shell
 *
 * @param storage (INPUT) A storage model containing the environment
 * @param p (INPUT) distance of the integration line to the center
 * @param oz (OUTPUT) will be set with z values. The array is truncated by the
 *              value `1`.
 * @param oshell_id (OUTPUT) will be set with the corresponding shell_ids
 * @return number of shells intersected by the p-line
 */
int64_t
populate_z(const storage_model_t *storage, const double p, double *oz, int64_t *oshell_id)
{

    // Abbreviations
    double *r = storage->r_outer_i;
    const int64_t N = storage->no_of_shells_i;
    double inv_t = storage->inverse_time_explosion;
    double z = 0;
    // offset == N acts as a "not yet intersecting" sentinel below
    int64_t i = 0, offset = N, i_low, i_up;

    if (p <= storage->r_inner_i[0])
    {
        // Intersect the photosphere: the ray enters the inner boundary,
        // so each shell is crossed exactly once (near side only).
        for(i = 0; i < N; ++i)
        { // Loop from inside to outside
            oz[i] = 1 - calculate_z(r[i], p, inv_t);
            oshell_id[i] = i;
        }
        return N;
    }
    else
    {
        // No intersection with the photosphere
        // that means we intersect each shell twice
        for(i = 0; i < N; ++i)
        { // Loop from inside to outside
            z = calculate_z(r[i], p, inv_t);
            if (z == 0)
                continue;
            if (offset == N)
            {
                // First shell the ray actually crosses
                offset = i;
            }
            // Calculate the index in the resulting array
            i_low = N - i - 1;  // the far intersection with the shell
            i_up = N + i - 2 * offset;  // the nearer intersection with the shell

            // Setting the arrays; oz is 1 -/+ z so values straddle 1
            oz[i_low] = 1 + z;
            oshell_id[i_low] = i;
            oz[i_up] = 1 - z;
            oshell_id[i_up] = i;
        }
        // Two crossings per intersected shell
        return 2 * (N - offset);
    }
}
/*! @brief Calculate integration points
 *
 * Fills opp with N equidistant impact parameters from 0 to R_max
 * inclusive (trapezoid integration points).
 */
void
calculate_p_values(double R_max, int64_t N, double *opp)
{
    const double step = R_max/(N - 1);
    for(int idx = 0; idx < N; ++idx)
    {
        opp[idx] = step * idx;
    }
}
/*! @brief Calculate a spectrum using the formal integral approach
 *
 * @param storage model describing the environment (read-only)
 * @param iT radiation temperature for the photospheric boundary
 * @param inu frequency grid, inu_size entries
 * @param att_S_ul attenuated source function per (shell, line)
 * @param Jred_lu / Jblue_lu mean intensities red/blue of each line
 * @param N number of impact parameters for the p-ray integration
 * @return newly allocated array of inu_size luminosities (caller frees)
 *
 * Fixes: exp_tau was never freed (the "Free everything allocated on
 * heap" comment had no matching free); I_nu[0] was read uninitialized
 * by trapezoid_integration (the p_idx loop starts at 1).
 */
double *
_formal_integral(
        const storage_model_t *storage,
        double iT,
        double *inu, int64_t inu_size,
        double *att_S_ul, double *Jred_lu, double *Jblue_lu, int N)
{
    // Initialize the output which is shared among threads
    // NOTE(review): calloc results are not checked (pre-existing).
    double *L = calloc(inu_size, sizeof(double));
    // global read-only values
    int64_t size_line = storage->no_of_lines,
        size_shell = storage->no_of_shells_i,
        size_tau = size_line * size_shell,
        finished_nus = 0;

    double R_ph = storage->r_inner_i[0];
    double R_max = storage->r_outer_i[size_shell - 1];
    double pp[N];  // impact parameters (VLA; N is assumed modest)
    double *exp_tau = calloc(size_tau, sizeof(double));
#pragma omp parallel firstprivate(L, exp_tau)
    {
#pragma omp master
        {
            if (omp_get_num_threads() > 1) {
                fprintf(stderr, "Doing the formal integral\nRunning with OpenMP - %d threads\n", omp_get_num_threads());
            } else {
                fprintf(stderr, "Doing the formal integral\nRunning without OpenMP\n");
            }
            print_progress_fi(0, inu_size);
        }

        // Initializing all the thread-local variables
        int64_t offset = 0, i = 0,
            size_z = 0,
            idx_nu_start = 0,
            direction = 0,
            first = 0;

        double I_nu[N],
            z[2 * storage->no_of_shells_i],
            p = 0,
            nu_start,
            nu_end,
            nu,
            zstart,
            zend,
            escat_contrib,
            escat_op,
            Jkkp;
        int64_t shell_id[2 * storage->no_of_shells_i];

        double *pexp_tau, *patt_S_ul, *pline, *pJred_lu, *pJblue_lu;

        // Prepare exp_tau: e^{-tau} per (shell, line), shared scratch
#pragma omp for
        for (i = 0; i < size_tau; ++i) {
            exp_tau[i] = exp( -storage->line_lists_tau_sobolevs_i[i]);
        }
        calculate_p_values(storage->r_outer_i[storage->no_of_shells_i - 1], N, pp);
        // Done with the initialization

        // Loop over wavelengths in spectrum
#pragma omp for
        for (int nu_idx = 0; nu_idx < inu_size ; ++nu_idx)
        {
            nu = inu[nu_idx];

            // The p = 0 ray (pp[0]) carries zero weight below
            // (I_nu[p_idx] is multiplied by p); set it explicitly so
            // trapezoid_integration never reads an uninitialized value.
            I_nu[0] = 0;

            // Loop over discrete values along line
            for (int p_idx = 1; p_idx < N; ++p_idx)
            {
                escat_contrib = 0;
                p = pp[p_idx];

                // initialize z intersections for p values
                size_z = populate_z(storage, p, z, shell_id);

                // initialize I_nu: rays hitting the photosphere start
                // from the black-body boundary condition
                if (p <= R_ph)
                    I_nu[p_idx] = intensity_black_body(nu * z[0], iT);
                else
                    I_nu[p_idx] = 0;

                // Find first contributing line
                nu_start = nu * z[0];
                nu_end = nu * z[1];
                line_search(
                        storage->line_list_nu,
                        nu_start,
                        size_line,
                        &idx_nu_start
                        );
                offset = shell_id[0] * size_line;

                // start tracking accumulated e-scattering optical depth
                zstart = storage->time_explosion / C_INV * (1. - z[0]);

                // Initialize pointers into the per-(shell, line) tables
                pline = storage->line_list_nu + idx_nu_start;
                pexp_tau = exp_tau + offset + idx_nu_start;
                patt_S_ul = att_S_ul + offset + idx_nu_start;
                pJred_lu = Jred_lu + offset + idx_nu_start;
                pJblue_lu = Jblue_lu + offset + idx_nu_start;

                // flag for first contribution to integration on current p-ray
                first = 1;

                // TODO: Ugly loop
                // Loop over all intersections
                // TODO: replace by number of intersections and remove break
                for (i = 0; i < size_z - 1; ++i)
                {
                    escat_op = storage->electron_densities_i[shell_id[i]] * storage->sigma_thomson;
                    nu_end = nu * z[i+1];

                    // TODO: e-scattering: in principle we also have to check
                    // that dtau is <<1 (as assumed in Lucy 1999); if not, there
                    // is the chance that I_nu_b becomes negative
                    for (;pline < storage->line_list_nu + size_line;
                            // We have to increment all pointers simultaneously
                            ++pline,
                            ++pexp_tau,
                            ++patt_S_ul,
                            ++pJblue_lu)
                    {
                        if (*pline < nu_end)
                        {
                            // next resonance not in current shell
                            break;
                        }

                        // Calculate e-scattering optical depth to next resonance point
                        zend = storage->time_explosion / C_INV * (1. - *pline / nu);

                        if (first == 1){
                            // First contribution to integration
                            // NOTE: this treatment of I_nu_b (given by boundary
                            // conditions) is not in Lucy 1999; should be
                            // re-examined carefully
                            escat_contrib += (zend - zstart) * escat_op * (*pJblue_lu - I_nu[p_idx]) ;
                            first = 0;
                        }
                        else{
                            // Account for e-scattering, c.f. Eqs 27, 28 in Lucy 1999
                            Jkkp = 0.5 * (*pJred_lu + *pJblue_lu);
                            escat_contrib += (zend - zstart) * escat_op * (Jkkp - I_nu[p_idx]) ;
                            // this introduces the necessary offset of one element between pJblue_lu and pJred_lu
                            pJred_lu += 1;
                        }
                        I_nu[p_idx] = I_nu[p_idx] + escat_contrib;

                        // Lucy 1999, Eq 26: attenuate and add source term
                        I_nu[p_idx] = I_nu[p_idx] * (*pexp_tau) + *patt_S_ul;

                        // reset e-scattering opacity
                        escat_contrib = 0;
                        zstart = zend;
                    }

                    // Calculate e-scattering optical depth to grid cell boundary
                    Jkkp = 0.5 * (*pJred_lu + *pJblue_lu);
                    zend = storage->time_explosion / C_INV * (1. - nu_end / nu);
                    escat_contrib += (zend - zstart) * escat_op * (Jkkp - I_nu[p_idx]);
                    zstart = zend;

                    // NOTE(review): this condition is always true inside
                    // the enclosing loop (i < size_z - 1); kept as-is.
                    if (i < size_z-1){
                        // advance pointers to the next shell's tables
                        direction = shell_id[i+1] - shell_id[i];
                        pexp_tau += direction * size_line;
                        patt_S_ul += direction * size_line;
                        pJred_lu += direction * size_line;
                        pJblue_lu += direction * size_line;
                    }
                }
                // Weight by the impact parameter (cylindrical measure)
                I_nu[p_idx] *= p;
            }
            // TODO: change integration to match the calculation of p values
            L[nu_idx] = 8 * M_PI * M_PI * trapezoid_integration(I_nu, R_max/N, N);

#pragma omp atomic update
            ++finished_nus;
            // NOTE(review): finished_nus is read here without atomics;
            // benign, only affects progress reporting.
            if (finished_nus%10 == 0){
                print_progress_fi(finished_nus, inu_size);
            }
        }
        // NOTE(review): every thread prints this newline at the end of
        // the parallel region (kept as-is).
        printf("\n");
    }
    // Release the shared e^{-tau} scratch table (was leaked before).
    // L is returned to - and freed by - the caller.
    free(exp_tau);
    return L;
}
|
SSDIndex.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include <limits>
#include "inc/Core/Common.h"
#include "inc/Core/Common/DistanceUtils.h"
#include "inc/Core/Common/QueryResultSet.h"
#include "inc/Core/SPANN/Index.h"
#include "inc/Core/SPANN/ExtraFullGraphSearcher.h"
#include "inc/Helper/VectorSetReader.h"
#include "inc/Helper/StringConvert.h"
#include "inc/SSDServing/Utils.h"
namespace SPTAG {
namespace SSDServing {
namespace SSDIndex {
// Write search results to a binary file:
// [int32 queryCount][int32 resultNum] then, per query and result slot,
// [int32 VID][float Dist]. Does nothing when p_output is empty; exits
// the process on any I/O failure (same as before).
template <typename ValueType>
void OutputResult(const std::string& p_output, std::vector<QueryResult>& p_results, int p_resultNum)
{
    if (p_output.empty()) return;

    auto out = f_createIO();
    if (out == nullptr || !out->Initialize(p_output.c_str(), std::ios::binary | std::ios::out)) {
        LOG(Helper::LogLevel::LL_Error, "Failed create file: %s\n", p_output.c_str());
        exit(1);
    }

    // Header: query count followed by results-per-query
    int32_t header = static_cast<int32_t>(p_results.size());
    if (out->WriteBinary(sizeof(header), reinterpret_cast<char*>(&header)) != sizeof(header)) {
        LOG(Helper::LogLevel::LL_Error, "Fail to write result file!\n");
        exit(1);
    }
    header = p_resultNum;
    if (out->WriteBinary(sizeof(header), reinterpret_cast<char*>(&header)) != sizeof(header)) {
        LOG(Helper::LogLevel::LL_Error, "Fail to write result file!\n");
        exit(1);
    }

    // Body: (VID, distance) pairs
    for (size_t qi = 0; qi < p_results.size(); ++qi)
    {
        for (int ri = 0; ri < p_resultNum; ++ri)
        {
            int32_t vid = p_results[qi].GetResult(ri)->VID;
            if (out->WriteBinary(sizeof(vid), reinterpret_cast<char*>(&vid)) != sizeof(vid)) {
                LOG(Helper::LogLevel::LL_Error, "Fail to write result file!\n");
                exit(1);
            }
            float dist = p_results[qi].GetResult(ri)->Dist;
            if (out->WriteBinary(sizeof(dist), reinterpret_cast<char*>(&dist)) != sizeof(dist)) {
                LOG(Helper::LogLevel::LL_Error, "Fail to write result file!\n");
                exit(1);
            }
        }
    }
}
// Log the average and 50/90/95/99/99.9/max percentiles of the values
// extracted from p_values via p_get, formatted with p_format per cell.
// Fix: an empty input previously divided by zero (sum / size) and
// indexed collects[size - 1] out of bounds; now it logs and returns.
template<typename T, typename V>
void PrintPercentiles(const std::vector<V>& p_values, std::function<T(const V&)> p_get, const char* p_format)
{
    if (p_values.empty())
    {
        LOG(Helper::LogLevel::LL_Info, "PrintPercentiles: no values collected.\n");
        return;
    }

    double sum = 0;
    std::vector<T> collects;
    collects.reserve(p_values.size());
    for (const auto& v : p_values)
    {
        T tmp = p_get(v);
        sum += tmp;
        collects.push_back(tmp);
    }

    // Percentiles are read from the sorted samples
    std::sort(collects.begin(), collects.end());

    LOG(Helper::LogLevel::LL_Info, "Avg\t50tiles\t90tiles\t95tiles\t99tiles\t99.9tiles\tMax\n");

    // Build "%.3lf\t<fmt>\t<fmt>..." - one cell per statistic
    std::string formatStr("%.3lf");
    for (int i = 1; i < 7; ++i)
    {
        formatStr += '\t';
        formatStr += p_format;
    }
    formatStr += '\n';

    LOG(Helper::LogLevel::LL_Info,
        formatStr.c_str(),
        sum / collects.size(),
        collects[static_cast<size_t>(collects.size() * 0.50)],
        collects[static_cast<size_t>(collects.size() * 0.90)],
        collects[static_cast<size_t>(collects.size() * 0.95)],
        collects[static_cast<size_t>(collects.size() * 0.99)],
        collects[static_cast<size_t>(collects.size() * 0.999)],
        collects[static_cast<size_t>(collects.size() - 1)]);
}
// Run up to p_maxQueryCount queries through the index using
// p_numThreads worker threads pulling from a shared atomic counter.
// Per-query latencies are recorded into p_stats; overall throughput is
// logged at the end.
template <typename ValueType>
void SearchSequential(SPANN::Index<ValueType>* p_index,
    int p_numThreads,
    std::vector<QueryResult>& p_results,
    std::vector<SPANN::SearchStats>& p_stats,
    int p_maxQueryCount, int p_internalResultNum)
{
    // NOTE(review): unqualified min() - presumably provided by an
    // included header (or the Windows macro); confirm.
    int numQueries = min(static_cast<int>(p_results.size()), p_maxQueryCount);

    // Work distribution: each thread claims the next query index
    std::atomic_size_t queriesSent(0);

    std::vector<std::thread> threads;

    LOG(Helper::LogLevel::LL_Info, "Searching: numThread: %d, numVectors: %d.\n", p_numThreads, numQueries);

    Utils::StopW sw;

    auto func = [&]()
    {
        Utils::StopW threadws;
        size_t index = 0;
        while (true)
        {
            index = queriesSent.fetch_add(1);
            // NOTE(review): size_t vs int comparison - numQueries is
            // non-negative, so the implicit conversion is safe here.
            if (index < numQueries)
            {
                // Periodic progress message (every 16384 queries)
                if ((index & ((1 << 14) - 1)) == 0)
                {
                    LOG(Helper::LogLevel::LL_Info, "Sent %.2lf%%...\n", index * 100.0 / numQueries);
                }

                // Memory-index search, then disk (SSD) refinement;
                // record both the extra and the total latency.
                double startTime = threadws.getElapsedMs();
                p_index->GetMemoryIndex()->SearchIndex(p_results[index]);
                double endTime = threadws.getElapsedMs();
                p_index->DebugSearchDiskIndex(p_results[index], p_internalResultNum, p_internalResultNum, &(p_stats[index]));
                double exEndTime = threadws.getElapsedMs();

                p_stats[index].m_exLatency = exEndTime - endTime;
                p_stats[index].m_totalLatency = p_stats[index].m_totalSearchLatency = exEndTime - startTime;
            }
            else
            {
                return;
            }
        }
    };
    for (int i = 0; i < p_numThreads; i++) { threads.emplace_back(func); }
    for (auto& thread : threads) { thread.join(); }

    double sendingCost = sw.getElapsedSec();

    LOG(Helper::LogLevel::LL_Info,
        "Finish sending in %.3lf seconds, actuallQPS is %.2lf, query count %u.\n",
        sendingCost,
        numQueries / sendingCost,
        static_cast<uint32_t>(numQueries));
}
// End-to-end SSD search driver: optional warmup pass, query-set search,
// optional full-precision rerank, recall computation against a truth file,
// latency/IO percentile reports, result output, and (optionally) a detailed
// per-query recall-loss analysis. All configuration comes from the index's
// SPANN::Options.
template <typename ValueType>
void Search(SPANN::Index<ValueType>* p_index)
{
    SPANN::Options& p_opts = *(p_index->GetOptions());
    std::string outputFile = p_opts.m_searchResult;
    std::string truthFile = p_opts.m_truthPath;
    std::string warmupFile = p_opts.m_warmupPath;
    if (COMMON::DistanceUtils::Quantizer)
    {
        COMMON::DistanceUtils::Quantizer->SetEnableADC(p_opts.m_enableADC);
    }
    // redirect logging to a file if one is configured
    if (!p_opts.m_logFile.empty())
    {
        g_pLogger.reset(new Helper::FileLogger(Helper::LogLevel::LL_Info, p_opts.m_logFile.c_str()));
    }
    int numThreads = p_opts.m_iSSDNumberOfThreads;
    int internalResultNum = p_opts.m_searchInternalResultNum;
    int K = p_opts.m_resultNum;
    // truthK: how many truth neighbors to compare against (defaults to K)
    int truthK = (p_opts.m_truthResultNum <= 0) ? K : p_opts.m_truthResultNum;
    // ---- optional warmup pass (its results and stats are discarded) ----
    if (!warmupFile.empty())
    {
        LOG(Helper::LogLevel::LL_Info, "Start loading warmup query set...\n");
        std::shared_ptr<Helper::ReaderOptions> queryOptions(new Helper::ReaderOptions(p_opts.m_valueType, p_opts.m_dim, p_opts.m_warmupType, p_opts.m_warmupDelimiter));
        auto queryReader = Helper::VectorSetReader::CreateInstance(queryOptions);
        if (ErrorCode::Success != queryReader->LoadFile(p_opts.m_warmupPath))
        {
            LOG(Helper::LogLevel::LL_Error, "Failed to read query file.\n");
            exit(1);
        }
        auto warmupQuerySet = queryReader->GetVectorSet();
        int warmupNumQueries = warmupQuerySet->Count();
        std::vector<QueryResult> warmupResults(warmupNumQueries, QueryResult(NULL, max(K, internalResultNum), false));
        std::vector<SPANN::SearchStats> warmpUpStats(warmupNumQueries);
        for (int i = 0; i < warmupNumQueries; ++i)
        {
            warmupResults[i].SetTarget(reinterpret_cast<ValueType*>(warmupQuerySet->GetVector(i)));
            warmupResults[i].Reset();
        }
        LOG(Helper::LogLevel::LL_Info, "Start warmup...\n");
        SearchSequential(p_index, numThreads, warmupResults, warmpUpStats, p_opts.m_queryCountLimit, internalResultNum);
        LOG(Helper::LogLevel::LL_Info, "\nFinish warmup...\n");
    }
    // ---- load the real query set ----
    LOG(Helper::LogLevel::LL_Info, "Start loading QuerySet...\n");
    std::shared_ptr<Helper::ReaderOptions> queryOptions(new Helper::ReaderOptions(p_opts.m_valueType, p_opts.m_dim, p_opts.m_queryType, p_opts.m_queryDelimiter));
    auto queryReader = Helper::VectorSetReader::CreateInstance(queryOptions);
    if (ErrorCode::Success != queryReader->LoadFile(p_opts.m_queryPath))
    {
        LOG(Helper::LogLevel::LL_Error, "Failed to read query file.\n");
        exit(1);
    }
    auto querySet = queryReader->GetVectorSet();
    int numQueries = querySet->Count();
    std::vector<QueryResult> results(numQueries, QueryResult(NULL, max(K, internalResultNum), false));
    std::vector<SPANN::SearchStats> stats(numQueries);
    for (int i = 0; i < numQueries; ++i)
    {
        results[i].SetTarget(reinterpret_cast<ValueType*>(querySet->GetVector(i)));
        results[i].Reset();
    }
    LOG(Helper::LogLevel::LL_Info, "Start ANN Search...\n");
    SearchSequential(p_index, numThreads, results, stats, p_opts.m_queryCountLimit, internalResultNum);
    LOG(Helper::LogLevel::LL_Info, "\nFinish ANN Search...\n");
    // ---- optionally load full-precision vectors (used by rerank and recall) ----
    std::shared_ptr<VectorSet> vectorSet;
    if (!p_opts.m_vectorPath.empty() && fileexists(p_opts.m_vectorPath.c_str())) {
        std::shared_ptr<Helper::ReaderOptions> vectorOptions(new Helper::ReaderOptions(p_opts.m_valueType, p_opts.m_dim, p_opts.m_vectorType, p_opts.m_vectorDelimiter));
        auto vectorReader = Helper::VectorSetReader::CreateInstance(vectorOptions);
        if (ErrorCode::Success == vectorReader->LoadFile(p_opts.m_vectorPath))
        {
            vectorSet = vectorReader->GetVectorSet();
            if (p_opts.m_distCalcMethod == DistCalcMethod::Cosine) vectorSet->Normalize(numThreads);
            LOG(Helper::LogLevel::LL_Info, "\nLoad VectorSet(%d,%d).\n", vectorSet->Count(), vectorSet->Dimension());
        }
    }
    // ---- rerank top-K with exact distances on raw vectors, then shrink K ----
    if (p_opts.m_rerank > 0 && vectorSet != nullptr) {
        LOG(Helper::LogLevel::LL_Info, "\n Begin rerank...\n");
        // distances are recomputed on raw vectors, so quantization is dropped
        COMMON::DistanceUtils::Quantizer.reset();
        for (int i = 0; i < results.size(); i++)
        {
            for (int j = 0; j < K; j++)
            {
                // VID < 0 marks an empty slot
                if (results[i].GetResult(j)->VID < 0) continue;
                results[i].GetResult(j)->Dist = COMMON::DistanceUtils::ComputeDistance((const ValueType*)querySet->GetVector(i),
                    (const ValueType*)vectorSet->GetVector(results[i].GetResult(j)->VID), querySet->Dimension(), p_opts.m_distCalcMethod);
            }
            BasicResult* re = results[i].GetResults();
            std::sort(re, re + K, COMMON::Compare);
        }
        K = p_opts.m_rerank;
    }
    // ---- recall against the truth file, if one is configured ----
    float recall = 0;
    std::vector<std::set<SizeType>> truth;
    if (!truthFile.empty())
    {
        LOG(Helper::LogLevel::LL_Info, "Start loading TruthFile...\n");
        auto ptr = f_createIO();
        if (ptr == nullptr || !ptr->Initialize(truthFile.c_str(), std::ios::in | std::ios::binary)) {
            LOG(Helper::LogLevel::LL_Error, "Failed open truth file: %s\n", truthFile.c_str());
            exit(1);
        }
        int originalK = truthK;
        COMMON::TruthSet::LoadTruth(ptr, truth, numQueries, originalK, truthK, p_opts.m_truthType);
        // if any bytes remain after numQueries entries, the truth file holds
        // more queries than we searched — warn but continue
        char tmp[4];
        if (ptr->ReadBinary(4, tmp) == 4) {
            LOG(Helper::LogLevel::LL_Error, "Truth number is larger than query number(%d)!\n", numQueries);
        }
        recall = COMMON::TruthSet::CalculateRecall<ValueType>((p_index->GetMemoryIndex()).get(), results, truth, K, truthK, querySet, vectorSet, numQueries);
        LOG(Helper::LogLevel::LL_Info, "Recall%d@%d: %f\n", truthK, K, recall);
    }
    // ---- latency / element-count / disk-IO distribution reports ----
    LOG(Helper::LogLevel::LL_Info, "\nEx Elements Count:\n");
    PrintPercentiles<double, SPANN::SearchStats>(stats,
        [](const SPANN::SearchStats& ss) -> double
        {
            return ss.m_totalListElementsCount;
        },
        "%.3lf");
    LOG(Helper::LogLevel::LL_Info, "\nHead Latency Distribution:\n");
    PrintPercentiles<double, SPANN::SearchStats>(stats,
        [](const SPANN::SearchStats& ss) -> double
        {
            // head latency = total minus the disk (ex) portion
            return ss.m_totalSearchLatency - ss.m_exLatency;
        },
        "%.3lf");
    LOG(Helper::LogLevel::LL_Info, "\nEx Latency Distribution:\n");
    PrintPercentiles<double, SPANN::SearchStats>(stats,
        [](const SPANN::SearchStats& ss) -> double
        {
            return ss.m_exLatency;
        },
        "%.3lf");
    LOG(Helper::LogLevel::LL_Info, "\nTotal Latency Distribution:\n");
    PrintPercentiles<double, SPANN::SearchStats>(stats,
        [](const SPANN::SearchStats& ss) -> double
        {
            return ss.m_totalSearchLatency;
        },
        "%.3lf");
    LOG(Helper::LogLevel::LL_Info, "\nTotal Disk Page Access Distribution:\n");
    PrintPercentiles<int, SPANN::SearchStats>(stats,
        [](const SPANN::SearchStats& ss) -> int
        {
            return ss.m_diskAccessCount;
        },
        "%4d");
    LOG(Helper::LogLevel::LL_Info, "\nTotal Disk IO Distribution:\n");
    PrintPercentiles<int, SPANN::SearchStats>(stats,
        [](const SPANN::SearchStats& ss) -> int
        {
            return ss.m_diskIOCount;
        },
        "%4d");
    LOG(Helper::LogLevel::LL_Info, "\n");
    // ---- write results and final recall ----
    if (!outputFile.empty())
    {
        LOG(Helper::LogLevel::LL_Info, "Start output to %s\n", outputFile.c_str());
        OutputResult<ValueType>(outputFile, results, K);
    }
    LOG(Helper::LogLevel::LL_Info,
        "Recall: %f\n",
        recall);
    LOG(Helper::LogLevel::LL_Info, "\n");
    // ---- optional recall-loss analysis on a random sample of queries ----
    // NOTE(review): this section indexes truth[...] and vectorSet below, so it
    // appears to assume both a truth file and a vector file were loaded —
    // confirm m_recall_analysis implies those options are set.
    if (p_opts.m_recall_analysis) {
        LOG(Helper::LogLevel::LL_Info, "Start recall analysis...\n");
        std::shared_ptr<VectorIndex> headIndex = p_index->GetMemoryIndex();
        SizeType sampleSize = numQueries < 100 ? numQueries : 100;
        SizeType sampleK = headIndex->GetNumSamples() < 1000 ? headIndex->GetNumSamples() : 1000;
        // tolerance for matching ANN vs brute-force head distances
        float sampleE = 1e-6f;
        std::vector<SizeType> samples(sampleSize, 0);
        std::vector<float> queryHeadRecalls(sampleSize, 0);
        std::vector<float> truthRecalls(sampleSize, 0);
        std::vector<int> shouldSelect(sampleSize, 0);
        std::vector<int> shouldSelectLong(sampleSize, 0);
        std::vector<int> nearQueryHeads(sampleSize, 0);
        std::vector<int> annNotFound(sampleSize, 0);
        std::vector<int> rngRule(sampleSize, 0);
        std::vector<int> postingCut(sampleSize, 0);
        for (int i = 0; i < sampleSize; i++) samples[i] = COMMON::Utils::rand(numQueries);
#pragma omp parallel for schedule(dynamic)
        for (int i = 0; i < sampleSize; i++)
        {
            // ANN search over the head index, plus brute force over all heads
            COMMON::QueryResultSet<ValueType> queryANNHeads((const ValueType*)(querySet->GetVector(samples[i])), max(K, internalResultNum));
            headIndex->SearchIndex(queryANNHeads);
            float queryANNHeadsLongestDist = queryANNHeads.GetResult(internalResultNum - 1)->Dist;
            COMMON::QueryResultSet<ValueType> queryBFHeads((const ValueType*)(querySet->GetVector(samples[i])), max(sampleK, internalResultNum));
            for (SizeType y = 0; y < headIndex->GetNumSamples(); y++)
            {
                float dist = headIndex->ComputeDistance(queryBFHeads.GetQuantizedTarget(), headIndex->GetSample(y));
                queryBFHeads.AddPoint(y, dist);
            }
            queryBFHeads.SortResult();
            // head recall: greedily match ANN head distances to BF head
            // distances within tolerance sampleE
            {
                std::vector<bool> visited(internalResultNum, false);
                for (SizeType y = 0; y < internalResultNum; y++)
                {
                    for (SizeType z = 0; z < internalResultNum; z++)
                    {
                        if (visited[z]) continue;
                        if (fabs(queryANNHeads.GetResult(z)->Dist - queryBFHeads.GetResult(y)->Dist) < sampleE)
                        {
                            queryHeadRecalls[i] += 1;
                            visited[z] = true;
                            break;
                        }
                    }
                }
            }
            std::map<int, std::set<int>> tmpFound; // headID->truths
            p_index->DebugSearchDiskIndex(queryBFHeads, internalResultNum, sampleK, nullptr, &truth[samples[i]], &tmpFound);
            for (SizeType z = 0; z < K; z++) {
                truthRecalls[i] += truth[samples[i]].count(queryBFHeads.GetResult(z)->VID);
            }
            // remove truths already found by the real search; what remains in
            // truth[samples[i]] are the "lost" neighbors analyzed below
            for (SizeType z = 0; z < K; z++) {
                truth[samples[i]].erase(results[samples[i]].GetResult(z)->VID);
            }
            // classify each lost truth by the reason it was missed
            for (std::map<int, std::set<int>>::iterator it = tmpFound.begin(); it != tmpFound.end(); it++) {
                float q2truthposting = headIndex->ComputeDistance(querySet->GetVector(samples[i]), headIndex->GetSample(it->first));
                for (auto vid : it->second) {
                    if (!truth[samples[i]].count(vid)) continue;
                    // its posting head was close enough to have been selected
                    if (q2truthposting < queryANNHeadsLongestDist) shouldSelect[i] += 1;
                    else {
                        shouldSelectLong[i] += 1;
                        // heads selected for the query that are closer to this
                        // truth vector than its own posting head
                        std::set<int> nearQuerySelectedHeads;
                        float v2vhead = headIndex->ComputeDistance(vectorSet->GetVector(vid), headIndex->GetSample(it->first));
                        for (SizeType z = 0; z < internalResultNum; z++) {
                            if (queryANNHeads.GetResult(z)->VID < 0) break;
                            float v2qhead = headIndex->ComputeDistance(vectorSet->GetVector(vid), headIndex->GetSample(queryANNHeads.GetResult(z)->VID));
                            if (v2qhead < v2vhead) {
                                nearQuerySelectedHeads.insert(queryANNHeads.GetResult(z)->VID);
                            }
                        }
                        if (nearQuerySelectedHeads.size() == 0) continue;
                        nearQueryHeads[i] += 1;
                        // would build-time ANN have routed this truth vector to
                        // one of those closer heads?
                        COMMON::QueryResultSet<ValueType> annTruthHead((const ValueType*)(vectorSet->GetVector(vid)), p_opts.m_debugBuildInternalResultNum);
                        headIndex->SearchIndex(annTruthHead);
                        bool found = false;
                        for (SizeType z = 0; z < annTruthHead.GetResultNum(); z++) {
                            if (nearQuerySelectedHeads.count(annTruthHead.GetResult(z)->VID)) {
                                found = true;
                                break;
                            }
                        }
                        if (!found) {
                            annNotFound[i] += 1;
                            continue;
                        }
                        // RNG rule and posting cut
                        std::set<int> replicas;
                        for (SizeType z = 0; z < annTruthHead.GetResultNum() && replicas.size() < p_opts.m_replicaCount; z++) {
                            BasicResult* item = annTruthHead.GetResult(z);
                            if (item->VID < 0) break;
                            bool good = true;
                            for (auto r : replicas) {
                                if (p_opts.m_rngFactor * headIndex->ComputeDistance(headIndex->GetSample(r), headIndex->GetSample(item->VID)) < item->Dist) {
                                    good = false;
                                    break;
                                }
                            }
                            if (good) replicas.insert(item->VID);
                        }
                        found = false;
                        for (auto r : nearQuerySelectedHeads) {
                            if (replicas.count(r)) {
                                found = true;
                                break;
                            }
                        }
                        if (found) postingCut[i] += 1;
                        else rngRule[i] += 1;
                    }
                }
            }
        }
        // aggregate per-sample counters and report the loss breakdown
        float headacc = 0, truthacc = 0, shorter = 0, longer = 0, lost = 0, buildNearQueryHeads = 0, buildAnnNotFound = 0, buildRNGRule = 0, buildPostingCut = 0;
        for (int i = 0; i < sampleSize; i++) {
            headacc += queryHeadRecalls[i];
            truthacc += truthRecalls[i];
            lost += shouldSelect[i] + shouldSelectLong[i];
            shorter += shouldSelect[i];
            longer += shouldSelectLong[i];
            buildNearQueryHeads += nearQueryHeads[i];
            buildAnnNotFound += annNotFound[i];
            buildRNGRule += rngRule[i];
            buildPostingCut += postingCut[i];
        }
        LOG(Helper::LogLevel::LL_Info, "Query head recall @%d:%f.\n", internalResultNum, headacc / sampleSize / internalResultNum);
        LOG(Helper::LogLevel::LL_Info, "BF top %d postings truth recall @%d:%f.\n", sampleK, truthK, truthacc / sampleSize / truthK);
        LOG(Helper::LogLevel::LL_Info,
            "Percent of truths in postings have shorter distance than query selected heads: %f percent\n",
            shorter / lost * 100);
        LOG(Helper::LogLevel::LL_Info,
            "Percent of truths in postings have longer distance than query selected heads: %f percent\n",
            longer / lost * 100);
        LOG(Helper::LogLevel::LL_Info,
            "\tPercent of truths no shorter distance in query selected heads: %f percent\n",
            (longer - buildNearQueryHeads) / lost * 100);
        LOG(Helper::LogLevel::LL_Info,
            "\tPercent of truths exists shorter distance in query selected heads: %f percent\n",
            buildNearQueryHeads / lost * 100);
        LOG(Helper::LogLevel::LL_Info,
            "\t\tRNG rule ANN search loss: %f percent\n", buildAnnNotFound / lost * 100);
        LOG(Helper::LogLevel::LL_Info,
            "\t\tPosting cut loss: %f percent\n", buildPostingCut / lost * 100);
        LOG(Helper::LogLevel::LL_Info,
            "\t\tRNG rule loss: %f percent\n", buildRNGRule / lost * 100);
    }
}
}
}
}
|
GB_unaryop__identity_uint16_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint16_uint32
// op(A') function: GB_tran__identity_uint16_uint32
// C type: uint16_t
// A type: uint32_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [k] = (uint16_t) Ax [k]: apply the identity operator with typecast,
// elementwise over anz entries, using nthreads OpenMP threads.
GrB_Info GB_unop__identity_uint16_uint32
(
    uint16_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // Cx [k] = op (cast (Ax [k]))
        GB_CAST_OP (k, k) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast uint32_t -> uint16_t, and apply
// the identity operator; the work is done by GB_unaryop_transpose.c using
// the GB_* macros defined above.
GrB_Info GB_tran__identity_uint16_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // phase 2: numeric transpose (phase 1 counted entries elsewhere)
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
threading_utils.h | /*!
* Copyright 2015-2019 by Contributors
* \file common.h
* \brief Threading utilities
*/
#ifndef XGBOOST_COMMON_THREADING_UTILS_H_
#define XGBOOST_COMMON_THREADING_UTILS_H_
#include <vector>
#include <algorithm>
namespace xgboost {
namespace common {
// Represent simple range of indexes [begin, end)
// Inspired by tbb::blocked_range
// Represent a simple half-open range of indexes [begin, end).
// Inspired by tbb::blocked_range.
class Range1d {
 public:
  // Requires begin < end (checked); an empty range is not representable.
  Range1d(size_t begin, size_t end): begin_(begin), end_(end) {
    CHECK_LT(begin, end);
  }
  // First index of the range (inclusive).
  // const-qualified: accessors do not modify the range, and this allows
  // calling them on a const Range1d (backward-compatible change).
  size_t begin() const {
    return begin_;
  }
  // One-past-last index of the range (exclusive).
  size_t end() const {
    return end_;
  }
 private:
  size_t begin_;
  size_t end_;
};
// Split 2d space to balanced blocks
// Implementation of the class is inspired by tbb::blocked_range2d
// However, TBB provides only (n x m) 2d range (matrix) separated by blocks. Example:
// [ 1,2,3 ]
// [ 4,5,6 ]
// [ 7,8,9 ]
// But the class is able to work with different sizes in each 'row'. Example:
// [ 1,2 ]
// [ 3,4,5,6 ]
// [ 7,8,9]
// If grain_size is 2: It produces following blocks:
// [1,2], [3,4], [5,6], [7,8], [9]
// The class helps to process data in several tree nodes (non-balanced usually) in parallel
// Using nested parallelism (by nodes and by data in each node)
// it helps to improve CPU resources utilization
// Split a ragged 2d space into balanced blocks (tasks) of at most grain_size
// elements each; see the file-level comment above for the motivation.
class BlockedSpace2d {
 public:
  // Example of space:
  // [ 1,2 ]
  // [ 3,4,5,6 ]
  // [ 7,8,9]
  // BlockedSpace2d will create following blocks (tasks) if grain_size=2:
  // 1-block: first_dimension = 0, range of indexes in a 'row' = [0,2) (includes [1,2] values)
  // 2-block: first_dimension = 1, range of indexes in a 'row' = [0,2) (includes [3,4] values)
  // 3-block: first_dimension = 1, range of indexes in a 'row' = [2,4) (includes [5,6] values)
  // 4-block: first_dimension = 2, range of indexes in a 'row' = [0,2) (includes [7,8] values)
  // 5-block: first_dimension = 2, range of indexes in a 'row' = [2,3) (includes [9] values)
  // Arguments:
  // dim1 - size of the first dimension in the space
  // getter_size_dim2 - functor to get the second dimensions for each 'row' by row-index
  // grain_size - max size of produced blocks
  template<typename Func>
  BlockedSpace2d(size_t dim1, Func getter_size_dim2, size_t grain_size) {
    for (size_t i = 0; i < dim1; ++i) {
      const size_t size = getter_size_dim2(i);
      // ceil(size / grain_size): number of blocks needed to cover this row
      const size_t n_blocks = size/grain_size + !!(size % grain_size);
      for (size_t iblock = 0; iblock < n_blocks; ++iblock) {
        const size_t begin = iblock * grain_size;
        // last block of a row may be shorter than grain_size
        const size_t end = std::min(begin + grain_size, size);
        AddBlock(i, begin, end);
      }
    }
  }
  // Amount of blocks(tasks) in a space
  size_t Size() const {
    return ranges_.size();
  }
  // get index of the first dimension of i-th block(task)
  size_t GetFirstDimension(size_t i) const {
    CHECK_LT(i, first_dimension_.size());
    return first_dimension_[i];
  }
  // get a range of indexes for the second dimension of i-th block(task)
  Range1d GetRange(size_t i) const {
    CHECK_LT(i, ranges_.size());
    return ranges_[i];
  }
 private:
  // record one block: its row index and its [begin, end) column range
  void AddBlock(size_t first_dimension, size_t begin, size_t end) {
    first_dimension_.push_back(first_dimension);
    ranges_.emplace_back(begin, end);
  }
  std::vector<Range1d> ranges_;           // per-block column range
  std::vector<size_t> first_dimension_;   // per-block row index (parallel array)
};
// Wrapper to implement nested parallelism with simple omp parallel for
// Wrapper implementing nested parallelism with a plain "omp parallel for":
// each OpenMP iteration runs func(row_index, column_range) for one block.
template<typename Func>
void ParallelFor2d(const BlockedSpace2d& space, Func func) {
  const int n_tasks = static_cast<int>(space.Size());
#pragma omp parallel for
  for (int task_idx = 0; task_idx < n_tasks; task_idx++) {
    func(space.GetFirstDimension(task_idx), space.GetRange(task_idx));
  }
}
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_THREADING_UTILS_H_
|
GB_binop__times_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_uint16)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__times_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__times_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_uint16)
// A*D function (colscale): GB (_AxD__times_uint16)
// D*A function (rowscale): GB (_DxB__times_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__times_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__times_uint16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_uint16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_uint16)
// C=scalar+B GB (_bind1st__times_uint16)
// C=scalar+B' GB (_bind1st_tran__times_uint16)
// C=A+scalar GB (_bind2nd__times_uint16)
// C=A'+scalar GB (_bind2nd_tran__times_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x * y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_UINT16 || GxB_NO_TIMES_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; "+" is the TIMES binop here.
// No GB_DISABLE guard: callers only reach this for enabled operators.
void GB (_Cdense_ewise3_accum__times_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; "+" is the TIMES binop here.
GrB_Info GB (_Cdense_ewise3_noaccum__times_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, sliced by
// B_ek_slicing into B_ntasks tasks over B_nthreads threads.
GrB_Info GB (_Cdense_accumB__times_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed as untyped GB_void*) into a dense
// matrix C. The original generated code had a second, unreachable
// "return (GrB_SUCCESS)" after the braced block that already returns;
// the dead statement is removed here.
GrB_Info GB (_Cdense_accumb__times_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__times_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's numeric values, consumed by the template
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__times_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed view of C's numeric values, consumed by the template
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, where entries present in either A or B
// appear in C; the TIMES binop combines entries present in both.
GrB_Info GB (_AaddB__times_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace for slicing M, A, and B; freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B — entries present in both A and B are
// combined with the TIMES binop.
GrB_Info GB (_AemultB_01__times_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 0 for TIMES (commutative), so the flipxy argument is
// resolved at compile time to the non-flipped template.
GrB_Info GB (_AemultB_02__times_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and A and B are bitmap/full.
GrB_Info GB (_AemultB_03__times_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held in bitmap form.
GrB_Info GB (_AemultB_bitmap__times_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = x * Bx [k] for every entry of B present per its bitmap Bb
// (bind the scalar x as the first operand of TIMES).
GrB_Info GB (_bind1st__times_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip entries not present in B's bitmap
        if (!GBB (Bb, k)) continue ;
        uint16_t bkj = Bx [k] ;
        Cx [k] = (x * bkj) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = Ax [k] * y for every entry of A present per its bitmap Ab
// (bind the scalar y as the second operand of TIMES).
GrB_Info GB (_bind2nd__times_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip entries not present in A's bitmap
        if (!GBB (Ab, k)) continue ;
        uint16_t akj = Ax [k] ;
        Cx [k] = (akj * y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (x * aij) ; \
}
// C = op (x, A'): transpose A and apply TIMES with the scalar x bound as the
// first operand; GB_CAST_OP (redefined just above) does cij = x * aij.
GrB_Info GB (_bind1st_tran__times_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent code (same type here, done for
    // uniformity with the code generator)
    #undef GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: cij = aij * y
// (no typecasting, despite the macro's name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = (aij * y) ; \
}
// C = op (A', y): transpose A and apply TIMES with the scalar bound second.
GrB_Info GB (_bind2nd_tran__times_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// scalar bound to the second operand of z = aij * y
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
1590.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "correlation.h"
/* Array initialization. */
/* Fill the m-by-n data matrix with the deterministic pattern i*j/M and
   set float_n (the normalization divisor used by the kernel). */
static
void init_array (int m,
int n,
DATA_TYPE *float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
int i, j;
*float_n = 1.2;
for (i = 0; i < m; i++)
for (j = 0; j < n; j++)
data[i][j] = ((DATA_TYPE) i*j) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Dump the m-by-m correlation matrix to stderr (20 values per line).
   Also serves as the live-out scan that defeats dead-code elimination. */
static
void print_array(int m,
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
int i, j;
for (i = 0; i < m; i++)
for (j = 0; j < m; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]);
if ((i * m + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Compute the m-by-m Pearson correlation matrix of the n-by-m data matrix:
   (1) column means, (2) column standard deviations (clamped to 1.0 near
   zero to avoid a divide by zero), (3) center and scale data in place,
   (4) accumulate symmat = data' * data over the normalized columns.
   Only step (3) is offloaded via "omp target"; the other loops run on the
   host. */
static
void kernel_correlation(int m, int n,
DATA_TYPE float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
DATA_TYPE POLYBENCH_1D(mean,M,m),
DATA_TYPE POLYBENCH_1D(stddev,M,m))
{
int i, j, j1, j2;
DATA_TYPE eps = 0.1f;
#define sqrt_of_array_cell(x,j) sqrt(x[j])
#pragma scop
/* Determine mean of column vectors of input data matrix */
{
for (j = 0; j < _PB_M; j++)
{
mean[j] = 0.0;
for (i = 0; i < _PB_N; i++)
mean[j] += data[i][j];
mean[j] /= float_n;
}
/* Determine standard deviations of column vectors of data matrix. */
for (j = 0; j < _PB_M; j++)
{
stddev[j] = 0.0;
for (i = 0; i < _PB_N; i++)
stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]);
stddev[j] /= float_n;
stddev[j] = sqrt_of_array_cell(stddev, j);
/* The following in an inelegant but usual way to handle
near-zero std. dev. values, which below would cause a zero-
divide. */
stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j];
}
/* Center and reduce the column vectors. */
for (i = 0; i < _PB_N; i++)
{
#pragma omp target teams distribute thread_limit(256)
for (j = 0; j < _PB_M; j++)
{
data[i][j] -= mean[j];
/* NOTE(review): sqrt(float_n) is loop-invariant and recomputed per
   element; hoisting it would be a safe speedup. Left unchanged. */
data[i][j] /= sqrt(float_n) * stddev[j];
}
}
/* Calculate the m * m correlation matrix. */
for (j1 = 0; j1 < _PB_M-1; j1++)
{
symmat[j1][j1] = 1.0;
for (j2 = j1+1; j2 < _PB_M; j2++)
{
symmat[j1][j2] = 0.0;
for (i = 0; i < _PB_N; i++)
symmat[j1][j2] += (data[i][j1] * data[i][j2]);
/* correlation matrix is symmetric: mirror the upper triangle */
symmat[j2][j1] = symmat[j1][j2];
}
}
}
#pragma endscop
/* last diagonal element is not covered by the j1 loop above */
symmat[_PB_M-1][_PB_M-1] = 1.0;
}
/* Standard PolyBench driver: allocate, initialize, time the kernel,
   and print live-out data so it is not dead-code-eliminated. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_correlation (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(mean),
POLYBENCH_ARRAY(stddev));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(mean);
POLYBENCH_FREE_ARRAY(stddev);
return 0;
}
|
rawmd5u_fmt_plug.c | /*
* Thick raw-md5-unicode (come-back :)
*
* This software is Copyright (c) 2011 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawmd5uthick;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawmd5uthick);
#else
#include <string.h>
#include "arch.h"
#ifdef MMX_COEF
#define NBKEYS (MMX_COEF * MD5_SSE_PARA)
#endif
#include "sse-intrinsics.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "unicode.h"
#include "memory.h"
#include "johnswap.h"
#include "memdbg.h"
#define FORMAT_LABEL "Raw-MD5u"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "md5(unicode($p)) " MD5_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define CIPHERTEXT_LENGTH 32
#define BINARY_SIZE 16
#define BINARY_ALIGN 4
#define SALT_SIZE 0
#define SALT_ALIGN 1
#ifdef MMX_COEF
#define BLOCK_LOOPS 1
#define PLAINTEXT_LENGTH 27
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS * BLOCK_LOOPS
#define GETPOS(i, index) ( (index&(MMX_COEF-1))*4 + ((i)&(0xffffffff-3))*MMX_COEF + ((i)&3) + (index>>(MMX_COEF>>1))*16*MMX_COEF*4 )
#else
#define PLAINTEXT_LENGTH 125
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#ifdef MMX_COEF
static unsigned char (*saved_key);
static unsigned char (*crypt_key);
static unsigned int (**buf_ptr);
#else
static MD5_CTX ctx;
static int saved_key_length;
static UTF16 saved_key[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 crypt_key[BINARY_SIZE / 4];
#endif
/* Note some plaintexts will be replaced in init() if running UTF-8 */
/* Entries 1-4 are placeholders (all hash "") that init() overwrites with
   encoding-specific vectors when running UTF-8 or an 8-bit codepage. */
static struct fmt_tests tests[] = {
{"16c47151c18ac087cd12b3a70746c790", "test1"},
{"d41d8cd98f00b204e9800998ecf8427e", ""},
{"d41d8cd98f00b204e9800998ecf8427e", ""},
{"d41d8cd98f00b204e9800998ecf8427e", ""},
{"d41d8cd98f00b204e9800998ecf8427e", ""},
{"d41d8cd98f00b204e9800998ecf8427e", ""},
{"9c3abef89ff76f8acd80eae37b35f64f", "test2"},
{"849ee1b88b5d887bdb058180a666b450", "test3"},
{"8c4cb7e8b33b56a833cdaa8673f3b425", "test4"},
{"537e738b1ac5551f65106368dc301ece", "thatsworking"},
{NULL}
};
static void set_key_utf8(char *_key, int index);
static void set_key_CP(char *_key, int index);
/*
 * One-time format setup: pick the set_key() variant for the configured
 * target encoding, swap in encoding-specific self-test vectors, and
 * (SIMD builds) allocate the interleaved key/output buffers.
 */
static void init(struct fmt_main *self)
{
#if MMX_COEF
int i;
#endif
if (pers_opts.target_enc == UTF_8) {
/* This avoids an if clause for every set_key */
self->methods.set_key = set_key_utf8;
#if MMX_COEF
/* kick it up from 27. We will truncate in setkey_utf8() */
self->params.plaintext_length = 3 * PLAINTEXT_LENGTH;
#endif
tests[1].ciphertext = "94a4e171de16580742c4d141e6607bf7";
tests[1].plaintext = "\xE2\x82\xAC"; // Euro sign
tests[2].ciphertext = "03c60810f0e54d16e826aca385d776c8";
tests[2].plaintext = "\xE2\x82\xAC\xE2\x82\xAC"; // 2 x euro
tests[3].ciphertext = "2d554433d7cde7ec8d16aaf126c3be6b";
tests[3].plaintext = "\xE2\x82\xAC\xC3\xBC"; // euro and u-umlaut
tests[4].ciphertext = "8007d9070b27db7b30433df2cd10abc1";
tests[4].plaintext = "\xC3\xBC\xE2\x82\xAC"; // u-umlaut and euro
} else {
if (pers_opts.target_enc != ASCII &&
pers_opts.target_enc != ISO_8859_1) {
/* This avoids an if clause for every set_key */
self->methods.set_key = set_key_CP;
}
/* only use 8-bit vectors if the codepage maps 0xFC to u-umlaut */
if (CP_to_Unicode[0xfc] == 0x00fc) {
tests[1].ciphertext = "ea7ab2b5c07650badab30790d0c9b63e";
tests[1].plaintext = "\xFC"; // German u-umlaut in iso-8859-1
tests[2].ciphertext = "f0a0b9f1dea0e458cec9a284ff434d44";
tests[2].plaintext = "\xFC\xFC";
tests[3].ciphertext = "d25a0b436b768777cc9a343d283dbf5a";
tests[3].plaintext = "\xFC\xFC\xFC";
tests[4].ciphertext = "719917322bf12168f8c55939e4fec8de";
tests[4].plaintext = "\xFC\xFC\xFC\xFC";
}
}
#if MMX_COEF
/* 64 bytes per candidate: one MD5 block each, SIMD-interleaved */
saved_key = mem_calloc_tiny(sizeof(*saved_key) * 64*self->params.max_keys_per_crypt, MEM_ALIGN_SIMD);
crypt_key = mem_calloc_tiny(sizeof(*crypt_key) * BINARY_SIZE*self->params.max_keys_per_crypt, MEM_ALIGN_SIMD);
/* per-candidate pointer to its first 32-bit word in saved_key */
buf_ptr = mem_calloc_tiny(sizeof(*buf_ptr) * self->params.max_keys_per_crypt, sizeof(*buf_ptr));
for (i=0; i<self->params.max_keys_per_crypt; i++)
buf_ptr[i] = (unsigned int*)&saved_key[GETPOS(0, i)];
#endif
}
/*
 * Canonicalize a hash: prepend the "$dynamic_29$" tag (stripping any
 * existing one first) and lower-case the 32 hex digits.
 * Returns a static buffer, overwritten on each call.
 */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[32 + 12 + 1];
	const char *hash = ciphertext;

	/* never emit the tag twice */
	if (!strncmp(hash, "$dynamic_29$", 12))
		hash += 12;

	memcpy(out, "$dynamic_29$", 12);
	memcpy(out + 12, hash, 32);
	out[sizeof(out) - 1] = 0;
	strlwr(out + 12);
	return out;
}
/* Accept exactly CIPHERTEXT_LENGTH hex digits, with an optional tag. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p = ciphertext;
	int len = 0;

	if (!strncmp(p, "$dynamic_29$", 12))
		p += 12;
	while (atoi16[ARCH_INDEX(*p)] != 0x7F) {
		p++;
		len++;
	}
	/* must end exactly at the terminator with the right digit count */
	return *p == 0 && len == CIPHERTEXT_LENGTH;
}
/*
 * Decode the 32 hex digits (after the 12-byte tag) into the four 32-bit
 * MD5 state words. The shift pattern (4,0,12,8,20,16,28,24) packs each
 * group of 8 hex digits as the little-endian word MD5 produces; on
 * big-endian hosts the word is byte-swapped to match crypt_key's layout.
 * Returns a pointer to a static buffer.
 */
static void *binary(char *ciphertext)
{
static union {
unsigned long dummy;
unsigned int i[BINARY_SIZE/sizeof(unsigned int)];
} _out;
unsigned int *out = _out.i;
unsigned int i;
unsigned int temp;
/* skip the "$dynamic_29$" tag added by split() */
ciphertext+=12;
for (i=0; i<4; i++)
{
temp = (atoi16[ARCH_INDEX(ciphertext[i*8+0])])<<4;
temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+1])]);
temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+2])])<<12;
temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+3])])<<8;
temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+4])])<<20;
temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+5])])<<16;
temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+6])])<<28;
temp |= (atoi16[ARCH_INDEX(ciphertext[i*8+7])])<<24;
#if ARCH_LITTLE_ENDIAN
out[i]=temp;
#else
out[i]=JOHNSWAP(temp);
#endif
}
return out;
}
// ISO-8859-1 to UCS-2, directly into vector key buffer
/*
 * Store one candidate as UCS-2 (ISO-8859-1 input: each byte maps 1:1 to a
 * 16-bit code unit). SIMD build: two code units are packed per 32-bit
 * store into the interleaved buffer, the 0x80 MD5 padding byte is
 * appended, stale words from a longer previous key are zeroed, and the
 * MD5 bit-length word (len 16-bit units * 16 bits = len << 4) is written
 * at word 14 of the block.
 * NOTE(review): the (index&3)/(index>>2) math assumes MMX_COEF == 4 —
 * confirm for other vector widths.
 */
static void set_key(char *_key, int index)
{
#ifdef MMX_COEF
const unsigned char *key = (unsigned char*)_key;
unsigned int *keybuf_word = buf_ptr[index];
unsigned int len, temp2;
len = 0;
while((temp2 = *key++)) {
unsigned int temp;
/* pair the next byte with this one, unless the key ends here */
if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1)
{
temp2 |= (temp << 16);
*keybuf_word = temp2;
}
else
{
/* odd length: padding byte goes in the high half */
temp2 |= (0x80 << 16);
*keybuf_word = temp2;
len++;
goto key_cleaning;
}
len += 2;
keybuf_word += MMX_COEF;
}
/* even length: padding byte occupies its own word */
*keybuf_word = 0x80;
key_cleaning:
keybuf_word += MMX_COEF;
/* zero remnants of any longer previous key */
while(*keybuf_word) {
*keybuf_word = 0;
keybuf_word += MMX_COEF;
}
((unsigned int *)saved_key)[14*MMX_COEF + (index&3) + (index>>2)*16*MMX_COEF] = len << 4;
#else
#if ARCH_LITTLE_ENDIAN
/* little-endian: widening byte copy yields UTF-16LE directly */
UTF8 *s = (UTF8*)_key;
UTF16 *d = saved_key;
while (*s)
*d++ = *s++;
*d = 0;
saved_key_length = (int)((char*)d - (char*)saved_key);
#else
/* NOTE(review): this writes each char then skips a byte, and only the
   next single byte is zeroed; byte order and stale data for shrinking
   keys look suspicious — confirm against upstream. */
UTF8 *s = (UTF8*)_key;
UTF8 *d = (UTF8*)saved_key;
while (*s) {
*d++ = *s++;
++d;
}
*d = 0;
saved_key_length = (int)((char*)d - (char*)saved_key);
#endif
#endif
}
// Legacy codepage to UCS-2, directly into vector key buffer
/*
 * Same as set_key(), but each input byte is first mapped through the
 * legacy-codepage-to-Unicode table before being stored as UCS-2.
 * Scalar build delegates to enc_to_utf16() (result in bytes, hence <<1);
 * on conversion error it falls back to the length of the valid prefix.
 */
static void set_key_CP(char *_key, int index)
{
#ifdef MMX_COEF
const unsigned char *key = (unsigned char*)_key;
unsigned int *keybuf_word = buf_ptr[index];
unsigned int len, temp2;
len = 0;
while((temp2 = *key++)) {
unsigned int temp;
temp2 = CP_to_Unicode[temp2];
if ((temp = *key++) && len < PLAINTEXT_LENGTH - 1)
{
temp = CP_to_Unicode[temp];
temp2 |= (temp << 16);
*keybuf_word = temp2;
} else {
/* odd length: 0x80 padding byte in the high half */
temp2 |= (0x80 << 16);
*keybuf_word = temp2;
len++;
goto key_cleaning_enc;
}
len += 2;
keybuf_word += MMX_COEF;
}
*keybuf_word = 0x80;
key_cleaning_enc:
keybuf_word += MMX_COEF;
/* zero remnants of any longer previous key */
while(*keybuf_word) {
*keybuf_word = 0;
keybuf_word += MMX_COEF;
}
/* MD5 bit-length word: len UTF-16 units * 16 bits */
((unsigned int *)saved_key)[14*MMX_COEF + (index&3) + (index>>2)*16*MMX_COEF] = len << 4;
#else
saved_key_length = enc_to_utf16((UTF16*)&saved_key,
PLAINTEXT_LENGTH + 1,
(unsigned char*)_key,
strlen(_key)) << 1;
if (saved_key_length < 0)
saved_key_length = strlen16(saved_key);
#endif
}
// UTF-8 to UCS-2, directly into vector key buffer
/*
 * Decode UTF-8 on the fly into UTF-16 code units, storing two units per
 * 32-bit word (low unit | high unit << 16) in the interleaved SSE buffer,
 * appending the 0x80 MD5 padding byte, zeroing remnants of a previous
 * longer key, and writing the MD5 bit-length word (len << 4).
 *
 * BUG FIX: in the decode of the SECOND code unit of a pair, the 4-byte
 * (case 3) branch accumulated into chl instead of chh, corrupting any
 * 4-byte UTF-8 sequence that landed in the high half of a word.
 */
static void set_key_utf8(char *_key, int index)
{
#ifdef MMX_COEF
	const UTF8 *source = (UTF8*)_key;
	unsigned int *keybuf_word = buf_ptr[index];
	UTF32 chl, chh = 0x80;
	unsigned int len = 0;

	while (*source) {
		/* decode the first (low) UTF-16 code unit */
		chl = *source;
		if (chl >= 0xC0) {
			unsigned int extraBytesToRead = opt_trailingBytesUTF8[chl & 0x3f];
			switch (extraBytesToRead) {
			case 3:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fallthrough */
			case 2:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fallthrough */
			case 1:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fallthrough */
			case 0:
				break;
			default:
				goto bailout;
			}
			chl -= offsetsFromUTF8[extraBytesToRead];
		}
		source++;
		len++;
		if (chl > UNI_MAX_BMP) {
			/* non-BMP character: emit a surrogate pair */
			if (len == PLAINTEXT_LENGTH) {
				chh = 0x80;
				*keybuf_word = (chh << 16) | chl;
				keybuf_word += MMX_COEF;
				break;
			}
#define halfBase 0x0010000UL
#define halfShift 10
#define halfMask 0x3FFUL
#define UNI_SUR_HIGH_START (UTF32)0xD800
#define UNI_SUR_LOW_START (UTF32)0xDC00
			chl -= halfBase;
			chh = (UTF16)((chl & halfMask) + UNI_SUR_LOW_START);
			chl = (UTF16)((chl >> halfShift) + UNI_SUR_HIGH_START);
			len++;
		} else if (*source && len < PLAINTEXT_LENGTH) {
			/* decode the second (high) UTF-16 code unit */
			chh = *source;
			if (chh >= 0xC0) {
				unsigned int extraBytesToRead =
					opt_trailingBytesUTF8[chh & 0x3f];
				switch (extraBytesToRead) {
				case 3:
					++source;
					if (*source) {
						chh <<= 6;	/* was chl: bug fix */
						chh += *source;	/* was chl: bug fix */
					} else
						goto bailout;
					/* fallthrough */
				case 2:
					++source;
					if (*source) {
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
					/* fallthrough */
				case 1:
					++source;
					if (*source) {
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
					/* fallthrough */
				case 0:
					break;
				default:
					goto bailout;
				}
				chh -= offsetsFromUTF8[extraBytesToRead];
			}
			source++;
			len++;
		} else {
			/* key ends on an odd unit: pad the high half */
			chh = 0x80;
			*keybuf_word = (chh << 16) | chl;
			keybuf_word += MMX_COEF;
			break;
		}
		*keybuf_word = (chh << 16) | chl;
		keybuf_word += MMX_COEF;
	}
	/* even length (or empty key): padding byte gets its own word */
	if (chh != 0x80 || len == 0) {
		*keybuf_word = 0x80;
		keybuf_word += MMX_COEF;
	}
bailout:
	/* zero remnants of any longer previous key */
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += MMX_COEF;
	}
	/* MD5 bit-length word: len UTF-16 units * 16 bits */
	((unsigned int *)saved_key)[14*MMX_COEF + (index&3) + (index>>2)*16*MMX_COEF] = len << 4;
#else
	saved_key_length = utf8_to_utf16((UTF16*)&saved_key,
	                                 PLAINTEXT_LENGTH + 1,
	                                 (unsigned char*)_key,
	                                 strlen(_key)) << 1;
	if (saved_key_length < 0)
		saved_key_length = strlen16(saved_key);
#endif
}
/*
 * Reconstruct the plaintext for candidate `index` from the key buffer.
 * SIMD build: unpack two UTF-16 units per interleaved 32-bit word until
 * the 0x80 padding byte is found (md5_size advances twice per iteration:
 * once in the loop header, once in the body). Result is converted back to
 * the target encoding; both paths return a static/shared buffer.
 */
static char *get_key(int index)
{
#ifdef MMX_COEF
// Get the key back from the key buffer, from UCS-2
unsigned int *keybuffer = (unsigned int*)&saved_key[GETPOS(0, index)];
static UTF16 key[PLAINTEXT_LENGTH + 1];
unsigned int md5_size=0;
unsigned int i=0;
for(; md5_size < PLAINTEXT_LENGTH; i += MMX_COEF, md5_size++)
{
key[md5_size] = keybuffer[i];
key[md5_size+1] = keybuffer[i] >> 16;
/* low half holds the padding byte and high half is empty: done */
if (key[md5_size] == 0x80 && key[md5_size+1] == 0) {
key[md5_size] = 0;
break;
}
++md5_size;
/* padding byte in the high half (checked against the next word) */
if (key[md5_size] == 0x80 && ((keybuffer[i+MMX_COEF]&0xFFFF) == 0 || md5_size == PLAINTEXT_LENGTH)) {
key[md5_size] = 0;
break;
}
}
return (char*)utf16_to_enc(key);
#else
return (char*)utf16_to_enc(saved_key);
#endif
}
/*
 * Quick scan: does ANY computed hash match the first 32-bit word of the
 * binary? SIMD build checks word 0 of every lane in the interleaved
 * output; full verification is left to cmp_one()/cmp_exact().
 */
static int cmp_all(void *binary, int count) {
#ifdef MMX_COEF
unsigned int x,y=0;
for(;y<MD5_SSE_PARA*BLOCK_LOOPS;y++)
for(x=0;x<MMX_COEF;x++)
{
if( ((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4] )
return 1;
}
return 0;
#else
return !memcmp(binary, crypt_key, BINARY_SIZE);
#endif
}
/* Full 16-byte comparison already happened in cmp_one(); nothing more
   to verify here. */
static int cmp_exact(char *source, int count)
{
	return 1;
}
/*
 * Compare all four 32-bit words of the binary against hash `index`.
 * SIMD build: lane x = index&3, block y = index/4; consecutive words of
 * one hash are MMX_COEF apart in the interleaved output buffer.
 * NOTE(review): index&3 / index/4 assume MMX_COEF == 4 — confirm.
 */
static int cmp_one(void *binary, int index)
{
#ifdef MMX_COEF
unsigned int x,y;
x = index&3;
y = index/4;
if( ((ARCH_WORD_32*)binary)[0] != ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4] )
return 0;
if( ((ARCH_WORD_32*)binary)[1] != ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4+MMX_COEF] )
return 0;
if( ((ARCH_WORD_32*)binary)[2] != ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4+2*MMX_COEF] )
return 0;
if( ((ARCH_WORD_32*)binary)[3] != ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4+3*MMX_COEF] )
return 0;
return 1;
#else
return !memcmp(binary, crypt_key, BINARY_SIZE);
#endif
}
/*
 * Hash all queued candidates. SIMD build runs the whole interleaved
 * buffer through SSEmd5body (keys were already padded and length-stamped
 * by set_key*). Scalar build hashes the single saved key
 * (MAX_KEYS_PER_CRYPT == 1 in that configuration).
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
#if defined(MMX_COEF)
#if (BLOCK_LOOPS > 1)
int i;
// This was an experiment. It's not used (unless you bump BLOCK_LOOPS),
// cause it does not scale well. We would need to parallelize set_key()
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < BLOCK_LOOPS; i++)
SSEmd5body(&saved_key[i*NBKEYS*64], (unsigned int*)&crypt_key[i*NBKEYS*BINARY_SIZE], NULL, SSEi_MIXED_IN);
#else
SSEmd5body(saved_key, (unsigned int*)crypt_key, NULL, SSEi_MIXED_IN);
#endif
#else
MD5_Init( &ctx );
MD5_Update(&ctx, (unsigned char*)saved_key, saved_key_length);
MD5_Final((unsigned char*) crypt_key, &ctx);
#endif
return count;
}
#ifdef MMX_COEF
/*
 * get_hash_N: return the low 4*(N+1) bits (get_hash_6: 27 bits) of the
 * first word of hash `index`, used by the cracker's hash tables.
 * Lane math x = index&3, y = index/4 locates the word in the interleaved
 * SSE output (NOTE(review): assumes MMX_COEF == 4 — confirm).
 */
static int get_hash_0(int index)
{
unsigned int x,y;
x = index&3;
y = index/4;
return ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4] & 0xf;
}
static int get_hash_1(int index)
{
unsigned int x,y;
x = index&3;
y = index/4;
return ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4] & 0xff;
}
static int get_hash_2(int index)
{
unsigned int x,y;
x = index&3;
y = index/4;
return ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4] & 0xfff;
}
static int get_hash_3(int index)
{
unsigned int x,y;
x = index&3;
y = index/4;
return ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4] & 0xffff;
}
static int get_hash_4(int index)
{
unsigned int x,y;
x = index&3;
y = index/4;
return ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4] & 0xfffff;
}
static int get_hash_5(int index)
{
unsigned int x,y;
x = index&3;
y = index/4;
return ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4] & 0xffffff;
}
static int get_hash_6(int index)
{
unsigned int x,y;
x = index&3;
y = index/4;
return ((ARCH_WORD_32*)crypt_key)[x+y*MMX_COEF*4] & 0x7ffffff;
}
#else
/* scalar build: crypt_key holds a single hash, index is effectively 0 */
static int get_hash_0(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & 0xf; }
static int get_hash_1(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & 0xff; }
static int get_hash_2(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & 0xfff; }
static int get_hash_3(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & 0xffff; }
static int get_hash_4(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & 0xfffff; }
static int get_hash_5(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & 0xffffff; }
static int get_hash_6(int index) { return ((ARCH_WORD_32*)crypt_key)[index] & 0x7ffffff; }
#endif
/* Format descriptor registered with the John the Ripper core:
   first member block is the static parameters, second the methods. */
struct fmt_main fmt_rawmd5uthick = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#if (BLOCK_LOOPS > 1) && defined(SSE_MD5_PARA)
FMT_OMP |
#endif
FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_SPLIT_UNIFIES_CASE,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
tests
}, {
init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
binary,
fmt_default_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
fmt_default_set_salt,
set_key,                /* may be swapped for set_key_CP/_utf8 in init() */
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
fvde_fmt_plug.c | /* JtR format to crack FileVault 2 hashes.
*
* This software is Copyright (c) 2017, Dhiru Kholia <kholia at kth.se> and it
* is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Big thanks to Omar Choudary, Felix Grobert and Joachim Metz for making this
* format possible.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_fvde;
#elif FMT_REGISTERS_H
john_register_one(&fmt_fvde);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 8
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "aes.h"
#include "pbkdf2_hmac_sha256.h"
#include "jumbo.h"
#include "memdbg.h"
#include "fvde_common.h"
#define FORMAT_LABEL "FVDE"
#define FORMAT_NAME "FileVault 2"
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA256 AES " SHA256_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME "PBKDF2-SHA256 AES 64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME "PBKDF2-SHA256 AES 32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(*cur_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(uint64_t)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked, cracked_count;
static fvde_custom_salt *cur_salt;
/*
 * Allocate per-session buffers, scaling the batch size by the OpenMP
 * thread count (times OMP_SCALE for better load balancing).
 */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt);
cracked_count = self->params.max_keys_per_crypt;
}
/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(cracked);
}
/* Select the salt used by subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
cur_salt = (fvde_custom_salt *)salt;
}
/* Store candidate `index`, truncated to PLAINTEXT_LENGTH and
   NUL-terminated by strnzcpy. */
static void fvde_set_key(char *key, int index)
{
strnzcpy(saved_key[index], key, sizeof(*saved_key));
}
/* Return the stored plaintext for candidate `index`. */
static char *get_key(int index)
{
return saved_key[index];
}
/*
* Unwrap data using AES Key Wrap (RFC3394)
*
* Translated from "AESUnwrap" function in aeswrap.py from https://github.com/dinosec/iphone-dataprotection project.
*
* The C implementation "aes_key_unwrap" in ramdisk_tools/bsdcrypto/key_wrap.c doesn't look any better.
*
* "libfvde_encryption_aes_key_unwrap" isn't great to look at either.
*/
/*
 * RFC 3394 AES key unwrap of the 24-byte blob (n = 2 data halves) with a
 * 128-bit KEK. Runs the standard 6*n unwrap rounds; success is detected
 * by the integrity register A matching the RFC 3394 default IV
 * 0xA6A6A6A6A6A6A6A6. Returns 1 on success, 0 otherwise.
 */
static int fvde_decrypt(fvde_custom_salt *cur_salt, unsigned char *key)
{
uint64_t *C = cur_salt->blob.qword; // len(C) == 3
int n = 2; // len(C) - 1
uint64_t R[3]; // n + 1 = 3
union {
uint64_t qword[2];
unsigned char stream[16];
} todecrypt;
int i, j;
AES_KEY akey;
uint64_t A = C[0];
AES_set_decrypt_key(key, 128, &akey);
for (i = 0; i < n + 1; i++)
R[i] = C[i];
for (j = 5; j >= 0; j--) { // 5 is fixed!
for (i = 2; i >=1; i--) { // i = n
/* AES input is big-endian; swap on little-endian hosts */
#if ARCH_LITTLE_ENDIAN
todecrypt.qword[0] = JOHNSWAP64(A ^ (n*j+i));
todecrypt.qword[1] = JOHNSWAP64(R[i]);
AES_ecb_encrypt(todecrypt.stream, todecrypt.stream, &akey, AES_DECRYPT);
A = JOHNSWAP64(todecrypt.qword[0]);
R[i] = JOHNSWAP64(todecrypt.qword[1]);
#else
todecrypt.qword[0] = A ^ (n*j+i);
todecrypt.qword[1] = R[i];
AES_ecb_encrypt(todecrypt.stream, todecrypt.stream, &akey, AES_DECRYPT);
A = todecrypt.qword[0];
R[i] = todecrypt.qword[1];
#endif
}
}
/* RFC 3394 default IV: correct KEK iff A unwraps to this constant */
if (A == 0xa6a6a6a6a6a6a6a6ULL)
return 1; // success!
return 0;
}
/*
 * Derive a 16-byte key from each candidate via PBKDF2-HMAC-SHA256 (SIMD
 * lanes of MAX_KEYS_PER_CRYPT when available) and attempt the RFC 3394
 * AES key unwrap; cracked[i] records per-candidate success.
 *
 * Fix: the batch `for` loop is now compiled unconditionally. Previously
 * it sat inside #ifdef _OPENMP, so a non-OpenMP build executed the body
 * exactly once and only the first MAX_KEYS_PER_CRYPT candidates were
 * ever processed.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

	memset(cracked, 0, sizeof(cracked[0]) * cracked_count);
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
		unsigned char master[MAX_KEYS_PER_CRYPT][16];
		int i;
#ifdef SIMD_COEF_32
		int lens[MAX_KEYS_PER_CRYPT];
		unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];

		/* gather pointers/lengths for the SIMD PBKDF2 lanes */
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			pout[i] = master[i];
		}
		pbkdf2_sha256_sse((const unsigned char**)pin, lens, cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, pout, 16, 0);
#else
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
			pbkdf2_sha256((unsigned char *)saved_key[index+i], strlen(saved_key[index+i]), cur_salt->salt, cur_salt->salt_length, cur_salt->iterations, master[i], 16, 0);
#endif
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			cracked[index+i] = fvde_decrypt(cur_salt, master[i]);
		}
	}
	return count;
}
/* Report whether any candidate in this batch unwrapped successfully. */
static int cmp_all(void *binary, int count)
{
	int i = count;

	while (i--) {
		if (cracked[i])
			return 1;
	}
	return 0;
}
/* Per-candidate result recorded by crypt_all(). */
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
/* The key-unwrap check in crypt_all() is already exact. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Format descriptor registered with the John the Ripper core:
   first member block is the static parameters, second the methods. */
struct fmt_main fmt_fvde = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
"iteration count",
},
{ FORMAT_TAG },
fvde_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
fvde_common_valid,
fmt_default_split,
fmt_default_binary,
fvde_common_get_salt,
{
fvde_common_iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
fvde_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
|
_detection.c | #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include <Python.h>
#include <numpy/arrayobject.h>
#include <math.h>
#ifdef __INTEL_COMPILER
#include <mkl_cblas.h>
#else
#include <cblas.h>
#define kmp_set_blocktime(k)
#endif
/* Smaller of two ints. */
static inline int min (int a, int b) { if (a < b) return a; return b; }
/* Larger of two ints. */
static inline int max (int a, int b) { if (a < b) return b; return a; }
/* x squared. */
static inline int square(int x) { return x * x; }
static void max_filter_1d(const float *vals, float *out_vals, int32_t *I,
int s, int step, int n, float a, float b) {
int i;
for (i = 0; i < n; i++) {
float max_val = -INFINITY;
int argmax = 0;
int first = max(0, i-s);
int last = min(n-1, i+s);
int j;
for (j = first; j <= last; j++) {
float val = *(vals + j*step) - a*square(i-j) - b*(i-j);
if (val > max_val) {
max_val = val;
argmax = j;
}
}
*(out_vals + i*step) = max_val;
*(I + i*step) = argmax;
}
}
/*
 * Separable 2-D generalized distance transform: run max_filter_1d over
 * rows (penalties ax, bx) into temporaries, then over columns (ay, by)
 * into `pydeformed`. pyIx/pyIy hold the x/y argmax maps; the final
 * gather composes them so pyIx refers to the original grid.
 * Returns (deformed, Ix, Iy) as new references.
 * NOTE(review): calloc and PyArray_SimpleNew results are not checked —
 * OOM would dereference NULL. Confirm acceptable for this extension.
 */
PyObject * deformation_cost (PyArrayObject * pydata, float ax, float bx, float ay, float by, int s) {
npy_intp * dims = PyArray_DIMS(pydata);
npy_intp * stride = PyArray_STRIDES(pydata);
if (PyArray_NDIM(pydata) != 2) {
PyErr_SetString(PyExc_TypeError, "data must be 2 dimensional.");
return NULL;
}
if (PyArray_DESCR(pydata)->type_num != NPY_FLOAT) {
PyErr_SetString(PyExc_TypeError, "data must be single precision floating point.");
return NULL;
}
/* require a C-contiguous float32 layout so raw pointer math is valid */
if (stride[0] != dims[1]*sizeof(float)) {
PyErr_SetString(PyExc_TypeError, "Stride[0] must be sizeof(float).");
return NULL;
}
if (stride[1] != sizeof(float)) {
PyErr_SetString(PyExc_TypeError, "Stride[1] must be Dims[0]*sizeof(float).");
return NULL;
}
PyArrayObject * pydeformed = (PyArrayObject*)PyArray_SimpleNew((npy_intp)2, dims, NPY_FLOAT);
PyArrayObject * pyIx = (PyArrayObject*)PyArray_SimpleNew((npy_intp)2, dims, NPY_INT32);
PyArrayObject * pyIy = (PyArrayObject*)PyArray_SimpleNew((npy_intp)2, dims, NPY_INT32);
float *tmpM = (float *)calloc(dims[0]*dims[1], sizeof(float));
int32_t *tmpIx = (int32_t*)calloc(dims[0]*dims[1], sizeof(int32_t));
int32_t *tmpIy = (int32_t*)calloc(dims[0]*dims[1], sizeof(int32_t));
int x, y;
/* pass 1: filter each row (step 1) */
for (y = 0; y < dims[0]; y++)
max_filter_1d((float*)PyArray_GETPTR2(pydata, y, 0), tmpM+y*dims[1], tmpIx+y*dims[1], s, 1, dims[1], ax, bx);
/* pass 2: filter each column (step = row length) into the output */
for (x = 0; x < dims[1]; x++)
max_filter_1d(tmpM+x, (float*)PyArray_GETPTR2(pydeformed, 0, x), tmpIy+x, s, dims[1], dims[0], ay, by);
/* compose the two argmax maps back onto the original grid */
for (x = 0; x < dims[1]; ++x) {
for (y = 0; y < dims[0]; ++y) {
*(int32_t*)PyArray_GETPTR2(pyIy, y, x) = tmpIy[y*dims[1]+x];
*(int32_t*)PyArray_GETPTR2(pyIx, y, x) = tmpIx[tmpIy[y*dims[1]+x]*dims[1]+x];
}
}
free(tmpM);
free(tmpIx);
free(tmpIy);
return Py_BuildValue("NNN", pydeformed, pyIx, pyIy);
}
/*
 * Cross-correlate a 32-channel feature map with a 32-channel filter:
 * filtered[k][.] = sum over (i, j, l) of filter[i][j][l] * features[i+k][j+.][l] - bias,
 * accumulated row-wise via cblas_saxpy. The valid ("tight") region is
 * tight_height x tight_width; if width/height request a larger output,
 * the extra cells are filled with -INFINITY. Returns a new float32 array.
 * PyArray_SimpleNew is wrapped in "omp critical" because FilterImages()
 * calls this function from a parallel loop.
 */
PyObject * filter_image (PyArrayObject * pyfeatures, PyArrayObject * pyfilter, float bias, int width, int height) {
npy_intp * features_dims = PyArray_DIMS(pyfeatures);
npy_intp * filter_dims = PyArray_DIMS(pyfilter);
int a, b, l;
PyArrayObject * pyfiltered = NULL;
npy_intp * features_stride = PyArray_STRIDES(pyfeatures);
npy_intp * filtered_stride = NULL;
npy_intp filtered_dims[2] = {0, 0};
int tight_width;
int tight_height;
if (PyArray_NDIM(pyfeatures) != 3) {
PyErr_SetString(PyExc_TypeError, "Features must be 3 dimensional.");
return NULL;
}
if (PyArray_NDIM(pyfilter) != 3) {
PyErr_SetString(PyExc_TypeError, "Filter must be 3 dimensional.");
return NULL;
}
if (PyArray_DESCR(pyfeatures)->type_num != NPY_FLOAT) {
PyErr_SetString(PyExc_TypeError, "Features must be single precision floating point.");
return NULL;
}
if (PyArray_DESCR(pyfilter)->type_num != NPY_FLOAT) {
PyErr_SetString(PyExc_TypeError, "Filter must be a single precision floating point.");
return NULL;
}
if (features_dims[2] != 32) {
PyErr_SetString(PyExc_TypeError, "features' feature dimsionality should be 32.");
return NULL;
}
if (filter_dims[2] != 32) {
PyErr_SetString(PyExc_TypeError, "filters' feature dimensionality should be 32.");
return NULL;
}
/* size of the region where the filter fits entirely inside the image */
tight_height = features_dims[0]-filter_dims[0]+1;
tight_width = features_dims[1]-filter_dims[1]+1;
filtered_dims[0] = height ? height : tight_height;
filtered_dims[1] = width ? width : tight_width;
if (filtered_dims[0] < 1 || filtered_dims[1] < 1) {
PyErr_SetString(PyExc_TypeError, "Input features are too small for filter.");
return NULL;
}
/* allocation is serialized: caller may be inside an OpenMP loop */
#pragma omp critical
pyfiltered = (PyArrayObject*)PyArray_SimpleNew((npy_intp)2, filtered_dims, NPY_FLOAT);
filtered_stride = PyArray_STRIDES(pyfiltered);
/* zero out array */
for (a = 0; a < tight_height; ++a) {
for (b = 0; b < tight_width; ++b) {
*(float*)PyArray_GETPTR2(pyfiltered, a, b) = -bias;
}
}
/* iterate over filter which should be tiny compared to the image */
int i;
int stride_src = features_stride[1]/sizeof(float);
int stride_dst = filtered_stride[1]/sizeof(float);
for (i = 0; i < filter_dims[0]; ++i) {
int j;
for (j = 0; j < filter_dims[1]; ++j) {
int k;
for (k = 0; k < tight_height; ++k) {
float * out = (float*)PyArray_GETPTR2(pyfiltered, k, 0);
/* for each layer */
for (l = 0; l < 32; ++l) {
float weight = *(float*)PyArray_GETPTR3(pyfilter, i, j, l);
float * in = (float*)PyArray_GETPTR3(pyfeatures, i+k, j, l);
/* accumulate one full output row at once */
cblas_saxpy(tight_width, weight, in, stride_src, out, stride_dst);
}
}
}
}
/* padding rows/columns outside the valid region score -INFINITY */
for (a = tight_height; a < filtered_dims[0]; ++a) {
for (b = 0; b < filtered_dims[1]; ++b) {
*(float*)PyArray_GETPTR2(pyfiltered, a, b) = -INFINITY;
}
}
for (a = 0; a < tight_height; ++a) {
for (b = tight_width; b < filtered_dims[1]; ++b) {
*(float*)PyArray_GETPTR2(pyfiltered, a, b) = -INFINITY;
}
}
return Py_BuildValue("N", pyfiltered);
}
/* Python entry point: DeformationCost(data, ax, bx, ay, by, s). */
static PyObject * DeformationCost(PyObject * self, PyObject * args)
{
PyArrayObject * pydata;
float ax = 0.0f, bx = 0.0f, ay = 0.0f, by = 0.0f;
int s = 0;
if (!PyArg_ParseTuple(args, "O!ffffi", &PyArray_Type, &pydata, &ax, &bx, &ay, &by, &s))
return NULL;
return deformation_cost(pydata, ax, bx, ay, by, s);
}
/* Python entry point: FilterImage(features, filter[, bias, width, height]). */
static PyObject * FilterImage(PyObject * self, PyObject * args)
{
PyArrayObject * pyfeatures;
PyArrayObject * pyfilter;
float bias = 0.0f;
int width = 0;
int height = 0;
if (!PyArg_ParseTuple(args, "O!O!|fii", &PyArray_Type, &pyfeatures, &PyArray_Type, &pyfilter, &bias, &width, &height))
return NULL;
return filter_image(pyfeatures, pyfilter, bias, width, height);
}
/*
 * Python entry point: FilterImages(features_list, filter[, bias, dims_list]).
 * Applies filter_image() to every feature map in parallel (OpenMP) and
 * returns a list of results; dims_list optionally gives (height, width)
 * padding targets per image.
 * NOTE(review): PyInt_AsLong is Python 2 only — port to PyLong_AsLong.
 * NOTE(review): if filter_image() fails inside the parallel loop,
 * results[i] is NULL and is stored into the list anyway — verify the
 * error path. calloc results are also unchecked.
 * NOTE(review): calling Python C-API from OpenMP threads relies on the
 * "omp critical" inside filter_image() for allocation only — confirm
 * this is safe with respect to the GIL.
 */
static PyObject * FilterImages(PyObject * self, PyObject * args)
{
PyObject * pyfeatures_list;
PyObject * pydims_list = NULL;
PyArrayObject * pyfilter;
float bias = 0.0f;
int numfilters;
int numdims = 0;
int i;
PyObject ** objs = NULL;
PyObject ** results = NULL;
PyObject * pyresults_list;
if (!PyArg_ParseTuple(args, "O!O!|fO!", &PyList_Type, &pyfeatures_list, &PyArray_Type, &pyfilter, &bias, &PyList_Type, &pydims_list))
return NULL;
numfilters = PyList_Size(pyfeatures_list);
if (pydims_list) {
numdims = PyList_Size(pydims_list);
}
if (numdims && numdims != numfilters) {
PyErr_SetString(PyExc_TypeError, "If pad dims are specified, then it must be the same length as the features list.");
return NULL;
}
objs = (PyObject**)calloc(numfilters, sizeof(PyObject*));
int* widths = (int*)calloc(numfilters, sizeof(int));
int* heights = (int*)calloc(numfilters, sizeof(int));
results = (PyObject**)calloc(numfilters, sizeof(PyObject*));
/* validate inputs and collect per-image pad dims before going parallel */
for (i = 0; i < numfilters; ++i) {
objs[i] = PyList_GetItem(pyfeatures_list, i);
if (!PyArray_Check(objs[i])) {
free(objs);
free(widths);
free(heights);
free(results);
PyErr_SetString(PyExc_TypeError, "Must contain a list of numpy arrays.");
return NULL;
}
if (pydims_list) {
PyObject * dims = PyList_GetItem(pydims_list, i);
if (!PyTuple_Check(dims) || 2 != PyTuple_Size(dims)) {
free(objs);
free(widths);
free(heights);
free(results);
PyErr_SetString(PyExc_TypeError, "Must contain a list of tuples.");
return NULL;
}
heights[i] = PyInt_AsLong(PyTuple_GetItem(dims, 0));
widths[i] = PyInt_AsLong(PyTuple_GetItem(dims, 1));
} else {
widths[i] = 0;
heights[i] = 0;
}
}
/* no-op unless built with ICC/MKL (see stub at top of file) */
kmp_set_blocktime(0);
#pragma omp parallel for schedule(dynamic)
for (i = 0; i < numfilters; ++i) {
PyArrayObject * pyfeatures = (PyArrayObject*)objs[i];
results[i] = filter_image(pyfeatures, pyfilter, bias, widths[i], heights[i]);
}
free(objs);
free(widths);
free(heights);
pyresults_list = PyList_New(numfilters);
for (i = 0; i < numfilters; ++i) {
PyList_SetItem(pyresults_list, i, results[i]);
}
free(results);
return Py_BuildValue("N", pyresults_list);
}
/* Method table for the _detection module. Previously this table was compiled
 * only for Python 2 and the Python 3 PyModuleDef had m_methods == NULL, so a
 * Python 3 build exported NO functions. It is now shared by both paths. */
static PyMethodDef _detection_methods[] = {
    {"FilterImage", FilterImage, METH_VARARGS, "Compute a 2D cross correlation between a filter and image features. Optionally add bias term."},
    {"FilterImages", FilterImages, METH_VARARGS, "Compute a 2D cross correlation between a filter and several image features in parallel. Optionally add bias term."},
    {"DeformationCost", DeformationCost, METH_VARARGS, "Compute a fast bounded distance transform for the deformation cost."},
    {NULL, NULL, 0, NULL} /* sentinel */
};
#if PY_MAJOR_VERSION >= 3
/* Python 3 module definition; m_size = -1 means no per-interpreter state. */
static struct PyModuleDef moduledef = {
    PyModuleDef_HEAD_INIT,
    "_detection",
    "Native convolution detection routine.",
    -1,
    _detection_methods,   /* was NULL: the Py3 module had no methods */
    NULL,                 /* m_slots */
    NULL,                 /* m_traverse */
    NULL,                 /* m_clear */
    NULL                  /* m_free */
};
#endif
/* Module entry point for "_detection".
 * Python 3 builds export PyInit__detection and must return the module object;
 * Python 2 builds export init_detection and return nothing. */
#if PY_MAJOR_VERSION >= 3
PyMODINIT_FUNC
PyInit__detection(void)
#else
PyMODINIT_FUNC
init_detection(void)
#endif
{
/* Initialize the numpy C API. On Python 3 this macro returns NULL from the
 * enclosing function if the import fails. */
import_array();
#if PY_MAJOR_VERSION >= 3
PyObject *m = PyModule_Create(&moduledef);
#else
Py_InitModule3("_detection", _detection_methods, "Native convolution detection routine.");
#endif
#if PY_MAJOR_VERSION >= 3
return m;
#endif
}
|
maxpool_layer.c | #include "maxpool_layer.h"
#include "convolutional_layer.h"
#include "dark_cuda.h"
#include "gemm.h"
#include <stdio.h>
/* View the layer's output buffer as an image of shape (out_w, out_h, c). */
image get_maxpool_image(maxpool_layer l)
{
    return float_to_image(l.out_w, l.out_h, l.c, l.output);
}
/* View the layer's delta (gradient) buffer as an image of shape
 * (out_w, out_h, c). */
image get_maxpool_delta(maxpool_layer l)
{
    return float_to_image(l.out_w, l.out_h, l.c, l.delta);
}
/* Create and configure the cuDNN pooling and tensor descriptors for layer l.
 * Compiled only when CUDNN is defined; otherwise this is a no-op. */
void cudnn_maxpool_setup(layer *l)
{
#ifdef CUDNN
/* NOTE(review): the returned status codes are captured but never checked, so
 * descriptor-creation failures go unnoticed — consider CHECK_CUDNN-style
 * handling as used elsewhere in this project. */
cudnnStatus_t maxpool_status;
maxpool_status = cudnnCreatePoolingDescriptor(&l->poolingDesc);
maxpool_status = cudnnSetPooling2dDescriptor(
l->poolingDesc,
CUDNN_POOLING_MAX,
CUDNN_NOT_PROPAGATE_NAN, // CUDNN_PROPAGATE_NAN, CUDNN_NOT_PROPAGATE_NAN
l->size,
l->size,
l->pad/2, //0, //l.pad,
l->pad/2, //0, //l.pad,
l->stride_x,
l->stride_y);
/* Input is (batch, c, h, w); output is (batch, out_c, out_h, out_w), NCHW float. */
cudnnCreateTensorDescriptor(&l->srcTensorDesc);
cudnnCreateTensorDescriptor(&l->dstTensorDesc);
cudnnSetTensor4dDescriptor(l->srcTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->c, l->h, l->w);
cudnnSetTensor4dDescriptor(l->dstTensorDesc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, l->batch, l->out_c, l->out_h, l->out_w);
#endif // CUDNN
}
/* Construct a max-pooling layer.
 * batch        : mini-batch size
 * h, w, c      : input height, width, channels
 * size         : pooling window side length
 * stride_x/_y  : window strides
 * padding      : total padding (split /2 in the cuDNN setup above)
 * maxpool_depth: when non-zero, pool across channels instead of spatially
 * out_channels : output channels used only in maxpool_depth mode
 * antialiasing : when non-zero, follow the pool with a fixed 3x3 blur conv
 * Returns the fully initialized layer by value. */
maxpool_layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride_x, int stride_y, int padding, int maxpool_depth, int out_channels, int antialiasing)
{
maxpool_layer l = { (LAYER_TYPE)0 };
l.type = MAXPOOL;
/* Keep the requested strides for the anti-aliasing blur layer; the pooling
 * itself runs with stride 1 when antialiasing is on. */
const int blur_stride_x = stride_x;
const int blur_stride_y = stride_y;
l.antialiasing = antialiasing;
if (antialiasing) {
stride_x = stride_y = l.stride = l.stride_x = l.stride_y = 1; // use stride=1 in host-layer
}
l.batch = batch;
l.h = h;
l.w = w;
l.c = c;
l.pad = padding;
l.maxpool_depth = maxpool_depth;
l.out_channels = out_channels;
if (maxpool_depth) {
/* Depth pooling keeps the spatial size and reduces channels. */
l.out_c = out_channels;
l.out_w = l.w;
l.out_h = l.h;
}
else {
/* Standard spatial pooling output size. */
l.out_w = (w + padding - size) / stride_x + 1;
l.out_h = (h + padding - size) / stride_y + 1;
l.out_c = c;
}
l.outputs = l.out_h * l.out_w * l.out_c;
l.inputs = h*w*c;
l.size = size;
l.stride = stride_x;
l.stride_x = stride_x;
l.stride_y = stride_y;
/* NOTE(review): these callocs are unchecked; an OOM would surface later as a
 * NULL dereference. */
int output_size = l.out_h * l.out_w * l.out_c * batch;
l.indexes = (int*)calloc(output_size, sizeof(int));
l.output = (float*)calloc(output_size, sizeof(float));
l.delta = (float*)calloc(output_size, sizeof(float));
l.forward = forward_maxpool_layer;
l.backward = backward_maxpool_layer;
#ifdef GPU
l.forward_gpu = forward_maxpool_layer_gpu;
l.backward_gpu = backward_maxpool_layer_gpu;
l.indexes_gpu = cuda_make_int_array(output_size);
l.output_gpu = cuda_make_array(l.output, output_size);
l.delta_gpu = cuda_make_array(l.delta, output_size);
cudnn_maxpool_setup(&l);
#endif // GPU
l.bflops = (l.size*l.size*l.c * l.out_h*l.out_w) / 1000000000.;
if (maxpool_depth)
fprintf(stderr, "max-depth %2dx%2d/%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
else if(stride_x == stride_y)
fprintf(stderr, "max %2dx%2d/%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
else
fprintf(stderr, "max %2dx%2d/%2dx%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BF\n", size, size, stride_x, stride_y, w, h, c, l.out_w, l.out_h, l.out_c, l.bflops);
if (l.antialiasing) {
printf("AA: ");
/* Attach a depthwise 3x3 blur convolution that runs at the originally
 * requested stride; each 3x3 kernel is the 1-2-1 binomial filter /16. */
l.input_layer = (layer*)calloc(1, sizeof(layer));
const int blur_size = 3;
*(l.input_layer) = make_convolutional_layer(batch, 1, l.out_h, l.out_w, l.out_c, l.out_c, l.out_c, blur_size, blur_stride_x, blur_stride_y, 1, blur_size / 2, LINEAR, 0, 0, 0, 0, 0, 1, 0, NULL, 0);
const int blur_nweights = l.out_c * blur_size * blur_size; // (n / n) * n * blur_size * blur_size;
int i;
for (i = 0; i < blur_nweights; i += (blur_size*blur_size)) {
/*
l.input_layer->weights[i + 0] = 0;
l.input_layer->weights[i + 1] = 0;
l.input_layer->weights[i + 2] = 0;
l.input_layer->weights[i + 3] = 0;
l.input_layer->weights[i + 4] = 1;
l.input_layer->weights[i + 5] = 0;
l.input_layer->weights[i + 6] = 0;
l.input_layer->weights[i + 7] = 0;
l.input_layer->weights[i + 8] = 0;
*/
l.input_layer->weights[i + 0] = 1 / 16.f;
l.input_layer->weights[i + 1] = 2 / 16.f;
l.input_layer->weights[i + 2] = 1 / 16.f;
l.input_layer->weights[i + 3] = 2 / 16.f;
l.input_layer->weights[i + 4] = 4 / 16.f;
l.input_layer->weights[i + 5] = 2 / 16.f;
l.input_layer->weights[i + 6] = 1 / 16.f;
l.input_layer->weights[i + 7] = 2 / 16.f;
l.input_layer->weights[i + 8] = 1 / 16.f;
}
for (i = 0; i < l.out_c; ++i) l.input_layer->biases[i] = 0;
#ifdef GPU
l.input_antialiasing_gpu = cuda_make_array(NULL, l.batch*l.outputs);
push_convolutional_layer(*(l.input_layer));
#endif // GPU
}
return l;
}
/* Resize the layer's input dimensions to (h, w) and reallocate the output,
 * delta and index buffers (and their GPU mirrors) to match.
 * Fix: the previous `p = realloc(p, ...)` pattern lost the old pointer on
 * failure (leak + later NULL dereference); realloc results now go through
 * temporaries and an allocation failure aborts with a diagnostic. */
void resize_maxpool_layer(maxpool_layer *l, int w, int h)
{
    l->h = h;
    l->w = w;
    l->inputs = h*w*l->c;
    l->out_w = (w + l->pad - l->size) / l->stride_x + 1;
    l->out_h = (h + l->pad - l->size) / l->stride_y + 1;
    l->outputs = l->out_w * l->out_h * l->out_c;
    int output_size = l->outputs * l->batch;
    int *new_indexes = (int*)realloc(l->indexes, output_size * sizeof(int));
    float *new_output = (float*)realloc(l->output, output_size * sizeof(float));
    float *new_delta = (float*)realloc(l->delta, output_size * sizeof(float));
    if (!new_indexes || !new_output || !new_delta) {
        fprintf(stderr, "resize_maxpool_layer: realloc of %d elements failed\n", output_size);
        exit(EXIT_FAILURE);
    }
    l->indexes = new_indexes;
    l->output = new_output;
    l->delta = new_delta;
#ifdef GPU
    /* Recreate GPU buffers from the resized host buffers. */
    CHECK_CUDA(cudaFree((float *)l->indexes_gpu));
    CHECK_CUDA(cudaFree(l->output_gpu));
    CHECK_CUDA(cudaFree(l->delta_gpu));
    l->indexes_gpu = cuda_make_int_array(output_size);
    l->output_gpu = cuda_make_array(l->output, output_size);
    l->delta_gpu = cuda_make_array(l->delta, output_size);
    cudnn_maxpool_setup(l);
#endif
}
/* Forward pass of max pooling.
 * - maxpool_depth mode: pool across channels (k runs over input channels in
 *   steps of out_c), keeping spatial size.
 * - otherwise: standard spatial max pooling; an AVX fast path is used at
 *   inference time when the strides are equal.
 * The winning input index of each output cell is recorded in l.indexes for
 * the backward pass. If antialiasing is enabled, the pooled output is run
 * through the fixed blur convolution and copied back into l.output.
 * Fix: in the maxpool_depth branch, j/k/g were declared outside the OpenMP
 * parallel loop and therefore SHARED across threads — a data race that could
 * corrupt outputs. They are now declared inside the parallel region. */
void forward_maxpool_layer(const maxpool_layer l, network_state state)
{
    if (l.maxpool_depth)
    {
        int b, i;
        for (b = 0; b < l.batch; ++b) {
            #pragma omp parallel for
            for (i = 0; i < l.h; ++i) {
                int j, k, g;   /* private per thread (were shared: race) */
                for (j = 0; j < l.w; ++j) {
                    for (g = 0; g < l.out_c; ++g)
                    {
                        int out_index = j + l.w*(i + l.h*(g + l.out_c*b));
                        float max = -FLT_MAX;
                        int max_i = -1;
                        /* Scan channels g, g+out_c, g+2*out_c, ... */
                        for (k = g; k < l.c; k += l.out_c)
                        {
                            int in_index = j + l.w*(i + l.h*(k + l.c*b));
                            float val = state.input[in_index];
                            max_i = (val > max) ? in_index : max_i;
                            max = (val > max) ? val : max;
                        }
                        l.output[out_index] = max;
                        l.indexes[out_index] = max_i;
                    }
                }
            }
        }
        return;
    }
    if (!state.train && l.stride_x == l.stride_y) {
        /* Inference fast path. */
        forward_maxpool_layer_avx(state.input, l.output, l.indexes, l.size, l.w, l.h, l.out_w, l.out_h, l.c, l.pad, l.stride, l.batch);
    }
    else {
        int b, i, j, k, m, n;
        int w_offset = -l.pad / 2;
        int h_offset = -l.pad / 2;
        int h = l.out_h;
        int w = l.out_w;
        int c = l.c;
        for (b = 0; b < l.batch; ++b) {
            for (k = 0; k < c; ++k) {
                for (i = 0; i < h; ++i) {
                    for (j = 0; j < w; ++j) {
                        int out_index = j + w*(i + h*(k + c*b));
                        float max = -FLT_MAX;
                        int max_i = -1;
                        for (n = 0; n < l.size; ++n) {
                            for (m = 0; m < l.size; ++m) {
                                int cur_h = h_offset + i*l.stride_y + n;
                                int cur_w = w_offset + j*l.stride_x + m;
                                int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
                                int valid = (cur_h >= 0 && cur_h < l.h &&
                                             cur_w >= 0 && cur_w < l.w);
                                /* Out-of-bounds window cells never win. */
                                float val = (valid != 0) ? state.input[index] : -FLT_MAX;
                                max_i = (val > max) ? index : max_i;
                                max = (val > max) ? val : max;
                            }
                        }
                        l.output[out_index] = max;
                        l.indexes[out_index] = max_i;
                    }
                }
            }
        }
    }
    if (l.antialiasing) {
        network_state s = { 0 };
        s.train = state.train;
        s.workspace = state.workspace;
        s.net = state.net;
        s.input = l.output;
        forward_convolutional_layer(*(l.input_layer), s);
        //simple_copy_ongpu(l.outputs*l.batch, l.output, l.input_antialiasing);
        memcpy(l.output, l.input_layer->output, l.input_layer->outputs * l.input_layer->batch * sizeof(float));
    }
}
/* Backward pass of max pooling: route each output gradient to the input
 * element that won the max in the forward pass (via l.indexes).
 * Fix: when pooling windows overlap (stride < size), several outputs can
 * share the same winning input index, so the parallel `+=` on
 * state.delta[index] raced; the accumulation is now atomic. */
void backward_maxpool_layer(const maxpool_layer l, network_state state)
{
    int i;
    int h = l.out_h;
    int w = l.out_w;
    int c = l.out_c;
    #pragma omp parallel for
    for(i = 0; i < h*w*c*l.batch; ++i){
        int index = l.indexes[i];
        #pragma omp atomic
        state.delta[index] += l.delta[i];
    }
}
|
kpoint.c | /* kpoint.c */
/* Copyright (C) 2008 Atsushi Togo */
#include <stdio.h>
#include <stdlib.h>
#include "mathfunc.h"
#include "symmetry.h"
#include "kpoint.h"
#include "debug.h"
/* #define QXYZ */
/* The addressing order of mesh grid is defined as running left */
/* element first. But when QXYZ is defined, it is changed to right */
/* element first. */
static PointSymmetry get_point_group_reciprocal( SPGCONST double lattice[3][3],
const MatINT * rotations,
const int is_time_reversal,
const double symprec );
static PointSymmetry get_point_group_reciprocal_with_q( SPGCONST PointSymmetry * pointgroup,
const double symprec,
const int num_q,
SPGCONST double qpoints[][3] );
static int get_ir_kpoints( int map[],
SPGCONST double kpoints[][3],
const int num_kpoint,
SPGCONST PointSymmetry * point_symmetry,
const double symprec );
static int get_ir_reciprocal_mesh( int grid_point[][3],
int map[],
const int mesh[3],
const int is_shift[3],
SPGCONST PointSymmetry * point_symmetry );
static Triplets * get_ir_triplets( const int mesh[3],
const int is_time_reversal,
SPGCONST double lattice[3][3],
const MatINT * rotations,
const double symprec );
static int get_ir_triplets_with_q( int weights[],
int grid_points[][3],
int third_q[],
const int grid_point,
const int mesh[3],
PointSymmetry * pointgroup,
const double symprec );
static int extract_ir_triplets_with_q( int triplets_with_q[][3],
int weight_with_q[],
const int fixed_grid_number,
SPGCONST int triplets[][3],
const int num_triplets,
const int mesh[3],
SPGCONST PointSymmetry * point_symmetry );
static void get_grid_mapping_table( int **map_sym,
SPGCONST PointSymmetry * point_symmetry,
const int mesh[3],
const int is_shift[3] );
static void address_to_grid( int grid_double[3],
const int address,
const int mesh[3],
const int is_shift[3] );
static void get_grid_points( int grid_point[3],
const int grid[3],
const int mesh[3] );
static void get_vector_modulo( int v[3],
const int m[3] );
static int grid_to_address( const int grid[3],
const int mesh[3],
const int is_shift[3] );
static void free_array2D_int( int **array,
const int num_row );
static int ** allocate_array2d_int( const int num_row,
const int num_column );
static Triplets * allocate_triplets( const int num_triplets, const int mesh[3] );
/* Public entry: find the irreducible k-points among `kpoints`.
 * On return map[i] holds the index of the irreducible point that kpoints[i]
 * reduces to; the return value is the number of irreducible k-points. */
int kpt_get_irreducible_kpoints( int map[],
				 SPGCONST double kpoints[][3],
				 const int num_kpoint,
				 SPGCONST double lattice[3][3],
				 const Symmetry * symmetry,
				 const int is_time_reversal,
				 const double symprec )
{
  MatINT *rot;
  PointSymmetry pt_sym;
  int idx;

  /* Collect the rotation parts of all symmetry operations. */
  rot = mat_alloc_MatINT( symmetry->size );
  for ( idx = 0; idx < symmetry->size; idx++ ) {
    mat_copy_matrix_i3( rot->mat[idx], symmetry->rot[idx] );
  }

  /* Build the reciprocal-space point group, then release the copy. */
  pt_sym = get_point_group_reciprocal( lattice, rot, is_time_reversal, symprec );
  mat_free_MatINT( rot );

  return get_ir_kpoints( map, kpoints, num_kpoint, &pt_sym, symprec );
}
/* grid_point (e.g. 4x4x4 mesh) */
/* [[ 0 0 0] */
/* [ 1 0 0] */
/* [ 2 0 0] */
/* [-1 0 0] */
/* [ 0 1 0] */
/* [ 1 1 0] */
/* [ 2 1 0] */
/* [-1 1 0] */
/* .... ] */
/* */
/* Each value of 'map' correspnds to the index of grid_point. */
/* Public entry: irreducible points of a (shifted) Monkhorst-Pack mesh.
 * Fills grid_points with all mesh points and map with the index of the
 * irreducible representative of each; returns the irreducible count. */
int kpt_get_irreducible_reciprocal_mesh( int grid_points[][3],
					 int map[],
					 const int mesh[3],
					 const int is_shift[3],
					 const int is_time_reversal,
					 SPGCONST double lattice[3][3],
					 const Symmetry * symmetry,
					 const double symprec )
{
  MatINT *rot;
  PointSymmetry pt_sym;
  int idx;

  /* Copy out the rotation parts of the symmetry operations. */
  rot = mat_alloc_MatINT( symmetry->size );
  for ( idx = 0; idx < symmetry->size; idx++ ) {
    mat_copy_matrix_i3( rot->mat[idx], symmetry->rot[idx] );
  }

  /* Reciprocal-space point group, then free the temporary copy. */
  pt_sym = get_point_group_reciprocal( lattice, rot, is_time_reversal, symprec );
  mat_free_MatINT( rot );

  return get_ir_reciprocal_mesh( grid_points, map, mesh, is_shift, &pt_sym );
}
/* Release a Triplets object and its member arrays.
 * Fixes: tolerate a NULL argument (previously dereferenced it), and drop the
 * final `t = NULL;` — assigning to a by-value parameter has no effect on the
 * caller. Nulling the members before free(t) is kept as cheap hardening
 * against stale-pointer reuse. */
void kpt_free_triplets( Triplets * t )
{
  if ( t == NULL ) {
    return;
  }
  free( t->triplets );
  t->triplets = NULL;
  free( t->weights );
  t->weights = NULL;
  free( t->mesh_points );
  t->mesh_points = NULL;
  free( t );
}
/* Public entry: irreducible mesh points under the subgroup that stabilizes
 * the given q-points. The full reciprocal point group is first computed and
 * then restricted to operations leaving every qpoint invariant (mod G). */
int kpt_get_stabilized_reciprocal_mesh( int grid_points[][3],
					int map[],
					const int mesh[3],
					const int is_shift[3],
					const int is_time_reversal,
					SPGCONST double lattice[3][3],
					const MatINT * rotations,
					const int num_q,
					SPGCONST double qpoints[][3],
					const double symprec )
{
  PointSymmetry pg, pg_q;

  /* Full reciprocal point group ... */
  pg = get_point_group_reciprocal( lattice, rotations,
				   is_time_reversal, symprec );
  /* ... restricted by the stabilizer q-points. */
  pg_q = get_point_group_reciprocal_with_q( &pg, symprec, num_q, qpoints );

  return get_ir_reciprocal_mesh( grid_points, map, mesh, is_shift, &pg_q );
}
/* Public entry: irreducible momentum-conserving q-point triplets on a mesh.
 * Thin wrapper over the static worker; the caller owns the returned object
 * and releases it with kpt_free_triplets(). */
Triplets * kpt_get_triplets_reciprocal_mesh( const int mesh[3],
					     const int is_time_reversal,
					     SPGCONST double lattice[3][3],
					     const MatINT * rotations,
					     const double symprec )
{
  return get_ir_triplets( mesh, is_time_reversal, lattice, rotations, symprec );
}
/* Public entry: irreducible triplets (q, q', q'') with the first q fixed to
 * `grid_point`. Fills weights[], grid_points[][3] and third_q[]; returns the
 * number of irreducible triplets. */
int kpt_get_ir_triplets_at_q( int weights[],
			      int grid_points[][3],
			      int third_q[],
			      const int grid_point,
			      const int mesh[3],
			      const int is_time_reversal,
			      SPGCONST double lattice[3][3],
			      const MatINT * rotations,
			      const double symprec )
{
  /* Reciprocal-space point group of the lattice. */
  PointSymmetry pg = get_point_group_reciprocal( lattice,
						 rotations,
						 is_time_reversal,
						 symprec );
  return get_ir_triplets_with_q( weights, grid_points, third_q,
				 grid_point, mesh, &pg, symprec );
}
/* Public entry: from a precomputed list of irreducible triplets, extract the
 * ones equivalent to triplets containing `fixed_grid_number`.
 * Note: the `weight[]` argument is accepted for interface compatibility but
 * is not used by the underlying worker. */
int kpt_extract_triplets_reciprocal_mesh_at_q( int triplets_with_q[][3],
					       int weight_with_q[],
					       const int fixed_grid_number,
					       const int num_triplets,
					       SPGCONST int triplets[][3],
					       const int weight[],
					       const int mesh[3],
					       const int is_time_reversal,
					       SPGCONST double lattice[3][3],
					       const MatINT * rotations,
					       const double symprec )
{
  /* Reciprocal-space point group of the lattice. */
  PointSymmetry pg = get_point_group_reciprocal( lattice,
						 rotations,
						 is_time_reversal,
						 symprec );
  return extract_ir_triplets_with_q( triplets_with_q,
				     weight_with_q,
				     fixed_grid_number,
				     triplets,
				     num_triplets,
				     mesh,
				     &pg );
}
/* Build the point group acting on reciprocal space from the given rotations.
 * Each real-space rotation is transformed to its reciprocal-space
 * counterpart; when is_time_reversal is set, the inversion-composed partner
 * of every operation is appended. Duplicates are removed before returning.
 * (The note below about qpoints/num_q describes the _with_q variant that
 * follows this function.) */
/* qpoints are used to find stabilizers (operations). */
/* num_q is the number of the qpoints. */
static PointSymmetry get_point_group_reciprocal( SPGCONST double lattice[3][3],
						 const MatINT * rotations,
						 const int is_time_reversal,
						 const double symprec )
{
  int i, j, num_pt = 0;
  double volume;
  double rot_d[3][3], lat_inv[3][3], glat[3][3], tmp_mat[3][3], grot_d[3][3];
  MatINT *rot_reciprocal;
  PointSymmetry point_symmetry;
  SPGCONST int inversion[3][3] = {
    {-1, 0, 0 },
    { 0,-1, 0 },
    { 0, 0,-1 }
  };
  /* With time reversal each rotation also contributes its inversion image. */
  if ( is_time_reversal ) {
    rot_reciprocal = mat_alloc_MatINT( rotations->size * 2 );
  } else {
    rot_reciprocal = mat_alloc_MatINT( rotations->size );
  }
  /* tmp_mat = (L^-1) (L^-1)^T maps rotations into the reciprocal basis. */
  volume = mat_get_determinant_d3(lattice);
  mat_inverse_matrix_d3(lat_inv, lattice, symprec);
  mat_transpose_matrix_d3(glat, lat_inv);
  mat_multiply_matrix_d3(tmp_mat, lat_inv, glat);
  for ( i = 0; i < rotations->size; i++ ) {
    mat_cast_matrix_3i_to_3d( rot_d, rotations->mat[ i ] );
    /* Tolerance scaled by 1/volume^2 to match the similarity transform. */
    mat_get_similar_matrix_d3( grot_d, rot_d, tmp_mat,
			       symprec / volume / volume );
    mat_cast_matrix_3d_to_3i( rot_reciprocal->mat[ i ], grot_d );
    if ( is_time_reversal ) {
      mat_multiply_matrix_i3( rot_reciprocal->mat[ rotations->size+i ],
			      inversion,
			      rot_reciprocal->mat[ i ] );
    }
  }
  /* Deduplicate: the goto skips operations already present. */
  for ( i = 0; i < rot_reciprocal->size; i++ ) {
    for ( j = 0; j < num_pt; j++ ) {
      if ( mat_check_identity_matrix_i3( point_symmetry.rot[ j ],
					 rot_reciprocal->mat[ i ] ) ) {
	goto escape;
      }
    }
    mat_copy_matrix_i3( point_symmetry.rot[ num_pt ],
			rot_reciprocal->mat[ i ] );
    num_pt++;
  escape:
    ;
  }
  point_symmetry.size = num_pt;
  mat_free_MatINT( rot_reciprocal );
  return point_symmetry;
}
/* Restrict a reciprocal-space point group to its subgroup that maps every
 * qpoint onto some qpoint in the set, modulo a reciprocal lattice vector.
 * Used to obtain the stabilizer group of a set of q-points. */
static PointSymmetry get_point_group_reciprocal_with_q( SPGCONST PointSymmetry * pointgroup,
							const double symprec,
							const int num_q,
							SPGCONST double qpoints[][3] )
{
  int i, j, k, l, is_all_ok=0, num_ptq = 0;
  double q_rot[3], diff[3];
  PointSymmetry pointgroup_q;
  for ( i = 0; i < pointgroup->size; i++ ) {
    /* The operation is kept only if every rotated qpoint matches a qpoint. */
    for ( j = 0; j < num_q; j++ ) {
      is_all_ok = 0;
      mat_multiply_matrix_vector_id3( q_rot,
				      pointgroup->rot[ i ],
				      qpoints[ j ] );
      for ( k = 0; k < num_q; k++ ) {
	for ( l = 0; l < 3; l++ ) {
	  /* Compare modulo a reciprocal lattice translation. */
	  diff[l] = q_rot[l] - qpoints[k][l];
	  diff[l] -= mat_Nint( diff[l] );
	}
	if ( mat_Dabs( diff[0] ) < symprec &&
	     mat_Dabs( diff[1] ) < symprec &&
	     mat_Dabs( diff[2] ) < symprec ) {
	  is_all_ok = 1;
	  break;
	}
      }
      if ( ! is_all_ok ) {
	break;
      }
    }
    if ( is_all_ok ) {
      mat_copy_matrix_i3( pointgroup_q.rot[ num_ptq ], pointgroup->rot[ i ] );
      num_ptq++;
    }
  }
  pointgroup_q.size = num_ptq;
  return pointgroup_q;
}
/* Reduce an arbitrary k-point list by the point group.
 * map[i] receives the index of the irreducible representative of kpoints[i];
 * returns the number of irreducible k-points found.
 * Fix: the inner search loop recomputed kpt_rot with identical arguments on
 * every k iteration; kpt_rot is unchanged after the first computation, so the
 * redundant call was removed. An allocation check was also added. */
static int get_ir_kpoints( int map[],
			   SPGCONST double kpoints[][3],
			   const int num_kpoint,
			   SPGCONST PointSymmetry * point_symmetry,
			   const double symprec )
{
  int i, j, k, l, num_ir_kpoint = 0, is_found;
  int *ir_map;
  double kpt_rot[3], diff[3];
  ir_map = (int*)malloc(num_kpoint*sizeof(int));
  if ( ir_map == NULL ) {
    return 0;
  }
  for ( i = 0; i < num_kpoint; i++ ) {
    map[i] = i;
    is_found = 1;
    for ( j = 0; j < point_symmetry->size; j++ ) {
      mat_multiply_matrix_vector_id3(kpt_rot, point_symmetry->rot[j], kpoints[i]);
      /* Operations leaving the point invariant (mod G) cannot reduce it. */
      for ( k = 0; k < 3; k++ ) {
	diff[k] = kpt_rot[k] - kpoints[i][k];
	diff[k] = diff[k] - mat_Nint(diff[k]);
      }
      if ( mat_Dabs(diff[0]) < symprec &&
	   mat_Dabs(diff[1]) < symprec &&
	   mat_Dabs(diff[2]) < symprec ) {
	continue;
      }
      /* Does the rotated point coincide with an already-found ir-point? */
      for ( k = 0; k < num_ir_kpoint; k++ ) {
	for ( l = 0; l < 3; l++ ) {
	  diff[l] = kpt_rot[l] - kpoints[ir_map[k]][l];
	  diff[l] = diff[l] - mat_Nint(diff[l]);
	}
	if ( mat_Dabs(diff[0]) < symprec &&
	     mat_Dabs(diff[1]) < symprec &&
	     mat_Dabs(diff[2]) < symprec ) {
	  is_found = 0;
	  map[i] = ir_map[k];
	  break;
	}
      }
      if ( ! is_found )
	break;
    }
    if ( is_found ) {
      /* No equivalent earlier point: this one is irreducible. */
      ir_map[num_ir_kpoint] = i;
      num_ir_kpoint++;
    }
  }
  free( ir_map );
  ir_map = NULL;
  return num_ir_kpoint;
}
/* Reduce a (shifted) Monkhorst-Pack mesh by the given point group.
 * Returns the number of irreducible grid points; fills grid[][3] with all
 * mesh points and map[] with each point's irreducible representative.
 * The loop nesting order is flipped by the QXYZ compile flag. */
static int get_ir_reciprocal_mesh( int grid[][3],
				   int map[],
				   const int mesh[3],
				   const int is_shift[3],
				   SPGCONST PointSymmetry * point_symmetry )
{
  /* In the following loop, mesh is doubled. */
  /* Even and odd mesh numbers correspond to */
  /* is_shift[i] = 0 and 1, respectively. */
  /* is_shift = [0,0,0] gives Gamma center mesh. */
  /* grid: reducible grid points */
  /* map: the mapping from each point to ir-point. */
  int i, j, k, l, address, address_rot, num_ir = 0;
  int grid_double[3], grid_rot[3], mesh_double[3];
  for ( i = 0; i < 3; i++ ) {
    mesh_double[i] = mesh[i] * 2;
  }
  /* "-1" means the element is not touched yet. */
  for ( i = 0; i < mesh[0] * mesh[1] * mesh[2]; i++ ) {
    map[i] = -1;
  }
#ifndef QXYZ
  /* Default: z is the slowest axis; the parity filters skip doubled-mesh
   * coordinates that do not match the requested shift. */
  for ( i = 0; i < mesh_double[2]; i++ ) {
    if ( ( is_shift[2] && i % 2 == 0 ) ||
	 ( is_shift[2] == 0 && i % 2 != 0 ) )
      continue;
    for ( j = 0; j < mesh_double[1]; j++ ) {
      if ( ( is_shift[1] && j % 2 == 0 ) ||
	   ( is_shift[1] == 0 && j % 2 != 0 ) )
	continue;
      for ( k = 0; k < mesh_double[0]; k++ ) {
	if ( ( is_shift[0] && k % 2 == 0 ) ||
	     ( is_shift[0] == 0 && k % 2 != 0 ) )
	  continue;
	grid_double[0] = k;
	grid_double[1] = j;
	grid_double[2] = i;
#else
  /* QXYZ: x is the slowest axis instead. */
  for ( i = 0; i < mesh_double[0]; i++ ) {
    if ( ( is_shift[0] && i % 2 == 0 ) ||
	 ( is_shift[0] == 0 && i % 2 != 0 ) )
      continue;
    for ( j = 0; j < mesh_double[1]; j++ ) {
      if ( ( is_shift[1] && j % 2 == 0 ) ||
	   ( is_shift[1] == 0 && j % 2 != 0 ) )
	continue;
      for ( k = 0; k < mesh_double[2]; k++ ) {
	if ( ( is_shift[2] && k % 2 == 0 ) ||
	     ( is_shift[2] == 0 && k % 2 != 0 ) )
	  continue;
	grid_double[0] = i;
	grid_double[1] = j;
	grid_double[2] = k;
#endif
	address = grid_to_address( grid_double, mesh, is_shift );
	get_grid_points(grid[ address ], grid_double, mesh);
	/* Map this point onto the earliest equivalent point found so far. */
	for ( l = 0; l < point_symmetry->size; l++ ) {
	  mat_multiply_matrix_vector_i3( grid_rot, point_symmetry->rot[l], grid_double );
	  get_vector_modulo(grid_rot, mesh_double);
	  address_rot = grid_to_address( grid_rot, mesh, is_shift );
	  if ( address_rot > -1 ) { /* Invalid if even --> odd or odd --> even */
	    if ( map[ address_rot ] > -1 ) {
	      map[ address ] = map[ address_rot ];
	      break;
	    }
	  }
	}
	/* Set itself to the map when equivalent point */
	/* with smaller numbering could not be found. */
	if ( map[ address ] == -1 ) {
	  map[ address ] = address;
	  num_ir++;
	}
      }
    }
  }
  return num_ir;
}
/* Unique q-point triplets that conserve the momentum, */
/* q+q'+q''=G, are obtained. */
/* */
/* The first q-point is selected among the ir-q-points. */
/* The second q-point is selected among the ir-q-points */
/* constrained by the first q-point (stabilizer) */
/* The third q-point is searched through the all grid */
/* points and is checked if it satisfies q+q'+q''=G, */
/* here q, q', and q'' can be exchanged one another. */
static Triplets * get_ir_triplets( const int mesh[3],
				   const int is_time_reversal,
				   SPGCONST double lattice[3][3],
				   const MatINT * rotations,
				   const double symprec )
{
  int i, j, k, l, num_ir, num_grid, weight, weight_q, count, q_2;
  int num_triplets, num_unique_q;
  int mesh_double[3], address[3], is_shift[3];
  int grid_double[3][3];
  int (*grid)[3], (*grid_local)[3];
  int *map, *map_q, *unique_q;
  int **map_sym = NULL;
  int **weight_counts;
  double stabilizer_q[1][3];
  PointSymmetry point_symmetry, point_symmetry_q;
  Triplets * tps;
  /* All six permutations of the triplet indices. */
  const int index_exchange[6][3] = { { 0, 1, 2 },
				     { 2, 0, 1 },
				     { 1, 2, 0 },
				     { 2, 1, 0 },
				     { 0, 2, 1 },
				     { 1, 0, 2 } };
  num_grid = mesh[0] * mesh[1] * mesh[2];
  /* NOTE(review): these allocations are unchecked. */
  map = (int*) malloc( num_grid * sizeof(int) );
  unique_q = (int*) malloc( num_grid * sizeof(int) );
  grid = (int (*)[3]) malloc( sizeof(int[3]) * num_grid );
  point_symmetry = get_point_group_reciprocal( lattice,
					       rotations,
					       is_time_reversal,
					       symprec );
  /* Only consider the gamma-point */
  for ( i = 0; i < 3; i++ ) {
    is_shift[i] = 0;
  }
  /* Irreducible mesh points of the full point group. */
  num_ir = get_ir_reciprocal_mesh( grid,
				   map,
				   mesh,
				   is_shift,
				   &point_symmetry );
  /* weight_counts[ir-q][q'] accumulates the multiplicity of each triplet. */
  weight_counts = allocate_array2d_int( num_ir, num_grid );
  for ( i = 0; i < num_ir; i++ ) {
    for ( j = 0; j < num_grid; j++ ) {
      weight_counts[i][j] = 0;
    }
  }
  for ( i = 0; i < 3; i++ ) {
    mesh_double[i] = mesh[i] * 2;
  }
  /* Prepare triplet mapping table to enhance speed of query */
  /* 'unique_q' numbering is prepared for saving memory space */
  num_unique_q = 0;
  for ( i = 0; i < num_grid; i++ ) {
    if ( i == map[i] ) {
      unique_q[i] = num_unique_q;
      num_unique_q++;
    }
    else {
      unique_q[i] = unique_q[map[i]];
    }
  }
  /* Prepare grid point mapping table */
  map_sym = allocate_array2d_int( point_symmetry.size, num_grid );
  get_grid_mapping_table( map_sym,
			  &point_symmetry,
			  mesh,
			  is_shift );
  /* Search triplets without considersing combination */
  /* #pragma omp parallel for private( j, k, l, grid_double, point_symmetry_q, stabilizer_q, weight_q, grid_local, address, map_q, weight ) */
  for ( i = 0; i < num_grid; i++ ) {
    if ( ! ( i == map[ i ] ) ) {
      continue;
    }
    /* weight = size of the orbit of this ir-q-point. */
    weight = 0;
    for ( j = 0; j < num_grid; j++ ) {
      if ( i == map[j] ) {
	weight++;
      }
    }
    /* Search irreducible q-points (map_q) with a stabilizer */
    address_to_grid( grid_double[0], i, mesh, is_shift ); /* q */
    for ( j = 0; j < 3; j++ ) {
      stabilizer_q[0][j] = (double)grid_double[0][j] / mesh_double[j];
    }
    point_symmetry_q = get_point_group_reciprocal_with_q( &point_symmetry,
							  symprec,
							  1,
							  stabilizer_q );
    grid_local = (int (*)[3]) malloc( sizeof(int[3]) * num_grid );
    map_q = (int*) malloc( num_grid * sizeof(int) );
    get_ir_reciprocal_mesh( grid_local,
			    map_q,
			    mesh,
			    is_shift,
			    &point_symmetry_q);
    free( grid_local );
    grid_local = NULL;
    for ( j = 0; j < num_grid; j++ ) {
      if ( ! ( j == map_q[ j ] ) ) {
	continue;
      }
      /* weight_q = orbit size of q' under the stabilizer group. */
      weight_q = 0;
      for ( k = 0; k < num_grid; k++ ) {
	if ( j == map_q[k] ) {
	  weight_q++;
	}
      }
      address_to_grid( grid_double[1], j, mesh, is_shift ); /* q' */
      for ( k = 0; k < 3; k++ ) { /* q'' */
	grid_double[2][k] = - grid_double[0][k] - grid_double[1][k];
      }
      get_vector_modulo( grid_double[2], mesh_double );
      q_2 = grid_to_address( grid_double[2], mesh, is_shift );
      /* Look for irreducible triplets exchanging three q-points */
      /* and equivalent by symmetry rotations */
      for ( k = 0; k < point_symmetry.size; k++ ) {
	/* Index exchange */
	for ( l = 0; l < 6; l++ ) {
	  /* Rotated grid point addresses with index exchange */
	  address[index_exchange[l][0]] = map_sym[k][ i ];
	  address[index_exchange[l][1]] = map_sym[k][ j ];
	  address[index_exchange[l][2]] = map_sym[k][ q_2 ];
	  /* address[0] has to be one of ir-q-points. */
	  if ( address[0] == map[ address[0] ] ) {
	    /* Is the set of address[0] and address[1] already found? */
	    if ( weight_counts[ unique_q[ address[0] ] ][ address[1] ] ) {
	      weight_counts[ unique_q[ address[0] ] ][ address[1] ] += \
		weight * weight_q;
	      goto escape;
	    }
	  }
	}
      }
      /* Not found, then this is an irreducible triplet. */
      weight_counts[ unique_q[i] ][j] = weight * weight_q;
    escape:
      ;
    }
    free( map_q );
    map_q = NULL;
  }
  /* Count the irreducible triplets to size the result. */
  num_triplets = 0;
  for ( i = 0; i < num_grid; i++ ) {
    if ( ! ( i == map[i] ) ) {
      continue;
    }
    for ( j = 0; j < num_grid; j++ ) {
      if ( weight_counts[ unique_q[ i ] ][ j ] ) {
	num_triplets++;
      }
    }
  }
  tps = allocate_triplets( num_triplets, mesh );
  for ( i = 0; i < num_grid; i++ ) {
    for ( j = 0; j < 3; j++ ) {
      tps->mesh_points[i][j] = grid[i][j];
    }
  }
  /* Second pass: emit (q, q', q'') addresses and the accumulated weights. */
  count = 0;
  for ( i = 0; i < num_grid; i++ ) {
    if ( ! ( i == map[i] ) ) {
      continue;
    }
    for ( j = 0; j < num_grid; j++ ) {
      if ( weight_counts[ unique_q[ i ] ][ j ] ) {
	tps->triplets[count][0] = i;
	tps->triplets[count][1] = j;
	address_to_grid( grid_double[0], i, mesh, is_shift ); /* q */
	address_to_grid( grid_double[1], j, mesh, is_shift ); /* q' */
	for ( l = 0; l < 3; l++ ) { /* q'' */
	  grid_double[2][l] = - grid_double[0][l] - grid_double[1][l];
	}
	get_vector_modulo( grid_double[2], mesh_double );
	tps->triplets[count][2] = grid_to_address( grid_double[2], mesh, is_shift );
	tps->weights[count] = weight_counts[ unique_q[ i ] ][ j ];
	count++;
      }
    }
  }
  free_array2D_int( map_sym, point_symmetry.size );
  free_array2D_int( weight_counts, num_ir );
  free( map );
  map = NULL;
  free( unique_q );
  unique_q = NULL;
  free( grid );
  grid = NULL;
  return tps;
}
/* Irreducible triplets (q, q', q'') with q fixed to `grid_point`.
 * q' runs over the mesh points irreducible under the stabilizer of q; q'' is
 * determined by momentum conservation q'' = -q - q' (mod G).
 * Fills weights[] (triplet multiplicities, 0 for reducible q'), third_q[]
 * (address of q'' for each irreducible q', -1 otherwise) and grid_points.
 * Returns the number of irreducible triplets. */
static int get_ir_triplets_with_q( int weights[],
				   int grid_points[][3],
				   int third_q[],
				   const int grid_point,
				   const int mesh[3],
				   PointSymmetry * pointgroup,
				   const double symprec )
{
  int i, j, k, num_grid, weight_q, q_2, num_ir;
  int mesh_double[3], address[3], is_shift[3];
  int grid_double[3][3];
  int *map_q;
  int **map_sym = NULL;
  double stabilizer_q[1][3];
  PointSymmetry pointgroup_q;
  /* All six permutations of the triplet indices. */
  const int index_exchange[6][3] = { { 0, 1, 2 },
				     { 2, 0, 1 },
				     { 1, 2, 0 },
				     { 2, 1, 0 },
				     { 0, 2, 1 },
				     { 1, 0, 2 } };
  num_grid = mesh[0] * mesh[1] * mesh[2];
  for ( i = 0; i < 3; i++ ) {
    /* Only consider the gamma-point */
    is_shift[i] = 0;
    mesh_double[i] = mesh[i] * 2;
  }
  /* Search irreducible q-points (map_q) with a stabilizer */
  address_to_grid( grid_double[0], grid_point, mesh, is_shift ); /* q */
  for ( i = 0; i < 3; i++ ) {
    stabilizer_q[0][i] = (double)grid_double[0][i] / mesh_double[i];
  }
  pointgroup_q = get_point_group_reciprocal_with_q( pointgroup,
						    symprec,
						    1,
						    stabilizer_q );
  map_sym = allocate_array2d_int( pointgroup->size, num_grid );
  get_grid_mapping_table( map_sym,
			  pointgroup,
			  mesh,
			  is_shift );
  map_q = (int*) malloc( sizeof(int) * num_grid );
  get_ir_reciprocal_mesh( grid_points,
			  map_q,
			  mesh,
			  is_shift,
			  &pointgroup_q );
  for ( i = 0; i < num_grid; i++ ) {
    weights[i] = 0;
    third_q[i] = -1;
  }
  num_ir = 0;
  for ( i = 0; i < num_grid; i++ ) {
    if ( ! ( i == map_q[ i ] ) ) {
      continue;
    }
    /* weight_q = orbit size of q' under the stabilizer group. */
    weight_q = 0;
    for ( j = 0; j < num_grid; j++ ) {
      if ( i == map_q[j] ) {
	weight_q++;
      }
    }
    address_to_grid( grid_double[1], i, mesh, is_shift ); /* q' */
    for ( j = 0; j < 3; j++ ) { /* q'' */
      grid_double[2][j] = - grid_double[0][j] - grid_double[1][j];
    }
    get_vector_modulo( grid_double[2], mesh_double );
    q_2 = grid_to_address( grid_double[2], mesh, is_shift );
    third_q[i] = q_2;
    /* Look for irreducible triplets exchanging three q-points */
    /* and equivalent by symmetry rotations */
    for ( j = 0; j < pointgroup->size; j++ ) {
      /* Index exchange */
      for ( k = 0; k < 6; k++ ) {
	/* Rotated grid point addresses with index exchange */
	address[index_exchange[k][0]] = map_sym[j][ grid_point ];
	address[index_exchange[k][1]] = map_sym[j][ i ];
	address[index_exchange[k][2]] = map_sym[j][ q_2 ];
	if ( address[0] == grid_point ) {
	  /* Is the set of address[0] and address[1] already found? */
	  if ( weights[ address[1] ] ) {
	    weights[ address[1] ] += weight_q;
	    goto escape;
	  }
	}
      }
    }
    /* Not found, then this is an irreducible triplet. */
    weights[ i ] = weight_q;
    num_ir++;
  escape:
    ;
  }
  free( map_q );
  map_q = NULL;
  free_array2D_int( map_sym, pointgroup->size );
  return num_ir;
}
/* Extract, from the full list of irreducible triplets, those that contain
 * the grid point fixed_grid_number (up to a point-group rotation), and
 * compute a weight for each extracted triplet.
 *
 * triplets_with_q   (out) extracted triplets; entry [0] is always
 *                   fixed_grid_number, entries [1] and [2] are stored in
 *                   ascending order.
 * weight_with_q     (out) per-triplet weights counting how many grid points
 *                   map onto each triplet under the point-group rotations.
 * fixed_grid_number (in)  grid address of the fixed q-point.
 * triplets          (in)  candidate triplets of grid addresses.
 * num_triplets      (in)  number of candidate triplets.
 * mesh              (in)  sampling mesh dimensions.
 * point_symmetry    (in)  point-group rotations to search over.
 *
 * Returns the number of triplets written, or 0 if the weighting pass finds
 * a grid point matching no extracted triplet (treated as an internal error).
 */
static int extract_ir_triplets_with_q( int triplets_with_q[][3],
				       int weight_with_q[],
				       const int fixed_grid_number,
				       SPGCONST int triplets[][3],
				       const int num_triplets,
				       const int mesh[3],
				       SPGCONST PointSymmetry *point_symmetry )
{
  int i, j, k, sym_num, rest_index, num_triplets_with_q;
  int address0, address1, address1_orig, found;
  int is_shift[3];
  int num_grid;
  int **map_sym;

  num_grid = mesh[0] * mesh[1] * mesh[2];
  /* map_sym[rot][addr] = grid address of point addr rotated by rotation rot. */
  map_sym = allocate_array2d_int( point_symmetry->size, num_grid );

  /* Only consider the gamma-point */
  for ( i = 0; i < 3; i++ ) {
    is_shift[i] = 0;
  }

  /* Prepare mapping tables */
  get_grid_mapping_table( map_sym,
			  point_symmetry,
			  mesh,
			  is_shift );

  num_triplets_with_q = 0;
  for ( i = 0; i < num_triplets; i++ ) {
    sym_num = -1;
    /* Search for a rotation that sends fixed_grid_number onto one member
     * of triplet i, and some grid point onto one of the other members. */
    for ( j = 0; j < point_symmetry->size; j++ ) {
      address0 = map_sym[j][fixed_grid_number];
      if ( triplets[i][0] == address0 ||
	   triplets[i][1] == address0 ||
	   triplets[i][2] == address0 ) {
	for ( k = 0; k < num_grid; k++ ) {
	  address1 = map_sym[j][k];
	  /* Matching indices 0 and 1 */
	  if ( ( triplets[i][0] == address0 && triplets[i][1] == address1 ) ||
	       ( triplets[i][1] == address0 && triplets[i][0] == address1 ) ) {
	    sym_num = j;
	    rest_index = 2;
	    address1_orig = k;
	    break;
	  }
	  /* Matching indices 1 and 2 */
	  if ( ( triplets[i][1] == address0 && triplets[i][2] == address1 ) ||
	       ( triplets[i][2] == address0 && triplets[i][1] == address1 ) ) {
	    sym_num = j;
	    rest_index = 0;
	    address1_orig = k;
	    break;
	  }
	  /* Matching indices 2 and 0 */
	  if ( ( triplets[i][2] == address0 && triplets[i][0] == address1 ) ||
	       ( triplets[i][0] == address0 && triplets[i][2] == address1 ) ) {
	    sym_num = j;
	    rest_index = 1;
	    address1_orig = k;
	    break;
	  }
	}
	if ( sym_num > -1 ) {
	  break;
	}
      }
    }
    /* Found? */
    /* rest_index and address1_orig are only read here, where sym_num > -1
     * guarantees they were set above (some compilers may still warn). */
    if ( sym_num > -1 ) {
      /* Locate the pre-image of the remaining triplet member under the same
       * rotation and store the triplet in canonical (sorted) order. */
      for ( j = 0; j < num_grid; j++ ) {
	if ( map_sym[sym_num][j] == triplets[i][rest_index] ) {
	  triplets_with_q[num_triplets_with_q][0] = fixed_grid_number;
	  if ( j > address1_orig ) {
	    triplets_with_q[num_triplets_with_q][1] = address1_orig;
	    triplets_with_q[num_triplets_with_q][2] = j;
	  } else {
	    triplets_with_q[num_triplets_with_q][2] = address1_orig;
	    triplets_with_q[num_triplets_with_q][1] = j;
	  }
	  num_triplets_with_q++;
	  break;
	}
      }
    }
  }

  /* Weighting pass: count, for every grid point, which extracted triplet it
   * belongs to under some rotation that fixes fixed_grid_number's orbit. */
  for ( i = 0; i < num_triplets_with_q; i++ ) {
    weight_with_q[i] = 0;
  }
  for ( i = 0; i < num_grid; i++ ) {
    found = 0;
    for ( j = 0; j < num_triplets_with_q; j++ ) {
      for ( k = 0; k < point_symmetry->size; k++ ) {
	if ( map_sym[k][fixed_grid_number] == triplets_with_q[j][0] ) {
	  if ( map_sym[k][i] == triplets_with_q[j][1] ||
	       map_sym[k][i] == triplets_with_q[j][2] ) {
	    weight_with_q[j]++;
	    found = 1;
	    break;
	  }
	}
	if ( map_sym[k][fixed_grid_number] == triplets_with_q[j][1] ) {
	  if ( map_sym[k][i] == triplets_with_q[j][2] ||
	       map_sym[k][i] == triplets_with_q[j][0] ) {
	    weight_with_q[j]++;
	    found = 1;
	    break;
	  }
	}
	if ( map_sym[k][fixed_grid_number] == triplets_with_q[j][2] ) {
	  if ( map_sym[k][i] == triplets_with_q[j][0] ||
	       map_sym[k][i] == triplets_with_q[j][1] ) {
	    weight_with_q[j]++;
	    found = 1;
	    break;
	  }
	}
      }
      if ( found ) {
	break;
      }
    }
    if ( ! found ) {
      /* Every grid point should belong to some triplet; bail out with 0. */
      warning_print("spglib: Unexpected behavior in extract_ir_triplets_with_q ");
      warning_print("(line %d, %s).\n", __LINE__, __FILE__);
      num_triplets_with_q = 0;
      break;
    }
  }

  free_array2D_int( map_sym, point_symmetry->size );
  return num_triplets_with_q;
}
/* Fill map_sym so that map_sym[i][j] is the grid address obtained by
 * applying rotation i of point_symmetry to the grid point with address j.
 *
 * map_sym        (out) table of size point_symmetry->size x num_grid,
 *                pre-allocated by the caller.
 * point_symmetry (in)  set of rotation matrices.
 * mesh           (in)  sampling mesh dimensions.
 * is_shift       (in)  half-grid shift flags per axis.
 */
static void get_grid_mapping_table( int **map_sym,
				    SPGCONST PointSymmetry *point_symmetry,
				    const int mesh[3],
				    const int is_shift[3] )
{
  int i, j, num_grid;
  int grid_rot[3], grid_double[3], mesh_double[3];

  /* Hoisted out of the inner-loop condition: the old code recomputed
   * mesh[0]*mesh[1]*mesh[2] on every iteration. */
  num_grid = mesh[0] * mesh[1] * mesh[2];

  /* Rotations act on the double-resolution grid so that shifted meshes
   * stay on integer coordinates. */
  for ( i = 0; i < 3; i++ ) {
    mesh_double[i] = mesh[i] * 2;
  }

  for ( i = 0; i < point_symmetry->size; i++ ) {
    for ( j = 0; j < num_grid; j++ ) {
      /* Address -> double-resolution coordinates. */
      address_to_grid( grid_double, j, mesh, is_shift );
      /* Rotate the point. */
      mat_multiply_matrix_vector_i3( grid_rot,
				     point_symmetry->rot[i],
				     grid_double );
      /* Wrap back into the mesh and convert to an address. */
      get_vector_modulo( grid_rot, mesh_double );
      map_sym[i][j] = grid_to_address( grid_rot, mesh, is_shift );
    }
  }
}
/* Convert a double-resolution grid point to a linear grid address.
 * Returns -1 when the parity of grid_double[i] disagrees with the
 * half-shift flag is_shift[i] (no valid address exists). */
static int grid_to_address( const int grid_double[3],
			    const int mesh[3],
			    const int is_shift[3] )
{
  int axis;
  int grid[3];

  for ( axis = 0; axis < 3; axis++ ) {
    int even = ( grid_double[axis] % 2 == 0 );
    if ( even && ! is_shift[axis] ) {
      grid[axis] = grid_double[axis] / 2;
    } else if ( ! even && is_shift[axis] ) {
      grid[axis] = ( grid_double[axis] - 1 ) / 2;
    } else {
      /* Parity/shift mismatch: this point is not on the mesh. */
      return -1;
    }
  }

#ifndef QXYZ
  /* x varies fastest. */
  return grid[2] * mesh[0] * mesh[1] + grid[1] * mesh[0] + grid[0];
#else
  /* z varies fastest. */
  return grid[0] * mesh[1] * mesh[2] + grid[1] * mesh[2] + grid[2];
#endif
}
/* Inverse of grid_to_address: expand a linear grid address into
 * double-resolution coordinates, grid_double = 2*grid + is_shift. */
static void address_to_grid( int grid_double[3],
			     const int address,
			     const int mesh[3],
			     const int is_shift[3] )
{
  int axis;
  int grid[3];

#ifndef QXYZ
  /* x varies fastest. */
  grid[0] = address % mesh[0];
  grid[2] = address / ( mesh[0] * mesh[1] );
  grid[1] = ( address - grid[2] * mesh[0] * mesh[1] ) / mesh[0];
#else
  /* z varies fastest. */
  grid[2] = address % mesh[2];
  grid[0] = address / ( mesh[1] * mesh[2] );
  grid[1] = ( address - grid[0] * mesh[1] * mesh[2] ) / mesh[2];
#endif

  for ( axis = 0; axis < 3; axis++ ) {
    grid_double[axis] = 2 * grid[axis] + is_shift[axis];
  }
}
/* Halve double-resolution coordinates (rounding odd values down) and wrap
 * each component into the centered interval around zero, so that values
 * above mesh[i]/2 are mapped to their negative equivalents. */
static void get_grid_points( int grid[3],
			     const int grid_double[3],
			     const int mesh[3] )
{
  int axis;

  for ( axis = 0; axis < 3; axis++ ) {
    int g = ( grid_double[axis] % 2 == 0 )
      ? grid_double[axis] / 2
      : ( grid_double[axis] - 1 ) / 2;
    if ( g > mesh[axis] / 2 ) {
      g -= mesh[axis];
    }
    grid[axis] = g;
  }
}
/* Reduce each component of v into [0, m[i]) in place; unlike the C `%`
 * operator, negative inputs are mapped to non-negative residues. */
static void get_vector_modulo( int v[3],
			       const int m[3] )
{
  int axis;

  for ( axis = 0; axis < 3; axis++ ) {
    int r = v[axis] % m[axis];
    v[axis] = ( r < 0 ) ? r + m[axis] : r;
  }
}
/* Release a 2-D int array created by allocate_array2d_int.
 *
 * array   row-pointer block to free; safe to pass NULL (no-op).
 * num_row number of rows, which must match the allocation.
 *
 * NOTE: the old code ended with `array = NULL;`, which only cleared the
 * local parameter copy and had no effect on the caller's pointer; that
 * misleading dead assignment has been removed.  Callers who keep the
 * pointer around should NULL it themselves. */
static void free_array2D_int( int **array,
			      const int num_row )
{
  int i;

  if ( array == NULL ) {
    return;
  }
  for ( i = 0; i < num_row; i++ ) {
    free( array[i] );
    array[i] = NULL;
  }
  free( array );
}
/* Allocate a num_row x num_column array of int as one row-pointer block
 * plus one heap block per row (matching free_array2D_int).
 *
 * Returns the row-pointer array, or NULL if any allocation fails; on
 * failure all partially allocated rows are released (the old code did not
 * check malloc at all and would crash on out-of-memory). */
static int ** allocate_array2d_int( const int num_row,
				    const int num_column )
{
  int i;
  int **array;

  array = (int**) malloc( num_row * sizeof(int*) );
  if ( array == NULL ) {
    return NULL;
  }
  for ( i = 0; i < num_row; i++ ) {
    array[i] = (int*) malloc( num_column * sizeof(int) );
    if ( array[i] == NULL ) {
      /* Roll back the rows allocated so far to avoid a leak. */
      while ( --i >= 0 ) {
	free( array[i] );
      }
      free( array );
      return NULL;
    }
  }
  return array;
}
/* Allocate a Triplets container holding num_triplets triplets and their
 * weights, plus one mesh point per grid point of `mesh`.
 *
 * Returns the container, or NULL if any allocation fails; on failure every
 * partial allocation is released (the old code never checked malloc). */
static Triplets * allocate_triplets( const int num_triplets, const int mesh[3] )
{
  int i, num_grid;
  Triplets * tps;

  num_grid = mesh[0] * mesh[1] * mesh[2];

  tps = (Triplets*) malloc( sizeof( Triplets ) );
  if ( tps == NULL ) {
    return NULL;
  }

  tps->size = num_triplets;
  tps->triplets = (int (*)[3]) malloc( sizeof(int[3]) * num_triplets );
  tps->weights = (int*) malloc( sizeof(int) * num_triplets );
  tps->mesh_points = (int (*)[3]) malloc( sizeof(int[3]) * num_grid );
  if ( tps->triplets == NULL ||
       tps->weights == NULL ||
       tps->mesh_points == NULL ) {
    /* free(NULL) is a no-op, so unconditional frees are safe here. */
    free( tps->triplets );
    free( tps->weights );
    free( tps->mesh_points );
    free( tps );
    return NULL;
  }

  for ( i = 0; i < 3; i++ ) {
    tps->mesh[i] = mesh[i];
  }
  return tps;
}
|
calculate_global_physical_properties.h | #ifndef CALCULATE_GLOBAL_PHYSICAL_PROPERTIES_H
#define CALCULATE_GLOBAL_PHYSICAL_PROPERTIES_H
// /* External includes */
// System includes
// Project includes
#include "utilities/timer.h"
#include "custom_utilities/create_and_destroy.h"
#include "custom_elements/Particle_Contact_Element.h"
#include "includes/variables.h"
/* System includes */
#include <limits>
#include <iostream>
#include <iomanip>
/* External includes */
#ifdef _OPENMP
#include <omp.h>
#endif
/* Project includes */
#include "includes/define.h"
#include "utilities/openmp_utils.h"
namespace Kratos
{
/// Computes global (whole-model-part) physical quantities over spherical DEM
/// particles: volume, mass, center of mass, energies, momenta and related
/// diagnostics.  All loops run over the local-mesh elements, partitioned per
/// OpenMP thread via OpenMPUtils::CreatePartition.
class SphericElementGlobalPhysicsCalculator
{
public:

    typedef ModelPart::ElementsContainerType ElementsArrayType;

    KRATOS_CLASS_POINTER_DEFINITION(SphericElementGlobalPhysicsCalculator);

    /// Default constructor.  Caches the initial center of mass and total mass.
    SphericElementGlobalPhysicsCalculator(ModelPart& r_model_part)
    {
        mInitialCenterOfMassAndMass = CalculateCenterOfMass(r_model_part);
        mInitialMass                = CalculateTotalMass(r_model_part);
    }

    /// Destructor.
    virtual ~SphericElementGlobalPhysicsCalculator(){}

    //***************************************************************************************************************
    //***************************************************************************************************************

    /// Total volume of all non-blocked, non-cluster spherical particles.
    double CalculateTotalVolume(ModelPart& r_model_part)
    {
        OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);
        double added_volume = 0.0;

        #pragma omp parallel for reduction(+ : added_volume)
        for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){
            for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){
                // Blocked elements (e.g., inlet injectors) are excluded from the volume.
                if (it->GetGeometry()[0].Is(BLOCKED)) {
                    continue;
                }
                if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) {
                    SphericParticle& r_spheric_particle = dynamic_cast<Kratos::SphericParticle&> (*it);
                    const double particle_radius = r_spheric_particle.GetRadius();
                    added_volume += 4.0 / 3.0 * Globals::Pi * particle_radius * particle_radius * particle_radius;
                }
            }
        }

        return added_volume;
    }

    //***************************************************************************************************************
    //***************************************************************************************************************
    // Returns the maximum value of a double nodal variable in the model part.
    // (The old comment said "minimum" — fixed.)
    double CalculateMaxNodalVariable(ModelPart& r_model_part, const Variable<double>& r_variable) {
        ElementsArrayType& pElements = r_model_part.GetCommunicator().LocalMesh().Elements();

        KRATOS_ERROR_IF(pElements.size() == 0) << "Cannot compute maximum of the required nodal variable. Empty model part. Could not compute the maximum of the required variable " << r_variable << std::endl;

        ElementsArrayType::iterator it_begin = pElements.ptr_begin();
        KRATOS_ERROR_IF_NOT(it_begin->GetGeometry()[0].SolutionStepsDataHas(r_variable)) << "Cannot compute maximum of the required nodal variable. Missing nodal variable " << r_variable << std::endl;

        // Per-thread maxima, reduced serially afterwards.
        std::vector<double> max_values;
        double max_val = - std::numeric_limits<double>::max();
        max_values.resize(OpenMPUtils::GetNumThreads());

        for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){
            max_values[k] = max_val;
        }

        OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), pElements.size(), mElementsPartition);

        // (Removed a dead elem_counter that was incremented but never read.)
        #pragma omp parallel for
        for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){
            for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){
                max_values[k] = std::max(max_values[k], (it)->GetGeometry()[0].FastGetSolutionStepValue(r_variable));
            }
        }

        // getting the maximum between threads:
        for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){
            max_val = std::max(max_val, max_values[k]);
        }

        return max_val;
    }

    //***************************************************************************************************************
    //***************************************************************************************************************
    // Returns the minimum value of a double nodal variable in the model part.
    double CalculateMinNodalVariable(ModelPart& r_model_part, const Variable<double>& r_variable) {
        ElementsArrayType& pElements = r_model_part.GetCommunicator().LocalMesh().Elements();

        KRATOS_ERROR_IF(pElements.size() == 0) << "Cannot compute minimum of the required nodal variable. Empty model part. Could not compute the maximum of the required variable " << r_variable << std::endl;

        ElementsArrayType::iterator it_begin = pElements.ptr_begin();
        KRATOS_ERROR_IF_NOT(it_begin->GetGeometry()[0].SolutionStepsDataHas(r_variable)) << "Cannot compute minimum of the required nodal variable. Missing variable " << r_variable << std::endl;

        // Per-thread minima, reduced serially afterwards.
        std::vector<double> min_values;
        double min_val = std::numeric_limits<double>::max();
        min_values.resize(OpenMPUtils::GetNumThreads());

        for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){
            min_values[k] = min_val;
        }

        OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), pElements.size(), mElementsPartition);

        #pragma omp parallel for
        for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){
            for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){
                min_values[k] = std::min(min_values[k], (it)->GetGeometry()[0].FastGetSolutionStepValue(r_variable));
            }
        }

        // getting the minimum between threads:
        for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){
            min_val = std::min(min_val, min_values[k]);
        }

        return min_val;
    }

    //***************************************************************************************************************
    //***************************************************************************************************************

    /// Median particle diameter (D50) over the local particles; 0.0 for an
    /// empty model part.
    double CalculateD50(ModelPart& r_model_part)
    {
        const unsigned int size = r_model_part.GetCommunicator().LocalMesh().Elements().size();
        OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), size, mElementsPartition);
        std::vector<double> radii;
        radii.resize(size);
        unsigned int particle_counter = 0;

        // Each thread fills its own contiguous slice of radii, starting at its
        // partition offset.
        #pragma omp parallel for private(particle_counter)
        for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){
            particle_counter = mElementsPartition[k];

            for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){
                SphericParticle& r_spheric_particle = dynamic_cast<Kratos::SphericParticle&> (*it);
                radii[particle_counter] = r_spheric_particle.GetRadius();
                particle_counter++;
            }
        }

        // BUG FIX: the old code tested particle_counter here, but that variable
        // is OpenMP-private, so its value after the parallel region is not the
        // filled count — the function could spuriously return 0.0.  Test the
        // element count instead.
        if (size > 0) {
            std::sort(radii.begin(), radii.end());
            const unsigned int half = size / 2;
            const bool even = (size % 2 == 0);
            // Median diameter: for an even count, the sum of the two middle
            // radii (= twice their average radius); for an odd count, twice the
            // middle radius.  The old even/odd branches were swapped and read
            // radii[half + 1] out of bounds when size == 1.
            const double d50 = even ? radii[half - 1] + radii[half] : 2.0 * radii[half];
            return d50;
        }
        else {
            return 0.00;
        }
    }

    //***************************************************************************************************************
    //***************************************************************************************************************

    /// Total NODAL_MASS of all non-cluster particles.
    double CalculateTotalMass(ModelPart& r_model_part)
    {
        OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(),r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);
        double added_mass = 0.0;

        #pragma omp parallel for reduction(+ : added_mass)
        for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){
            for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){
                if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) {
                    double particle_mass = (it)->GetGeometry()[0].FastGetSolutionStepValue(NODAL_MASS);
                    added_mass += particle_mass;
                }
            }
        }

        return added_mass;
    }

    //***************************************************************************************************************
    //***************************************************************************************************************

    /// Mass-weighted center of mass of all non-cluster particles.
    /// Returns the zero vector for a massless/empty model part.
    array_1d<double, 3> CalculateCenterOfMass(ModelPart& r_model_part)
    {
        OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);

        const double total_mass = CalculateTotalMass(r_model_part);
        array_1d<double, 3> center_of_mass;

        // Guard against division by zero (the old code computed 1/0 and
        // returned infinities for an empty model part).
        if (total_mass == 0.0) {
            center_of_mass[0] = 0.0;
            center_of_mass[1] = 0.0;
            center_of_mass[2] = 0.0;
            return center_of_mass;
        }

        const double total_mass_inv = 1.0 / total_mass;
        double cm_x = 0.0;
        double cm_y = 0.0;
        double cm_z = 0.0;

        #pragma omp parallel for reduction(+ : cm_x, cm_y, cm_z)
        for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){
            for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){
                if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) {
                    double particle_mass = (it)->GetGeometry()[0].FastGetSolutionStepValue(NODAL_MASS);
                    cm_x += particle_mass * (it)->GetGeometry()[0].Coordinates()[0];
                    cm_y += particle_mass * (it)->GetGeometry()[0].Coordinates()[1];
                    cm_z += particle_mass * (it)->GetGeometry()[0].Coordinates()[2];
                }
            }
        }

        center_of_mass[0] = total_mass_inv * cm_x;
        center_of_mass[1] = total_mass_inv * cm_y;
        center_of_mass[2] = total_mass_inv * cm_z;

        return center_of_mass;
    }

    //***************************************************************************************************************
    //***************************************************************************************************************

    /// Gravitational potential energy, m * g . (reference_point - center_of_mass);
    /// 0.0 when the total mass vanishes.
    double CalculateGravitationalPotentialEnergy(ModelPart& r_model_part, const array_1d<double, 3> reference_point)
    {
        double gravitational_energy;
        const double total_mass = CalculateTotalMass(r_model_part);

        if (total_mass == 0) gravitational_energy = 0.0;
        else {
            const array_1d<double, 3>& gravity = r_model_part.GetProcessInfo()[GRAVITY];
            const array_1d<double, 3> center_of_mass = CalculateCenterOfMass(r_model_part);
            const array_1d<double, 3> center_of_mass_to_reference = reference_point - center_of_mass;
            gravitational_energy = total_mass * (center_of_mass_to_reference[0] * gravity[0] + center_of_mass_to_reference[1] * gravity[1] + center_of_mass_to_reference[2] * gravity[2]);
        }

        return gravitational_energy;
    }

    //***************************************************************************************************************
    //***************************************************************************************************************

    /// Sum of per-particle translational kinetic energy (delegated to each element).
    double CalculateTranslationalKinematicEnergy(ModelPart& r_model_part)
    {
        OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);
        double kinematic_energy = 0.0;

        #pragma omp parallel for reduction(+ : kinematic_energy)
        for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){
            for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){
                if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) {
                    double particle_translational_kinematic_energy = 0.0;
                    (it)->Calculate(PARTICLE_TRANSLATIONAL_KINEMATIC_ENERGY, particle_translational_kinematic_energy, r_model_part.GetProcessInfo());
                    kinematic_energy += particle_translational_kinematic_energy;
                }
            }
        }

        return kinematic_energy;
    }

    //***************************************************************************************************************
    //***************************************************************************************************************

    /// Sum of per-particle rotational kinetic energy.
    double CalculateRotationalKinematicEnergy(ModelPart& r_model_part)
    {
        OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);
        double rotational_kinematic_energy = 0.0;

        #pragma omp parallel for reduction(+ : rotational_kinematic_energy)
        for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){
            for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){
                if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) {
                    double particle_rotational_kinematic_energy = 0.0;
                    (it)->Calculate(PARTICLE_ROTATIONAL_KINEMATIC_ENERGY, particle_rotational_kinematic_energy, r_model_part.GetProcessInfo());
                    rotational_kinematic_energy += particle_rotational_kinematic_energy;
                }
            }
        }

        return rotational_kinematic_energy;
    }

    //***************************************************************************************************************
    //***************************************************************************************************************

    /// Sum of per-particle stored elastic energy.
    double CalculateElasticEnergy(ModelPart& r_model_part)
    {
        OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);
        double elastic_energy = 0.0;

        #pragma omp parallel for reduction(+ : elastic_energy)
        for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){
            for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){
                if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) {
                    double particle_elastic_energy = 0.0;
                    (it)->Calculate(PARTICLE_ELASTIC_ENERGY, particle_elastic_energy, r_model_part.GetProcessInfo());
                    elastic_energy += particle_elastic_energy;
                }
            }
        }

        return elastic_energy;
    }

    //***************************************************************************************************************
    //***************************************************************************************************************

    /// Cumulative energy dissipated by friction.
    double CalculateInelasticFrictionalEnergy(ModelPart& r_model_part)
    {
        OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);
        double frictional_energy = 0.0;

        #pragma omp parallel for reduction(+ : frictional_energy)
        for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){
            for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){
                if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) {
                    double particle_frictional_energy = 0.0;
                    (it)->Calculate(PARTICLE_INELASTIC_FRICTIONAL_ENERGY, particle_frictional_energy, r_model_part.GetProcessInfo());
                    frictional_energy += particle_frictional_energy;
                }
            }
        }

        return frictional_energy;
    }

    /// Cumulative energy dissipated by viscous damping.
    double CalculateInelasticViscodampingEnergy(ModelPart& r_model_part)
    {
        OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);
        double viscodamping_energy = 0.0;

        #pragma omp parallel for reduction(+ : viscodamping_energy)
        for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){
            for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){
                if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) {
                    double particle_viscodamping_energy = 0.0;
                    (it)->Calculate(PARTICLE_INELASTIC_VISCODAMPING_ENERGY, particle_viscodamping_energy, r_model_part.GetProcessInfo());
                    viscodamping_energy += particle_viscodamping_energy;
                }
            }
        }

        return viscodamping_energy;
    }

    //***************************************************************************************************************
    //***************************************************************************************************************

    /// Total linear momentum of all non-cluster particles.
    array_1d<double, 3> CalculateTotalMomentum(ModelPart& r_model_part)
    {
        OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);
        double m_x = 0.0;
        double m_y = 0.0;
        double m_z = 0.0;

        #pragma omp parallel for reduction(+ : m_x, m_y, m_z)
        for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){
            for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){
                if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) {
                    array_1d<double, 3> particle_momentum;
                    (it)->Calculate(MOMENTUM, particle_momentum, r_model_part.GetProcessInfo());
                    m_x += particle_momentum[0];
                    m_y += particle_momentum[1];
                    m_z += particle_momentum[2];
                }
            }
        }

        array_1d<double, 3> momentum;
        momentum[0] = m_x;
        momentum[1] = m_y;
        momentum[2] = m_z;

        return momentum;
    }

    //***************************************************************************************************************
    //***************************************************************************************************************

    /// Total angular momentum about the current center of mass.
    /// NOTE: the misspelled name ("Calulate") is kept for API compatibility.
    array_1d<double, 3> CalulateTotalAngularMomentum(ModelPart& r_model_part)
    {
        OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(), r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);
        const array_1d<double, 3> center_of_mass = CalculateCenterOfMass(r_model_part);
        double am_x = 0.0;
        double am_y = 0.0;
        double am_z = 0.0;

        #pragma omp parallel for reduction(+ : am_x, am_y, am_z)
        for (int k = 0; k < OpenMPUtils::GetNumThreads(); k++){
            for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){
                if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)) {
                    array_1d<double, 3> particle_momentum;
                    array_1d<double, 3> particle_local_angular_momentum;
                    array_1d<double, 3> center_of_mass_to_particle = (it)->GetGeometry()[0].Coordinates() - center_of_mass;
                    (it)->Calculate(MOMENTUM, particle_momentum, r_model_part.GetProcessInfo());
                    (it)->Calculate(ANGULAR_MOMENTUM, particle_local_angular_momentum, r_model_part.GetProcessInfo());
                    array_1d<double, 3> aux;
                    // NOTE(review): this computes p x r rather than the usual
                    // L = r x p; the argument order of CrossProduct should be
                    // confirmed against MathUtils before relying on the sign.
                    Kratos::MathUtils<double>::CrossProduct(aux, particle_momentum, center_of_mass_to_particle);
                    am_x += particle_local_angular_momentum[0] + aux[0];
                    am_y += particle_local_angular_momentum[1] + aux[1];
                    am_z += particle_local_angular_momentum[2] + aux[2];
                }
            }
        }

        array_1d<double, 3> angular_momentum;
        angular_momentum[0] = am_x;
        angular_momentum[1] = am_y;
        angular_momentum[2] = am_z;

        return angular_momentum;
    }

    //***************************************************************************************************************
    //***************************************************************************************************************
    // Check by how much Newton's Third Law is violated (sum of contact forces
    // over all particles; should be ~0 for a consistent contact model).
    array_1d<double, 3> CalculateSumOfInternalForces(ModelPart& r_model_part)
    {
        OpenMPUtils::CreatePartition(OpenMPUtils::GetNumThreads(),r_model_part.GetCommunicator().LocalMesh().Elements().size(), mElementsPartition);
        double sum_of_contact_forces_x = 0.0;
        double sum_of_contact_forces_y = 0.0;
        double sum_of_contact_forces_z = 0.0;

        #pragma omp parallel for reduction(+ : sum_of_contact_forces_x, sum_of_contact_forces_y, sum_of_contact_forces_z)
        for (int k = 0; k < OpenMPUtils::GetNumThreads(); ++k){
            for (ElementsArrayType::iterator it = GetElementPartitionBegin(r_model_part, k); it != GetElementPartitionEnd(r_model_part, k); ++it){
                if ((it)->IsNot(DEMFlags::BELONGS_TO_A_CLUSTER)){
                    const array_1d<double, 3>& contact_force = (it)->GetGeometry()[0].FastGetSolutionStepValue(CONTACT_FORCES);
                    sum_of_contact_forces_x += contact_force[0];
                    sum_of_contact_forces_y += contact_force[1];
                    sum_of_contact_forces_z += contact_force[2];
                }
            }
        }

        array_1d<double, 3> sum_of_contact_forces;
        sum_of_contact_forces[0] = sum_of_contact_forces_x;
        sum_of_contact_forces[1] = sum_of_contact_forces_y;
        sum_of_contact_forces[2] = sum_of_contact_forces_z;

        return sum_of_contact_forces;
    }

    //***************************************************************************************************************
    //***************************************************************************************************************

    ///@}
    ///@name Access
    ///@{

    /// Center of mass cached at construction time.
    array_1d<double, 3> GetInitialCenterOfMass()
    {
        return mInitialCenterOfMassAndMass;
    }

    ///@}
    ///@name Input and output
    ///@{

    /// Turn back information as a string.
    virtual std::string Info() const
    {
        return "";
    }

    /// Print information about this object.
    virtual void PrintInfo(std::ostream& rOStream) const
    {
    }

    /// Print object's data.
    virtual void PrintData(std::ostream& rOStream) const
    {
    }

    ///@}
    ///@name Friends
    ///@{

    std::vector<unsigned int>& GetElementPartition()
    {
        return (mElementsPartition);
    }

    /// Iterator to the first local element assigned to thread partition k.
    ElementsArrayType::iterator GetElementPartitionBegin(ModelPart& r_model_part, unsigned int k)
    {
        ElementsArrayType& pElements = r_model_part.GetCommunicator().LocalMesh().Elements();
        return (pElements.ptr_begin() + mElementsPartition[k]);
    }

    /// Past-the-end iterator of thread partition k.
    ElementsArrayType::iterator GetElementPartitionEnd(ModelPart& r_model_part, unsigned int k)
    {
        ElementsArrayType& pElements = r_model_part.GetCommunicator().LocalMesh().Elements();
        return (pElements.ptr_begin() + mElementsPartition[k + 1]);
    }

    ///@}

protected:
    ///@name Protected Access
    ///@{

    // Per-thread element partition offsets (size = num_threads + 1).
    std::vector<unsigned int> mElementsPartition;

    ///@}

private:
    ///@name Member r_variables
    ///@{

    array_1d<double, 3> mInitialCenterOfMassAndMass;  // center of mass at construction
    double mInitialMass;                              // total mass at construction

    ///@}
    ///@name Un accessible methods
    ///@{

    /// Assignment operator.
    SphericElementGlobalPhysicsCalculator & operator=(SphericElementGlobalPhysicsCalculator const& rOther);

    ///@}
}; // Class SphericElementGlobalPhysicsCalculator
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
} // namespace Kratos.
#endif // CALCULATE_GLOBAL_PHYSICAL_PROPERTIES_H
|
scatmat.c | #include <stdlib.h>
#include <complex.h>
#include <float.h>
#include <math.h>
#include <string.h>
#ifdef _MACOSX
#include <Accelerate/Accelerate.h>
#else
#include <cblas.h>
#endif
#include "fastsphere.h"
#include "spreflect.h"
#include "translator.h"
#include "scatmat.h"
#include "fsht.h"
#include "farfield.h"
#include "util.h"
/* Apply each sphere's surface reflection operator, in spherical-harmonic
 * space, to the incident plane-wave samples stored in rhs (one nterm-sized
 * segment per sphere).  Returns the number of spheres processed. */
int sprflpw (complex double *rhs, spscat *spl, int nsph, shdata *shtr) {
	int isph, nterm;

	nterm = shtr->ntheta * shtr->nphi;

#pragma omp parallel for default(shared)
	for (isph = 0; isph < nsph; ++isph) {
		spscat *sph = spl + isph;
		complex double *buf = rhs + isph * nterm;

		/* Forward spherical-harmonic transform of this sphere's segment. */
		ffsht (buf, shtr, sph->spdesc->deg);
		/* Multiply by the reflection coefficient in SH space. */
		spreflect (buf, buf, sph->spdesc->reflect,
				sph->spdesc->deg, shtr->nphi, 0, 1);
		/* Back to angular samples. */
		ifsht (buf, shtr, sph->spdesc->deg);
	}

	return nsph;
}
/* Compute translations between all spheres. Augments the output vector, does
 * not overwrite it.
 *
 * vout  (in/out) accumulated translated field, nsph segments of nterm values.
 * vin   (in)     source field, same layout.
 * nsph  number of spheres; trans is an nsph*nsph table of translators,
 *       indexed source-major (off = j * nsph + i).
 * shtr  spherical-harmonic transform descriptor (fixes nterm).
 *
 * Only diagonal plane-wave (TRPLANE) translators are applied here; dense
 * translators and self-translations (i == j) are skipped. */
int sptrans (complex double *vout, complex double *vin,
		int nsph, trdesc *trans, shdata *shtr) {
	int nterm, nsq;

	nterm = shtr->ntheta * shtr->nphi;
	nsq = nsph * nsph;

	/* Perform the translations. */
#pragma omp parallel default(shared)
{
	complex double *voptr, *viptr;
	int i, j, off, k;

#pragma omp for
	for (off = 0; off < nsq; ++off) {
		j = off / nsph;	/* Source sphere. */
		i = off % nsph;	/* Destination sphere. */

		/* Don't bother with self-translations. Also ignore dense
		 * translations for the moment. */
		if (i == j || trans[off].type != TRPLANE) continue;

		/* Do the diagonal, plane-wave translation. */
		voptr = vout + i * nterm;
		viptr = vin + j * nterm;

		/* Copy to output, but only one thread at a time.
		 * NOTE(review): the single named critical section serializes the
		 * accumulation across ALL destination spheres, not just writers of
		 * the same segment; per-destination locks would allow more overlap
		 * -- confirm before changing. */
#pragma omp critical(outplane)
		for (k = 0; k < nterm; ++k)
			voptr[k] += trans[off].trdata[k] * viptr[k];
	}
}

	return nsph;
}
/* Matrix-vector product with the scattering operator:
 * vout = vin - reflect(translate(vin)).  Returns nsph. */
int scatmat (complex double *vout, complex double *vin, spscat *spl,
		int nsph, trdesc *trans, shdata *shtr) {
	int idx, nterm, ntotal;

	nterm = shtr->ntheta * shtr->nphi;
	ntotal = nterm * nsph;

	/* Start from a zeroed outgoing field. */
	memset (vout, 0, ntotal * sizeof(complex double));

	/* Accumulate the inter-sphere translations of the incoming field. */
	sptrans (vout, vin, nsph, trans, shtr);

	/* Reflect the translated field at every sphere surface. */
	sprflpw (vout, spl, nsph, shtr);

	/* Form the residual field: incoming minus outgoing. */
#pragma omp parallel for default(shared)
	for (idx = 0; idx < ntotal; ++idx)
		vout[idx] = vin[idx] - vout[idx];

	return nsph;
}
/* Solve the scattering system with unpreconditioned BiCG-STAB.
 *
 * sol   (in/out) solution vector; used as the initial guess when guess != 0,
 *       otherwise zeroed before iterating.
 * rhs   (in)     right-hand side; residuals are scaled by its norm.
 * guess nonzero to use sol as the starting iterate.
 * spl, nsph, trans, shtr  operator description forwarded to scatmat().
 * itc   iteration limit (itc->iter) and relative tolerance (itc->eps).
 *
 * Returns the number of full iterations performed.  Prints the scaled
 * residual after each half and full step. */
int bicgstab (complex double *sol, complex double *rhs, int guess, spscat *spl,
		int nsph, trdesc *trans, shdata *shtr, itconf *itc) {
	int i, j, n, nterm;
	complex double *r, *rhat, *v, *p, *t;
	complex double rho, alpha, omega, beta;
	double err, rhn;

	nterm = shtr->ntheta * shtr->nphi;
	n = nterm * nsph;
	rho = alpha = omega = 1.;

	/* Allocate and zero the work arrays: one calloc carved into the five
	 * length-n vectors r, rhat, v, p, t.
	 * NOTE(review): the calloc result is not checked; on failure the loop
	 * below dereferences NULL -- consider returning early. */
	r = calloc (5 * n, sizeof(complex double));
	rhat = r + n;
	v = rhat + n;
	p = v + n;
	t = p + n;

	/* Compute the norm of the right-hand side for residual scaling. */
	rhn = cblas_dznrm2 (n, rhs, 1);

	/* Compute the inital matrix-vector product for the input guess. */
	if (guess) scatmat (r, sol, spl, nsph, trans, shtr);

	/* Subtract from the RHS to form the residual.  (Without a guess, r is
	 * still zero from calloc, so this just copies rhs.) */
#pragma omp parallel for default(shared) private(j)
	for (j = 0; j < n; ++j) r[j] = rhs[j] - r[j];

	if (!guess) memset (sol, 0, n * sizeof(complex double));

	/* Copy the initial residual as the test (shadow) vector rhat. */
	memcpy (rhat, r, n * sizeof(complex double));

	/* Find the norm of the initial residual. */
	err = cblas_dznrm2(n, r, 1) / rhn;
	printf ("True residual: %g\n", err);

	/* Run iterations until convergence or the maximum is reached. */
	for (i = 0; i < itc->iter && err > itc->eps; ++i) {
		/* Pre-compute portion of beta from previous iteration. */
		beta = alpha / (rho * omega);
		/* Compute rho for this iteration. */
		rho = pardot (rhat, r, n);
		/* Include the missing factor in beta. */
		beta *= rho;

		/* Update the search vector. */
#pragma omp parallel for default(shared) private(j)
		for (j = 0; j < n; ++j)
			p[j] = r[j] + beta * (p[j] - omega * v[j]);

		/* Compute the first search step, v = A * p. */
		scatmat (v, p, spl, nsph, trans, shtr);

		/* Compute the next alpha. */
		alpha = rho / pardot (rhat, v, n);

		/* Half-step update of the solution and residual. */
#pragma omp parallel for default(shared) private(j)
		for (j = 0; j < n; ++j) {
			/* Update the solution vector. */
			sol[j] += alpha * p[j];
			/* Update the residual vector. */
			r[j] -= alpha * v[j];
		}

		/* Compute the scaled residual norm and stop if convergence
		 * has been achieved. */
		err = cblas_dznrm2 (n, r, 1) / rhn;
		printf ("BiCG-STAB(%0.1f): %g\n", 0.5 + i, err);
		if (err < itc->eps) break;

		/* Compute the next search step, t = A * r. */
		scatmat (t, r, spl, nsph, trans, shtr);
		/* Compute the update direction (stabilization parameter). */
		omega = pardot (t, r, n) / pardot (t, t, n);

		/* Update both the residual and the solution guess. */
#pragma omp parallel for default(shared) private(j)
		for (j = 0; j < n; ++j) {
			/* Update the solution vector. */
			sol[j] += omega * r[j];
			/* Update the residual vector. */
			r[j] -= omega * t[j];
		}

		/* Compute the scaled residual norm. */
		err = cblas_dznrm2 (n, r, 1) / rhn;
		printf ("BiCG-STAB(%d): %g\n", i + 1, err);
	}

	/* Frees rhat, v, p and t too: they alias the same calloc block. */
	free (r);
	return i;
}
/* GMRES solver (no restarts; Krylov dimension up to itc->iter) for the
 * multiple-scattering linear system.
 * sol:   solution vector (in: initial guess when guess != 0; out: solution).
 * rhs:   right-hand side vector.
 * guess: nonzero if sol already holds an initial guess.
 * spl, nsph, trans, shtr: scattering problem description; the matrix is
 *        applied implicitly by scatmat().
 * itc:   iteration control (maximum iterations itc->iter, tolerance itc->eps).
 * The least-squares problem is solved incrementally with Givens rotations
 * (LAPACK zlartg_/zrot_), so the residual estimate is available each step.
 * Returns the number of Arnoldi iterations performed. */
int gmres (complex double *sol, complex double *rhs, int guess, spscat *spl,
int nsph, trdesc *trans, shdata *shtr, itconf *itc) {
int nterm = shtr->ntheta * shtr->nphi, n = nterm * nsph;
long lwork;
int i, j, one = 1, mit = itc->iter;
complex double *h, *v, *beta, *y;
complex double *vp, *hp, *s, cr, cone = 1.;
double rhn, err, *c;
/* Allocate space for all required complex vectors. */
/* NOTE(review): calloc/malloc results are not checked before use. */
lwork = (mit + 1) * (mit + n + 1) + mit;
v = calloc (lwork, sizeof(complex double)); /* The Krylov subspace. */
beta = v + n * (mit + 1); /* The least-squares RHS. */
h = beta + mit + 1; /* The upper Hessenberg matrix. */
s = h + (mit + 1) * mit; /* Givens rotation sines. */
/* Allocate space for the Givens rotation cosines (real-valued). */
c = malloc (mit * sizeof(double));
/* Compute the norm of the RHS for residual scaling. */
rhn = cblas_dznrm2 (n, rhs, 1);
/* Compute the initial matrix-vector product for the input guess. */
if (guess) scatmat (v, sol, spl, nsph, trans, shtr);
/* Subtract from the RHS to form the residual. */
#pragma omp parallel for default(shared) private(j)
for (j = 0; j < n; ++j) v[j] = rhs[j] - v[j];
/* Zero the initial guess if one wasn't provided. */
if (!guess) memset (sol, 0, n * sizeof(complex double));
/* Find the norm of the initial residual. */
err = cblas_dznrm2(n, v, 1);
/* Construct the initial Arnoldi vector by normalizing the residual. */
#pragma omp parallel for default(shared) private(j)
for (j = 0; j < n; ++j) v[j] /= err;
/* Construct the vector beta for the minimization problem. */
beta[0] = err;
/* Report the RRE (relative residual error). */
err /= rhn;
printf ("True residual: %g\n", err);
for (i = 0; i < mit && err > itc->eps; ++i) {
/* Point to the working space for this iteration: vp is the current
 * basis vector, hp the current Hessenberg column. */
vp = v + i * n;
hp = h + i * (mit + 1);
/* Compute the next expansion of the Krylov space. */
scatmat (vp + n, vp, spl, nsph, trans, shtr);
/* Perform modified Gram-Schmidt to orthogonalize the basis. */
/* This also builds the Hessenberg matrix column. */
cmgs (vp + n, hp, v, n, i + 1);
/* Compute the norm of the next basis vector; the norm is real, so the
 * complex slot hp[i+1] holds a purely real value. */
hp[i + 1] = cblas_dznrm2(n, vp + n, 1);
/* Avoid breakdown (lucky termination: exact solution in the space). */
if (cabs(hp[i + 1]) < DBL_EPSILON) {
++i;
break;
}
/* Normalize the basis vector (creal() is safe: hp[i+1] is real). */
#pragma omp parallel for default(shared) private(j)
for (j = 0; j < n; ++j) vp[n + j] /= creal(hp[i + 1]);
/* Apply previous Givens rotations to the Hessenberg column. */
for (j = 0; j < i; ++j)
zrot_ (&one, (void *)(hp + j), &one,
(void *)(hp + j + 1), &one,
(void *)(c + j), (void *)(s + j));
/* Compute the Givens rotation for the current iteration. */
zlartg_ ((void *)(hp + i), (void *)(hp + i + 1),
(void *)(c + i), (void *)(s + i), (void *)(&cr));
/* Apply the current Givens rotation to the Hessenberg column,
 * annihilating the subdiagonal entry. */
hp[i] = cr;
hp[i + 1] = 0;
/* Perform the rotation on the vector beta. */
zrot_ (&one, (void *)(beta + i), &one,
(void *)(beta + i + 1), &one,
(void *)(c + i), (void *)(s + i));
/* Estimate the RRE for this iteration (cheap: |beta[i+1]|). */
err = cabs(beta[i + 1]) / rhn;
printf ("GMRES(%d): %g\n", i, err);
}
/* If there were any GMRES iterations, update the solution. */
if (i > 0) {
/* Compute the minimizer of the least-squares problem by back
 * substitution on the (now upper-triangular) Hessenberg matrix. */
cblas_ztrsv (CblasColMajor, CblasUpper, CblasNoTrans,
CblasNonUnit, i, h, mit + 1, beta, 1);
/* Compute the update to the solution: sol += V * y. */
cblas_zgemv (CblasColMajor, CblasNoTrans, n, i,
&cone, v, n, beta, 1, &cone, sol, 1);
}
/* v owns the whole complex workspace (beta, h, s are offsets into it). */
free (v);
free (c);
return i;
}
|
main.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#include "omp.h"
#include "functions.h"
/* Demo driver for an ElGamal cryptosystem: sets up keys, encrypts and
 * decrypts a sample message, then brute-forces the secret key in parallel
 * with OpenMP.
 * argv[1]: number of OpenMP threads to use.
 * Returns 0 on success, nonzero on usage or allocation errors. */
int main (int argc, char **argv) {
  /* Guard against missing command-line argument before touching argv[1]. */
  if (argc < 2) {
    printf("Usage: %s <num_threads>\n", argv[0]);
    return 1;
  }
  int Nthreads = atoi(argv[1]);
  if (Nthreads < 1) Nthreads = 1;
  omp_set_num_threads(Nthreads);

  /* Seed the randomizer with the clock so each run differs; substitute a
   * fixed constant here for reproducible runs.  srand takes unsigned int. */
  srand((unsigned int) clock());

  /* Storage for an ElGamal cryptosystem: modulus p, generator g, public
   * key h, secret key x. */
  unsigned int p, g, h, x;
  /* Bit size of the modulus (interactive prompt disabled). */
  unsigned int n = 15;
  /* Make sure the input makes sense: need at least 9 bits to encode one
   * char, and at most 31 to fit in an unsigned int. */
  if ((n < 9) || (n > 31)) {
    printf("Unsupported bit size.\n");
    return 0;
  }
  printf("\n");

  /* Set up an ElGamal cryptosystem. */
  setupElGamal(n, &p, &g, &h, &x);

  int bufferSize = 1024;
  unsigned char *message = malloc(bufferSize * sizeof(unsigned char));
  if (!message) {
    printf("Out of memory.\n");
    return 1;
  }
  /* Populate the string with a message. */
  strcpy((char *) message, "Hello, this is the message as a string.");
  printf("Message = \"%s\"\n", message);

  /* Number of full chars that fit in one n-bit integer:
   * 9..16 bits -> 1 char, 17..24 -> 2, 25..31 -> 3. */
  unsigned int charsPerInt = (n - 1) / 8;
  padString(message, charsPerInt);
  printf("Padded Message = \"%s\"\n", message);
  unsigned int Nchars = strlen((char *) message);
  unsigned int Nints = Nchars / charsPerInt;

  /* Storage for the message as elements of Z_p, and for the extra
   * encryption coefficients. */
  unsigned int *Zmessage = malloc(Nints * sizeof(unsigned int));
  unsigned int *a = malloc(Nints * sizeof(unsigned int));
  if (!Zmessage || !a) {
    printf("Out of memory.\n");
    free(message); free(Zmessage); free(a);
    return 1;
  }

  /* Cast the string into an unsigned int array. */
  convertStringToZ(message, Nchars, Zmessage, Nints);

  /* Encrypt the Zmessage with the ElGamal cryptographic system. */
  ElGamalEncrypt(Zmessage, a, Nints, p, g, h);
  printf("The encrypted text is: ");
  for (unsigned int i = 0; i < Nints; i++) {
    printf("(%u,%u) ", Zmessage[i], a[i]);
  }
  printf("]\n");

  char *finalString = cypherToString(Zmessage, a, Nints);
  printf("The encrypted numbers as a string: %s\n", finalString);
  convertStringBack(finalString);
  /* NOTE(review): finalString's ownership is unclear (does cypherToString
   * allocate?) — confirm before freeing it here. */

  /* Decrypt the Zmessage with the ElGamal cryptographic system. */
  ElGamalDecrypt(Zmessage, a, Nints, p, x);
  convertZToString(Zmessage, Nints, message, Nchars);
  printf("Decrypted Message = \"%s\"\n", message);
  printf("\n");

  /* Suppose we don't know the secret key: brute-force the exponent with
   * OpenMP threads. */
  printf("Using %d OpenMP threads to find the secret key...\n", Nthreads);
  double startTime = omp_get_wtime();
  int found = 0;
  /* The OpenMP loop iteration variable must never be modified inside the
   * loop body (that is non-conforming); remaining iterations are skipped
   * cheaply by testing the shared flag instead. */
  #pragma omp parallel for shared(found)
  for (unsigned int i = 0; i < p - 1; i++) {
    if (found) continue;
    if (modExp(g, i + 1, p) == h) {
      /* Serialize the flag write and the report to avoid a data race. */
      #pragma omp critical
      {
        found = 1;
        /* NOTE(review): the tested exponent is i + 1 but i is printed, as
         * in the original code — confirm which convention x follows. */
        printf("Secret key found! x = %u \n", i);
      }
    }
  }
  double endTime = omp_get_wtime();
  double totalTime = endTime - startTime;
  double work = (double) p;
  double throughput = work / totalTime;
  printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput);

  free(message);
  free(Zmessage);
  free(a);
  return 0;
}
|
convolution_3x3.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
// 3x3 stride-1 convolution, NEON-optimized.
// bottom_blob: input feature maps (inch channels, width w).
// top_blob: pre-sized output feature maps (outch channels, outw x outh).
// _kernel: weights laid out as outch * inch * 9 floats.
// _bias: optional per-output-channel bias (may be empty).
// Strategy: output channels are processed two at a time (better weight
// reuse), and within each channel pair output rows are processed two at a
// time; leftover channels/rows fall through to single-channel/single-row
// paths below.  Inner loops use NEON intrinsics on aarch64, hand-written
// inline asm on 32-bit ARM, and plain scalar code otherwise.
static void conv3x3s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
// Pair up output channels; remainder handled by the second omp loop.
int nn_outch = outch >> 1;
int remain_outch_start = nn_outch << 1;
#pragma omp parallel for
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 2;
Mat out0 = top_blob.channel(p);
Mat out1 = top_blob.channel(p+1);
const float bias0 = bias ? bias[p] : 0.f;
const float bias1 = bias ? bias[p+1] : 0.f;
out0.fill(bias0);
out1.fill(bias1);
const float* k0 = kernel + p*inch*9;
const float* k1 = kernel + (p+1)*inch*9;
// Accumulate the contribution of each input channel into both outputs.
for (int q=0; q<inch; q++)
{
float* outptr0 = out0;
float* outptr1 = out1;
float* outptr0n = outptr0 + outw;
float* outptr1n = outptr1 + outw;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
#if __ARM_NEON
// Kernel rows for both output channels, 3 taps each (4th lane unused).
float32x4_t _k00 = vld1q_f32(k0);
float32x4_t _k03 = vld1q_f32(k0+3);
float32x4_t _k06 = vld1q_f32(k0+6);
float32x4_t _k10 = vld1q_f32(k1);
float32x4_t _k13 = vld1q_f32(k1+3);
float32x4_t _k16 = vld1q_f32(k1+6);
#endif // __ARM_NEON
int i = 0;
// Two output rows per iteration (rows i and i+1 share input rows r1, r2).
for (; i+1 < outh; i+=2)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _sum1 = vld1q_f32(outptr1);
float32x4_t _sum0n = vld1q_f32(outptr0n);
float32x4_t _sum1n = vld1q_f32(outptr1n);
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r00n = vld1q_f32(r0 + 4);
float32x4_t _r01 = vextq_f32(_r00, _r00n, 1);
float32x4_t _r02 = vextq_f32(_r00, _r00n, 2);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r10n = vld1q_f32(r1 + 4);
float32x4_t _r11 = vextq_f32(_r10, _r10n, 1);
float32x4_t _r12 = vextq_f32(_r10, _r10n, 2);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r20n = vld1q_f32(r2 + 4);
float32x4_t _r21 = vextq_f32(_r20, _r20n, 1);
float32x4_t _r22 = vextq_f32(_r20, _r20n, 2);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r30n = vld1q_f32(r3 + 4);
float32x4_t _r31 = vextq_f32(_r30, _r30n, 1);
float32x4_t _r32 = vextq_f32(_r30, _r30n, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _r00, _k00, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _r01, _k00, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _r02, _k00, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _r10, _k03, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _r11, _k03, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _r12, _k03, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _r20, _k06, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _r21, _k06, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _r22, _k06, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _r00, _k10, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _r01, _k10, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _r02, _k10, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _r10, _k13, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _r11, _k13, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _r12, _k13, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _r20, _k16, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _r21, _k16, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _r22, _k16, 2);
_sum0n = vfmaq_laneq_f32(_sum0n, _r10, _k00, 0);
_sum0n = vfmaq_laneq_f32(_sum0n, _r11, _k00, 1);
_sum0n = vfmaq_laneq_f32(_sum0n, _r12, _k00, 2);
_sum0n = vfmaq_laneq_f32(_sum0n, _r20, _k03, 0);
_sum0n = vfmaq_laneq_f32(_sum0n, _r21, _k03, 1);
_sum0n = vfmaq_laneq_f32(_sum0n, _r22, _k03, 2);
_sum0n = vfmaq_laneq_f32(_sum0n, _r30, _k06, 0);
_sum0n = vfmaq_laneq_f32(_sum0n, _r31, _k06, 1);
_sum0n = vfmaq_laneq_f32(_sum0n, _r32, _k06, 2);
_sum1n = vfmaq_laneq_f32(_sum1n, _r10, _k10, 0);
_sum1n = vfmaq_laneq_f32(_sum1n, _r11, _k10, 1);
_sum1n = vfmaq_laneq_f32(_sum1n, _r12, _k10, 2);
_sum1n = vfmaq_laneq_f32(_sum1n, _r20, _k13, 0);
_sum1n = vfmaq_laneq_f32(_sum1n, _r21, _k13, 1);
_sum1n = vfmaq_laneq_f32(_sum1n, _r22, _k13, 2);
_sum1n = vfmaq_laneq_f32(_sum1n, _r30, _k16, 0);
_sum1n = vfmaq_laneq_f32(_sum1n, _r31, _k16, 1);
_sum1n = vfmaq_laneq_f32(_sum1n, _r32, _k16, 2);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr1, _sum1);
vst1q_f32(outptr0n, _sum0n);
vst1q_f32(outptr1n, _sum1n);
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
outptr0 += 4;
outptr1 += 4;
outptr0n += 4;
outptr1n += 4;
}
#else
if (nn > 0)
{
asm volatile(
"pld [%5, #192] \n"
"vld1.f32 {d16-d18}, [%5 :64] \n"// r0
"add %5, #16 \n"
"pld [%8, #192] \n"
"vld1.f32 {d28-d30}, [%8] \n"// r3
"add %8, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q14, q15, #2 \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d12-d13}, [%1 :64] \n"// _sum0
"pld [%2, #128] \n"
"vld1.f32 {d14-d15}, [%2 :64] \n"// _sum1
"vmla.f32 q6, q8, %e18[0] \n"
"vmla.f32 q7, q8, %e21[0] \n"
"pld [%3, #128] \n"
"vld1.f32 {d24-d25}, [%3] \n"// _sum0n
"pld [%4, #128] \n"
"vld1.f32 {d26-d27}, [%4] \n"// _sum1n
"vmla.f32 q12, q14, %e20[0] \n"
"vmla.f32 q13, q14, %e23[0] \n"
"vext.32 q8, q8, q9, #2 \n"
"vext.32 q9, q14, q15, #1 \n"
"vmla.f32 q6, q10, %e18[1] \n"
"vmla.f32 q7, q10, %e21[1] \n"
"vmla.f32 q12, q11, %f20[0] \n"
"vmla.f32 q13, q11, %f23[0] \n"
"pld [%6, #192] \n"
"vld1.f32 {d28-d30}, [%6] \n"// r1
"add %6, #16 \n"
"vmla.f32 q6, q8, %f18[0] \n"
"vmla.f32 q7, q8, %f21[0] \n"
"vmla.f32 q12, q9, %e20[1] \n"
"vmla.f32 q13, q9, %e23[1] \n"
"vext.32 q10, q14, q15, #1 \n"
"vmla.f32 q6, q14, %e19[0] \n"
"vmla.f32 q7, q14, %e22[0] \n"
"vmla.f32 q12, q14, %e18[0] \n"
"vmla.f32 q13, q14, %e21[0] \n"
"vext.32 q11, q14, q15, #2 \n"
"vmla.f32 q6, q10, %e19[1] \n"
"vmla.f32 q7, q10, %e22[1] \n"
"vmla.f32 q12, q10, %e18[1] \n"
"vmla.f32 q13, q10, %e21[1] \n"
"pld [%7, #192] \n"
"vld1.f32 {d16-d18}, [%7 :64] \n"// r2
"add %7, #16 \n"
"vmla.f32 q6, q11, %f19[0] \n"
"vmla.f32 q7, q11, %f22[0] \n"
"vmla.f32 q12, q11, %f18[0] \n"
"vmla.f32 q13, q11, %f21[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vmla.f32 q6, q8, %e20[0] \n"
"vmla.f32 q7, q8, %e23[0] \n"
"vmla.f32 q12, q8, %e19[0] \n"
"vmla.f32 q13, q8, %e22[0] \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q6, q10, %e20[1] \n"
"vmla.f32 q7, q10, %e23[1] \n"
"vmla.f32 q12, q10, %e19[1] \n"
"vmla.f32 q13, q10, %e22[1] \n"
"pld [%5, #192] \n"
"vld1.f32 {d16-d18}, [%5 :64] \n"// r0
"add %5, #16 \n"
"vmla.f32 q6, q11, %f20[0] \n"
"vmla.f32 q7, q11, %f23[0] \n"
"vmla.f32 q12, q11, %f19[0] \n"
"vmla.f32 q13, q11, %f22[0] \n"
"pld [%8, #192] \n"
"vld1.f32 {d28-d30}, [%8] \n"// r3
"add %8, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vst1.f32 {d12-d13}, [%1 : 64]!\n"
"vst1.f32 {d14-d15}, [%2 : 64]!\n"
"vext.32 q11, q14, q15, #2 \n"
"vst1.f32 {d24-d25}, [%3]! \n"
"vst1.f32 {d26-d27}, [%4]! \n"
"subs %0, #1 \n"
"bne 0b \n"
"sub %5, #16 \n"
"sub %8, #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr0n), // %3
"=r"(outptr1n), // %4
"=r"(r0), // %5
"=r"(r1), // %6
"=r"(r2), // %7
"=r"(r3) // %8
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr0n),
"4"(outptr1n),
"5"(r0),
"6"(r1),
"7"(r2),
"8"(r3),
"w"(_k00), // %18
"w"(_k03), // %19
"w"(_k06), // %20
"w"(_k10), // %21
"w"(_k13), // %22
"w"(_k16) // %23
: "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// Scalar tail for the last (outw & 3) columns of this row pair.
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _sum0 = vmulq_f32(_r00, _k00);
float32x4_t _sum1 = vmulq_f32(_r00, _k10);
_sum0 = vmlaq_f32(_sum0, _r10, _k03);
_sum1 = vmlaq_f32(_sum1, _r10, _k13);
_sum0 = vmlaq_f32(_sum0, _r20, _k06);
_sum1 = vmlaq_f32(_sum1, _r20, _k16);
float32x4_t _sum0n = vmulq_f32(_r10, _k00);
float32x4_t _sum1n = vmulq_f32(_r10, _k10);
_sum0n = vmlaq_f32(_sum0n, _r20, _k03);
_sum1n = vmlaq_f32(_sum1n, _r20, _k13);
_sum0n = vmlaq_f32(_sum0n, _r30, _k06);
_sum1n = vmlaq_f32(_sum1n, _r30, _k16);
// Lane 3 holds the running output value so the horizontal add folds it in
// (the unused 4th kernel lane contributes garbage there otherwise).
_sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
_sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
_sum0n = vsetq_lane_f32(*outptr0n, _sum0n, 3);
_sum1n = vsetq_lane_f32(*outptr1n, _sum1n, 3);
#if __aarch64__
*outptr0 = vaddvq_f32(_sum0);
*outptr1 = vaddvq_f32(_sum1);
*outptr0n = vaddvq_f32(_sum0n);
*outptr1n = vaddvq_f32(_sum1n);
#else
float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));
float32x2_t _ss0n = vadd_f32(vget_low_f32(_sum0n), vget_high_f32(_sum0n));
float32x2_t _ss1n = vadd_f32(vget_low_f32(_sum1n), vget_high_f32(_sum1n));
float32x2_t _ss01 = vpadd_f32(_ss0, _ss1);
float32x2_t _ss01n = vpadd_f32(_ss0n, _ss1n);
*outptr0 = vget_lane_f32(_ss01, 0);
*outptr1 = vget_lane_f32(_ss01, 1);
*outptr0n = vget_lane_f32(_ss01n, 0);
*outptr1n = vget_lane_f32(_ss01n, 1);
#endif // __aarch64__
#else
float sum0 = 0.f;
float sum0n = 0.f;
float sum1 = 0.f;
float sum1n = 0.f;
sum0 += r0[0] * k0[0];
sum0 += r0[1] * k0[1];
sum0 += r0[2] * k0[2];
sum0 += r1[0] * k0[3];
sum0 += r1[1] * k0[4];
sum0 += r1[2] * k0[5];
sum0 += r2[0] * k0[6];
sum0 += r2[1] * k0[7];
sum0 += r2[2] * k0[8];
sum1 += r0[0] * k1[0];
sum1 += r0[1] * k1[1];
sum1 += r0[2] * k1[2];
sum1 += r1[0] * k1[3];
sum1 += r1[1] * k1[4];
sum1 += r1[2] * k1[5];
sum1 += r2[0] * k1[6];
sum1 += r2[1] * k1[7];
sum1 += r2[2] * k1[8];
sum0n += r1[0] * k0[0];
sum0n += r1[1] * k0[1];
sum0n += r1[2] * k0[2];
sum0n += r2[0] * k0[3];
sum0n += r2[1] * k0[4];
sum0n += r2[2] * k0[5];
sum0n += r3[0] * k0[6];
sum0n += r3[1] * k0[7];
sum0n += r3[2] * k0[8];
sum1n += r1[0] * k1[0];
sum1n += r1[1] * k1[1];
sum1n += r1[2] * k1[2];
sum1n += r2[0] * k1[3];
sum1n += r2[1] * k1[4];
sum1n += r2[2] * k1[5];
sum1n += r3[0] * k1[6];
sum1n += r3[1] * k1[7];
sum1n += r3[2] * k1[8];
*outptr0 += sum0;
*outptr1 += sum1;
*outptr0n += sum0n;
*outptr1n += sum1n;
#endif // __ARM_NEON
r0++;
r1++;
r2++;
r3++;
outptr0++;
outptr1++;
outptr0n++;
outptr1n++;
}
// Advance input rows by two image rows (skip the 2-pixel right border),
// and output pointers by one extra row (two rows were just written).
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr0 += outw;
outptr1 += outw;
outptr0n += outw;
outptr1n += outw;
}
// Leftover single output row (when outh is odd).
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _sum0 = vld1q_f32(outptr0);
float32x4_t _sum1 = vld1q_f32(outptr1);
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r00n = vld1q_f32(r0 + 4);
float32x4_t _r01 = vextq_f32(_r00, _r00n, 1);
float32x4_t _r02 = vextq_f32(_r00, _r00n, 2);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r10n = vld1q_f32(r1 + 4);
float32x4_t _r11 = vextq_f32(_r10, _r10n, 1);
float32x4_t _r12 = vextq_f32(_r10, _r10n, 2);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r20n = vld1q_f32(r2 + 4);
float32x4_t _r21 = vextq_f32(_r20, _r20n, 1);
float32x4_t _r22 = vextq_f32(_r20, _r20n, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _r00, _k00, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _r01, _k00, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _r02, _k00, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _r10, _k03, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _r11, _k03, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _r12, _k03, 2);
_sum0 = vfmaq_laneq_f32(_sum0, _r20, _k06, 0);
_sum0 = vfmaq_laneq_f32(_sum0, _r21, _k06, 1);
_sum0 = vfmaq_laneq_f32(_sum0, _r22, _k06, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _r00, _k10, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _r01, _k10, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _r02, _k10, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _r10, _k13, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _r11, _k13, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _r12, _k13, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _r20, _k16, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _r21, _k16, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _r22, _k16, 2);
vst1q_f32(outptr0, _sum0);
vst1q_f32(outptr1, _sum1);
r0 += 4;
r1 += 4;
r2 += 4;
outptr0 += 4;
outptr1 += 4;
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%3, #192] \n"
"vld1.f32 {d16-d18}, [%3] \n"// r0
"add %3, #16 \n"
"pld [%1, #128] \n"
"vld1.f32 {d12-d13}, [%1] \n"// _sum0
"pld [%2, #128] \n"
"vld1.f32 {d14-d15}, [%2] \n"// _sum1
"vmul.f32 q14, q8, %e12[0] \n"
"vmul.f32 q15, q8, %e15[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q6, q10, %e12[1] \n"
"vmla.f32 q7, q10, %e15[1] \n"
"pld [%4, #192] \n"
"vld1.f32 {d16-d18}, [%4] \n"// r1
"add %4, #16 \n"
"vmla.f32 q14, q11, %f12[0] \n"
"vmla.f32 q15, q11, %f15[0] \n"
"vmla.f32 q6, q8, %e13[0] \n"
"vmla.f32 q7, q8, %e16[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q14, q10, %e13[1] \n"
"vmla.f32 q15, q10, %e16[1] \n"
"pld [%5, #192] \n"
"vld1.f32 {d16-d18}, [%5] \n"// r2
"add %5, #16 \n"
"vmla.f32 q6, q11, %f13[0] \n"
"vmla.f32 q7, q11, %f16[0] \n"
"vmla.f32 q14, q8, %e14[0] \n"
"vmla.f32 q15, q8, %e17[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q6, q10, %e14[1] \n"
"vmla.f32 q7, q10, %e17[1] \n"
"vmla.f32 q14, q11, %f14[0] \n"
"vmla.f32 q15, q11, %f17[0] \n"
"vadd.f32 q6, q6, q14 \n"
"vadd.f32 q7, q7, q15 \n"
"vst1.f32 {d12-d13}, [%1]! \n"
"vst1.f32 {d14-d15}, [%2]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2) // %5
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(r0),
"4"(r1),
"5"(r2),
"w"(_k00), // %12
"w"(_k03), // %13
"w"(_k06), // %14
"w"(_k10), // %15
"w"(_k13), // %16
"w"(_k16) // %17
: "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum0 = vmulq_f32(_r00, _k00);
float32x4_t _sum1 = vmulq_f32(_r00, _k10);
_sum0 = vmlaq_f32(_sum0, _r10, _k03);
_sum1 = vmlaq_f32(_sum1, _r10, _k13);
_sum0 = vmlaq_f32(_sum0, _r20, _k06);
_sum1 = vmlaq_f32(_sum1, _r20, _k16);
_sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
_sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
#if __aarch64__
*outptr0 = vaddvq_f32(_sum0);
*outptr1 = vaddvq_f32(_sum1);
#else
float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));
float32x2_t _ss01 = vpadd_f32(_ss0, _ss1);
*outptr0 = vget_lane_f32(_ss01, 0);
*outptr1 = vget_lane_f32(_ss01, 1);
#endif // __aarch64__
#else
float sum0 = 0.f;
float sum1 = 0.f;
sum0 += r0[0] * k0[0];
sum0 += r0[1] * k0[1];
sum0 += r0[2] * k0[2];
sum0 += r1[0] * k0[3];
sum0 += r1[1] * k0[4];
sum0 += r1[2] * k0[5];
sum0 += r2[0] * k0[6];
sum0 += r2[1] * k0[7];
sum0 += r2[2] * k0[8];
sum1 += r0[0] * k1[0];
sum1 += r0[1] * k1[1];
sum1 += r0[2] * k1[2];
sum1 += r1[0] * k1[3];
sum1 += r1[1] * k1[4];
sum1 += r1[2] * k1[5];
sum1 += r2[0] * k1[6];
sum1 += r2[1] * k1[7];
sum1 += r2[2] * k1[8];
*outptr0 += sum0;
*outptr1 += sum1;
#endif // __ARM_NEON
r0++;
r1++;
r2++;
outptr0++;
outptr1++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
// Next input channel's weights for both output channels.
k0 += 9;
k1 += 9;
}
}
// Remaining output channels (when outch is odd), one channel at a time.
#pragma omp parallel for
for (int p=remain_outch_start; p<outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
const float* kernel0 = kernel + p*inch*9;
for (int q=0; q<inch; q++)
{
float* outptr = out;
float* outptr2 = outptr + outw;
const float* img0 = bottom_blob.channel(q);
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w*2;
const float* r3 = img0 + w*3;
#if __ARM_NEON
float32x4_t _k0123 = vld1q_f32(kernel0);
float32x4_t _k3456 = vld1q_f32(kernel0+3);
float32x4_t _k6789 = vld1q_f32(kernel0+6);
#else
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
#endif // __ARM_NEON
int i = 0;
// Two output rows per iteration, as in the paired-channel path above.
for (; i+1 < outh; i+=2)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _sum1 = vld1q_f32(outptr);
float32x4_t _sum3 = vld1q_f32(outptr2);
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r00n = vld1q_f32(r0 + 4);
float32x4_t _r01 = vextq_f32(_r00, _r00n, 1);
float32x4_t _r02 = vextq_f32(_r00, _r00n, 2);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r10n = vld1q_f32(r1 + 4);
float32x4_t _r11 = vextq_f32(_r10, _r10n, 1);
float32x4_t _r12 = vextq_f32(_r10, _r10n, 2);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r20n = vld1q_f32(r2 + 4);
float32x4_t _r21 = vextq_f32(_r20, _r20n, 1);
float32x4_t _r22 = vextq_f32(_r20, _r20n, 2);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _r30n = vld1q_f32(r3 + 4);
float32x4_t _r31 = vextq_f32(_r30, _r30n, 1);
float32x4_t _r32 = vextq_f32(_r30, _r30n, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _r00, _k0123, 0);
float32x4_t _sum2 = vmulq_laneq_f32(_r01, _k0123, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _r02, _k0123, 2);
_sum2 = vfmaq_laneq_f32(_sum2, _r10, _k3456, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _r11, _k3456, 1);
_sum2 = vfmaq_laneq_f32(_sum2, _r12, _k3456, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _r20, _k6789, 0);
_sum2 = vfmaq_laneq_f32(_sum2, _r21, _k6789, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _r22, _k6789, 2);
_sum3 = vfmaq_laneq_f32(_sum3, _r10, _k0123, 0);
float32x4_t _sum4 = vmulq_laneq_f32(_r11, _k0123, 1);
_sum3 = vfmaq_laneq_f32(_sum3, _r12, _k0123, 2);
_sum4 = vfmaq_laneq_f32(_sum4, _r20, _k3456, 0);
_sum3 = vfmaq_laneq_f32(_sum3, _r21, _k3456, 1);
_sum4 = vfmaq_laneq_f32(_sum4, _r22, _k3456, 2);
_sum3 = vfmaq_laneq_f32(_sum3, _r30, _k6789, 0);
_sum4 = vfmaq_laneq_f32(_sum4, _r31, _k6789, 1);
_sum3 = vfmaq_laneq_f32(_sum3, _r32, _k6789, 2);
_sum1 = vaddq_f32(_sum1, _sum2);
_sum3 = vaddq_f32(_sum3, _sum4);
vst1q_f32(outptr, _sum1);
vst1q_f32(outptr2, _sum3);
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
outptr += 4;
outptr2 += 4;
}
#else
if (nn > 0)
{
asm volatile(
"pld [%3, #192] \n"
"vld1.f32 {d18-d20}, [%3 :64] \n"// r0
"add %3, #16 \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d14-d15}, [%1 :64] \n"// _sum
"vmla.f32 q7, q9, %e14[0] \n"
"vmul.f32 q6, q11, %e14[1] \n"
"vmul.f32 q13, q12, %f14[0] \n"
"pld [%4, #192] \n"
"vld1.f32 {d18-d20}, [%4] \n"// r1
"add %4, #16 \n"
"vmla.f32 q7, q9, %e15[0] \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"vmla.f32 q6, q11, %e15[1] \n"
"vmla.f32 q13, q12, %f15[0] \n"
"pld [%2, #128] \n"
"vld1.f32 {d16-d17}, [%2] \n"// _sum2
"vmla.f32 q8, q9, %e14[0] \n"
"vmul.f32 q14, q11, %e14[1] \n"
"vmul.f32 q15, q12, %f14[0] \n"
"pld [%5, #192] \n"
"vld1.f32 {d18-d20}, [%5 :64] \n"// r2
"add %5, #16 \n"
"vmla.f32 q7, q9, %e16[0] \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"vmla.f32 q6, q11, %e16[1] \n"
"vmla.f32 q13, q12, %f16[0] \n"
"vmla.f32 q8, q9, %e15[0] \n"
"vmla.f32 q14, q11, %e15[1] \n"
"vmla.f32 q15, q12, %f15[0] \n"
"pld [%6, #192] \n"
"vld1.f32 {d18-d20}, [%6] \n"// r3
"add %6, #16 \n"
"vmla.f32 q8, q9, %e16[0] \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"vmla.f32 q14, q11, %e16[1] \n"
"vmla.f32 q15, q12, %f16[0] \n"
"vadd.f32 q7, q7, q6 \n"
"pld [%3, #192] \n"
"vld1.f32 {d18-d20}, [%3 :64] \n"// r0
"vadd.f32 q8, q8, q14 \n"
"vadd.f32 q7, q7, q13 \n"
"vadd.f32 q8, q8, q15 \n"
"vext.32 q11, q9, q10, #1 \n"
"vext.32 q12, q9, q10, #2 \n"
"add %3, #16 \n"
"vst1.f32 {d14-d15}, [%1]! \n"
"vst1.f32 {d16-d17}, [%2]! \n"
"subs %0, #1 \n"
"bne 0b \n"
"sub %3, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3) // %6
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"w"(_k0123), // %14
"w"(_k3456), // %15
"w"(_k6789) // %16
: "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r30 = vld1q_f32(r3);
float32x4_t _sum = vmulq_f32(_r00, _k0123);
_sum = vmlaq_f32(_sum, _r10, _k3456);
_sum = vmlaq_f32(_sum, _r20, _k6789);
float32x4_t _sum2 = vmulq_f32(_r10, _k0123);
_sum2 = vmlaq_f32(_sum2, _r20, _k3456);
_sum2 = vmlaq_f32(_sum2, _r30, _k6789);
_sum = vsetq_lane_f32(*outptr, _sum, 3);
_sum2 = vsetq_lane_f32(*outptr2, _sum2, 3);
#if __aarch64__
*outptr = vaddvq_f32(_sum);
*outptr2 = vaddvq_f32(_sum2);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));
float32x2_t _sss2 = vpadd_f32(_ss, _ss2);
*outptr = vget_lane_f32(_sss2, 0);
*outptr2 = vget_lane_f32(_sss2, 1);
#endif // __aarch64__
#else
float sum = 0;
float sum2 = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
*outptr += sum;
*outptr2 += sum2;
#endif
r0++;
r1++;
r2++;
r3++;
outptr++;
outptr2++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr += outw;
outptr2 += outw;
}
// Leftover single output row (when outh is odd).
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw & 3;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _sum1 = vld1q_f32(outptr);
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r00n = vld1q_f32(r0 + 4);
float32x4_t _r01 = vextq_f32(_r00, _r00n, 1);
float32x4_t _r02 = vextq_f32(_r00, _r00n, 2);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r10n = vld1q_f32(r1 + 4);
float32x4_t _r11 = vextq_f32(_r10, _r10n, 1);
float32x4_t _r12 = vextq_f32(_r10, _r10n, 2);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _r20n = vld1q_f32(r2 + 4);
float32x4_t _r21 = vextq_f32(_r20, _r20n, 1);
float32x4_t _r22 = vextq_f32(_r20, _r20n, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _r00, _k0123, 0);
float32x4_t _sum2 = vmulq_laneq_f32(_r01, _k0123, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _r02, _k0123, 2);
_sum2 = vfmaq_laneq_f32(_sum2, _r10, _k3456, 0);
_sum1 = vfmaq_laneq_f32(_sum1, _r11, _k3456, 1);
_sum2 = vfmaq_laneq_f32(_sum2, _r12, _k3456, 2);
_sum1 = vfmaq_laneq_f32(_sum1, _r20, _k6789, 0);
_sum2 = vfmaq_laneq_f32(_sum2, _r21, _k6789, 1);
_sum1 = vfmaq_laneq_f32(_sum1, _r22, _k6789, 2);
_sum1 = vaddq_f32(_sum1, _sum2);
vst1q_f32(outptr, _sum1);
r0 += 4;
r1 += 4;
r2 += 4;
outptr += 4;
}
#else
if (nn > 0)
{
asm volatile(
"pld [%2, #192] \n"
"vld1.f32 {d16-d18}, [%2] \n"// r0
"add %2, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"0: \n"
"pld [%1, #128] \n"
"vld1.f32 {d14-d15}, [%1] \n"// _sum
"vmla.f32 q7, q8, %e10[0] \n"
"vmul.f32 q13, q10, %e10[1] \n"
"vmul.f32 q14, q11, %f10[0] \n"
"pld [%3, #192] \n"
"vld1.f32 {d16-d18}, [%3] \n"// r1
"add %3, #16 \n"
"vmla.f32 q7, q8, %e11[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q13, q10, %e11[1] \n"
"vmla.f32 q14, q11, %f11[0] \n"
"pld [%4, #192] \n"
"vld1.f32 {d16-d18}, [%4] \n"// r2
"add %4, #16 \n"
"vmla.f32 q7, q8, %e12[0] \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vmla.f32 q13, q10, %e12[1] \n"
"vmla.f32 q14, q11, %f12[0] \n"
"pld [%2, #192] \n"
"vld1.f32 {d16-d18}, [%2] \n"// r0
"add %2, #16 \n"
"vadd.f32 q7, q7, q13 \n"
"vadd.f32 q7, q7, q14 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vst1.f32 {d14-d15}, [%1]! \n"
"subs %0, #1 \n"
"bne 0b \n"
"sub %2, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2) // %4
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"w"(_k0123), // %10
"w"(_k3456), // %11
"w"(_k6789) // %12
: "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
float32x4_t _r00 = vld1q_f32(r0);
float32x4_t _r10 = vld1q_f32(r1);
float32x4_t _r20 = vld1q_f32(r2);
float32x4_t _sum = vmulq_f32(_r00, _k0123);
_sum = vmlaq_f32(_sum, _r10, _k3456);
_sum = vmlaq_f32(_sum, _r20, _k6789);
_sum = vsetq_lane_f32(*outptr, _sum, 3);
#if __aarch64__
*outptr = vaddvq_f32(_sum);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
*outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
float sum = 0;
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
*outptr += sum;
#endif
r0++;
r1++;
r2++;
outptr++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
kernel0 += 9;
}
}
}
// Transform 3x3 convolution kernels into the 8x8 Winograd F(6x6,3x3) domain
// and repack them into the interleaved layout consumed by the winograd4
// compute path.
//
// For every (outch, inch) pair the 3x3 kernel g is transformed to
// U = G * g * G^T (an 8x8 tile, 64 floats), where G is the ktm matrix below.
// The tiles are then regrouped so that 4 output channels' weights sit
// contiguously (one kernel_tm2 channel per group of 4), which lets the dot
// kernels stream them with sequential loads.
//
// kernel    : raw weights, laid out [outch][inch][9]
// kernel_tm : output; replaced at the end by the interleaved blob kernel_tm2
// inch/outch: input / output channel counts
static void conv3x3s1_winograd64_transform_kernel_neon(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
    kernel_tm.create(8*8, inch, outch);

    // Winograd G matrix for F(6x6,3x3): row i maps the 3 taps of a kernel
    // row/column to Winograd sample point i.
    const float ktm[8][3] = {
        { 1.0f, 0.0f, 0.0f},
        {-2.0f/9, -2.0f/9, -2.0f/9},
        {-2.0f/9, 2.0f/9, -2.0f/9},
        {1.0f/90, 1.0f/45, 2.0f/45},
        {1.0f/90, -1.0f/45, 2.0f/45},
        {1.0f/45, 1.0f/90, 1.0f/180},
        {1.0f/45, -1.0f/90, 1.0f/180},
        { 0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for
    for (int p = 0; p<outch; p++)
    {
        for (int q = 0; q<inch; q++)
        {
            const float* kernel0 = (const float*)kernel + p*inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm.channel(p).row(q);

            // transform kernel, transposed
            const float* k0 = kernel0;      // kernel row 0
            const float* k1 = kernel0 + 3;  // kernel row 1
            const float* k2 = kernel0 + 6;  // kernel row 2

            // h : apply G along kernel rows -> tmp is 8x3
            float tmp[8][3];
            for (int i=0; i<8; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            // v : apply G along kernel columns -> the final 8x8 tile
            for (int j=0; j<8; j++)
            {
                float* tmpp = &tmp[j][0];

                for (int i=0; i<8; i++)
                {
                    kernel_tm0[j*8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    // optimized layout for winograd4
    // interleave weights: pack output channels in groups of 4, the trailing
    // outch%4 channels go together into one extra kernel_tm2 channel
    int nn_outch = outch >> 2;
    int remain_outch_start = nn_outch << 2;

    Mat kernel_tm2(8*8 * inch * 4, 1, nn_outch + (outch % 4 + 3) / 4);

    #pragma omp parallel for
    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 4;

        float* ktm2 = kernel_tm2.channel(pp);

        const Mat kernel0_tm = kernel_tm.channel(p);
        const Mat kernel1_tm = kernel_tm.channel(p+1);
        const Mat kernel2_tm = kernel_tm.channel(p+2);
        const Mat kernel3_tm = kernel_tm.channel(p+3);

        int q=0;

#if __ARM_NEON && __aarch64__
        // 4 input channels at a time: each asm iteration copies one 4-float
        // quad from each of 8 rows; 16 iterations cover the 64-float tiles
        for (; q+3<inch; q+=4)
        {
            const float* k00 = kernel0_tm.row(q);
            const float* k01 = kernel0_tm.row(q+1);
            const float* k02 = kernel0_tm.row(q+2);
            const float* k03 = kernel0_tm.row(q+3);
            const float* k10 = kernel1_tm.row(q);
            const float* k11 = kernel1_tm.row(q+1);
            const float* k12 = kernel1_tm.row(q+2);
            const float* k13 = kernel1_tm.row(q+3);
            const float* k20 = kernel2_tm.row(q);
            const float* k21 = kernel2_tm.row(q+1);
            const float* k22 = kernel2_tm.row(q+2);
            const float* k23 = kernel2_tm.row(q+3);
            const float* k30 = kernel3_tm.row(q);
            const float* k31 = kernel3_tm.row(q+1);
            const float* k32 = kernel3_tm.row(q+2);
            const float* k33 = kernel3_tm.row(q+3);

            for (int r=0; r<16; r++)
            {
                // split into two asm blocks because gcc rejects more than
                // 30 operands in a single statement :(
                asm volatile(
                    "ld1 {v0.4s}, [%1], #16 \n"
                    "ld1 {v1.4s}, [%2], #16 \n"
                    "ld1 {v2.4s}, [%3], #16 \n"
                    "ld1 {v3.4s}, [%4], #16 \n"
                    "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
                    "ld1 {v0.4s}, [%5], #16 \n"
                    "ld1 {v1.4s}, [%6], #16 \n"
                    "ld1 {v2.4s}, [%7], #16 \n"
                    "ld1 {v3.4s}, [%8], #16 \n"
                    "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
                    : "=r"(ktm2), // %0
                    "=r"(k00), // %1
                    "=r"(k01), // %2
                    "=r"(k02), // %3
                    "=r"(k03), // %4
                    "=r"(k10), // %5
                    "=r"(k11), // %6
                    "=r"(k12), // %7
                    "=r"(k13) // %8
                    : "0"(ktm2),
                    "1"(k00),
                    "2"(k01),
                    "3"(k02),
                    "4"(k03),
                    "5"(k10),
                    "6"(k11),
                    "7"(k12),
                    "8"(k13)
                    : "cc", "memory", "v0", "v1", "v2", "v3"
                );
                asm volatile(
                    "ld1 {v0.4s}, [%1], #16 \n"
                    "ld1 {v1.4s}, [%2], #16 \n"
                    "ld1 {v2.4s}, [%3], #16 \n"
                    "ld1 {v3.4s}, [%4], #16 \n"
                    "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
                    "ld1 {v0.4s}, [%5], #16 \n"
                    "ld1 {v1.4s}, [%6], #16 \n"
                    "ld1 {v2.4s}, [%7], #16 \n"
                    "ld1 {v3.4s}, [%8], #16 \n"
                    "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
                    : "=r"(ktm2), // %0
                    "=r"(k20), // %1
                    "=r"(k21), // %2
                    "=r"(k22), // %3
                    "=r"(k23), // %4
                    "=r"(k30), // %5
                    "=r"(k31), // %6
                    "=r"(k32), // %7
                    "=r"(k33) // %8
                    : "0"(ktm2),
                    "1"(k20),
                    "2"(k21),
                    "3"(k22),
                    "4"(k23),
                    "5"(k30),
                    "6"(k31),
                    "7"(k32),
                    "8"(k33)
                    : "cc", "memory", "v0", "v1", "v2", "v3"
                );
            }
        }
#endif // __ARM_NEON && __aarch64__

        // 2 input channels at a time (also mops up after the quad loop above)
        for (; q+1<inch; q+=2)
        {
            const float* k00 = kernel0_tm.row(q);
            const float* k01 = kernel0_tm.row(q+1);
            const float* k10 = kernel1_tm.row(q);
            const float* k11 = kernel1_tm.row(q+1);
            const float* k20 = kernel2_tm.row(q);
            const float* k21 = kernel2_tm.row(q+1);
            const float* k30 = kernel3_tm.row(q);
            const float* k31 = kernel3_tm.row(q+1);

            for (int r=0; r<16; r++)
            {
#if __ARM_NEON
#if __aarch64__
                asm volatile(
                    "ld1 {v0.4s}, [%1], #16 \n"
                    "ld1 {v1.4s}, [%2], #16 \n"
                    "st1 {v0.4s, v1.4s}, [%0], #32 \n"
                    "ld1 {v0.4s}, [%3], #16 \n"
                    "ld1 {v1.4s}, [%4], #16 \n"
                    "st1 {v0.4s, v1.4s}, [%0], #32 \n"
                    "ld1 {v0.4s}, [%5], #16 \n"
                    "ld1 {v1.4s}, [%6], #16 \n"
                    "st1 {v0.4s, v1.4s}, [%0], #32 \n"
                    "ld1 {v0.4s}, [%7], #16 \n"
                    "ld1 {v1.4s}, [%8], #16 \n"
                    "st1 {v0.4s, v1.4s}, [%0], #32 \n"
                    : "=r"(ktm2), // %0
                    "=r"(k00), // %1
                    "=r"(k01), // %2
                    "=r"(k10), // %3
                    "=r"(k11), // %4
                    "=r"(k20), // %5
                    "=r"(k21), // %6
                    "=r"(k30), // %7
                    "=r"(k31) // %8
                    : "0"(ktm2),
                    "1"(k00),
                    "2"(k01),
                    "3"(k10),
                    "4"(k11),
                    "5"(k20),
                    "6"(k21),
                    "7"(k30),
                    "8"(k31)
                    : "cc", "memory", "v0", "v1"
                );
#else
                // ARMv7: same copy using NEON q registers; :128 asserts
                // 16-byte alignment of the rows
                asm volatile(
                    "vld1.f32 {d0-d1}, [%1 :128]! \n"
                    "vld1.f32 {d2-d3}, [%2 :128]! \n"
                    "vst1.f32 {d0-d3}, [%0 :128]! \n"
                    "vld1.f32 {d0-d1}, [%3 :128]! \n"
                    "vld1.f32 {d2-d3}, [%4 :128]! \n"
                    "vst1.f32 {d0-d3}, [%0 :128]! \n"
                    "vld1.f32 {d0-d1}, [%5 :128]! \n"
                    "vld1.f32 {d2-d3}, [%6 :128]! \n"
                    "vst1.f32 {d0-d3}, [%0 :128]! \n"
                    "vld1.f32 {d0-d1}, [%7 :128]! \n"
                    "vld1.f32 {d2-d3}, [%8 :128]! \n"
                    "vst1.f32 {d0-d3}, [%0 :128]! \n"
                    : "=r"(ktm2), // %0
                    "=r"(k00), // %1
                    "=r"(k01), // %2
                    "=r"(k10), // %3
                    "=r"(k11), // %4
                    "=r"(k20), // %5
                    "=r"(k21), // %6
                    "=r"(k30), // %7
                    "=r"(k31) // %8
                    : "0"(ktm2),
                    "1"(k00),
                    "2"(k01),
                    "3"(k10),
                    "4"(k11),
                    "5"(k20),
                    "6"(k21),
                    "7"(k30),
                    "8"(k31)
                    : "cc", "memory", "q0", "q1"
                );
#endif // __aarch64__
#else
                // scalar fallback: same interleave, one quad per row
                for (int m=0; m<4; m++)
                {
                    ktm2[0 +m] = k00[m];
                    ktm2[4 +m] = k01[m];
                    ktm2[8 +m] = k10[m];
                    ktm2[12+m] = k11[m];
                    ktm2[16+m] = k20[m];
                    ktm2[20+m] = k21[m];
                    ktm2[24+m] = k30[m];
                    ktm2[28+m] = k31[m];
                }

                k00 += 4;
                k01 += 4;
                k10 += 4;
                k11 += 4;
                k20 += 4;
                k21 += 4;
                k30 += 4;
                k31 += 4;
                ktm2 += 32;
#endif // __ARM_NEON
            }
        }

        // leftover single input channel (when inch is odd)
        for (; q<inch; q++)
        {
            const float* k00 = kernel0_tm.row(q);
            const float* k10 = kernel1_tm.row(q);
            const float* k20 = kernel2_tm.row(q);
            const float* k30 = kernel3_tm.row(q);

            for (int r=0; r<16; r++)
            {
#if __ARM_NEON
#if __aarch64__
                asm volatile(
                    "ld1 {v0.4s}, [%1], #16 \n"
                    "ld1 {v1.4s}, [%2], #16 \n"
                    "st1 {v0.4s, v1.4s}, [%0], #32 \n"
                    "ld1 {v0.4s}, [%3], #16 \n"
                    "ld1 {v1.4s}, [%4], #16 \n"
                    "st1 {v0.4s, v1.4s}, [%0], #32 \n"
                    : "=r"(ktm2), // %0
                    "=r"(k00), // %1
                    "=r"(k10), // %2
                    "=r"(k20), // %3
                    "=r"(k30) // %4
                    : "0"(ktm2),
                    "1"(k00),
                    "2"(k10),
                    "3"(k20),
                    "4"(k30)
                    : "cc", "memory", "v0", "v1"
                );
#else
                asm volatile(
                    "vld1.f32 {d0-d1}, [%1 :128]! \n"
                    "vld1.f32 {d2-d3}, [%2 :128]! \n"
                    "vst1.f32 {d0-d3}, [%0 :128]! \n"
                    "vld1.f32 {d0-d1}, [%3 :128]! \n"
                    "vld1.f32 {d2-d3}, [%4 :128]! \n"
                    "vst1.f32 {d0-d3}, [%0 :128]! \n"
                    : "=r"(ktm2), // %0
                    "=r"(k00), // %1
                    "=r"(k10), // %2
                    "=r"(k20), // %3
                    "=r"(k30) // %4
                    : "0"(ktm2),
                    "1"(k00),
                    "2"(k10),
                    "3"(k20),
                    "4"(k30)
                    : "cc", "memory", "q0", "q1"
                );
#endif // __aarch64__
#else
                for (int m=0; m<4; m++)
                {
                    ktm2[0 +m] = k00[m];
                    ktm2[4 +m] = k10[m];
                    ktm2[8 +m] = k20[m];
                    ktm2[12+m] = k30[m];
                }

                k00 += 4;
                k10 += 4;
                k20 += 4;
                k30 += 4;
                ktm2 += 16;
#endif // __ARM_NEON
            }
        }
    }

    // remaining output channels (outch%4): packed one after another into the
    // last kernel_tm2 channel, plain un-interleaved copy of each 8x8 tile
    #pragma omp parallel for
    for (int p = remain_outch_start; p<outch; p++)
    {
        float* ktm2 = (float*)kernel_tm2.channel(nn_outch) + 8*8 * inch * (p-remain_outch_start);

        const Mat kernel0_tm = kernel_tm.channel(p);

        int q = 0;

        for (; q<inch; q++)
        {
            const float* k00 = kernel0_tm.row(q);

            for (int r=0; r<16; r++)
            {
#if __ARM_NEON
#if __aarch64__
                asm volatile(
                    "ld1 {v0.4s}, [%1], #16 \n"
                    "st1 {v0.4s}, [%0], #16 \n"
                    : "=r"(ktm2), // %0
                    "=r"(k00) // %1
                    : "0"(ktm2),
                    "1"(k00)
                    : "cc", "memory", "v0"
                );
#else
                asm volatile(
                    "vld1.f32 {d0-d1}, [%1 :128]! \n"
                    "vst1.f32 {d0-d1}, [%0 :128]! \n"
                    : "=r"(ktm2), // %0
                    "=r"(k00) // %1
                    : "0"(ktm2),
                    "1"(k00)
                    : "cc", "memory", "q0"
                );
#endif // __aarch64__
#else
                for (int m=0; m<4; m++)
                {
                    ktm2[m] = k00[m];
                }

                k00 += 4;
                ktm2 += 4;
#endif // __ARM_NEON
            }
        }
    }

    kernel_tm = kernel_tm2;
}
static void conv3x3s1_winograd64_transform_kernel_neon5(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
kernel_tm.create(8*8, inch, outch);
const float ktm[8][3] = {
{ 1.0f, 0.0f, 0.0f},
{-2.0f/9, -2.0f/9, -2.0f/9},
{-2.0f/9, 2.0f/9, -2.0f/9},
{1.0f/90, 1.0f/45, 2.0f/45},
{1.0f/90, -1.0f/45, 2.0f/45},
{1.0f/45, 1.0f/90, 1.0f/180},
{1.0f/45, -1.0f/90, 1.0f/180},
{ 0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
for (int q = 0; q<inch; q++)
{
const float* kernel0 = (const float*)kernel + p*inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel, transposed
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[8][3];
for (int i=0; i<8; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// v
for (int j=0; j<8; j++)
{
float* tmpp = &tmp[j][0];
for (int i=0; i<8; i++)
{
kernel_tm0[j*8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// optimized layout for winograd5
// interleave weights
// Mat kernel_tm2(8*8, inch, outch);
// Mat kernel_tm2(inch, 64, outch);
#if __aarch64__
Mat kernel_tm2(8*4*(inch/4) + 8*(inch%4), 64, outch/8 + (outch%8)/4 + outch%4);
#else
Mat kernel_tm2(4*4*(inch/4) + 4*(inch%4), 64, outch/4 + outch%4);
#endif
int p=0;
#if __aarch64__
for (; p+7<outch; p+=8)
{
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p+1);
const Mat kernel2_tm = kernel_tm.channel(p+2);
const Mat kernel3_tm = kernel_tm.channel(p+3);
const Mat kernel4_tm = kernel_tm.channel(p+4);
const Mat kernel5_tm = kernel_tm.channel(p+5);
const Mat kernel6_tm = kernel_tm.channel(p+6);
const Mat kernel7_tm = kernel_tm.channel(p+7);
Mat ktm2 = kernel_tm2.channel(p/8);
for (int r=0; r<64; r++)
{
float* ktm2p = ktm2.row(r);
int q=0;
for (; q+3<inch; q+=4)
{
const float* ktm0_0 = kernel0_tm.row(q);
const float* ktm0_1 = kernel0_tm.row(q+1);
const float* ktm0_2 = kernel0_tm.row(q+2);
const float* ktm0_3 = kernel0_tm.row(q+3);
ktm2p[0] = ktm0_0[r];
ktm2p[1] = ktm0_1[r];
ktm2p[2] = ktm0_2[r];
ktm2p[3] = ktm0_3[r];
ktm2p += 4;
const float* ktm1_0 = kernel1_tm.row(q);
const float* ktm1_1 = kernel1_tm.row(q+1);
const float* ktm1_2 = kernel1_tm.row(q+2);
const float* ktm1_3 = kernel1_tm.row(q+3);
ktm2p[0] = ktm1_0[r];
ktm2p[1] = ktm1_1[r];
ktm2p[2] = ktm1_2[r];
ktm2p[3] = ktm1_3[r];
ktm2p += 4;
const float* ktm2_0 = kernel2_tm.row(q);
const float* ktm2_1 = kernel2_tm.row(q+1);
const float* ktm2_2 = kernel2_tm.row(q+2);
const float* ktm2_3 = kernel2_tm.row(q+3);
ktm2p[0] = ktm2_0[r];
ktm2p[1] = ktm2_1[r];
ktm2p[2] = ktm2_2[r];
ktm2p[3] = ktm2_3[r];
ktm2p += 4;
const float* ktm3_0 = kernel3_tm.row(q);
const float* ktm3_1 = kernel3_tm.row(q+1);
const float* ktm3_2 = kernel3_tm.row(q+2);
const float* ktm3_3 = kernel3_tm.row(q+3);
ktm2p[0] = ktm3_0[r];
ktm2p[1] = ktm3_1[r];
ktm2p[2] = ktm3_2[r];
ktm2p[3] = ktm3_3[r];
ktm2p += 4;
const float* ktm4_0 = kernel4_tm.row(q);
const float* ktm4_1 = kernel4_tm.row(q+1);
const float* ktm4_2 = kernel4_tm.row(q+2);
const float* ktm4_3 = kernel4_tm.row(q+3);
ktm2p[0] = ktm4_0[r];
ktm2p[1] = ktm4_1[r];
ktm2p[2] = ktm4_2[r];
ktm2p[3] = ktm4_3[r];
ktm2p += 4;
const float* ktm5_0 = kernel5_tm.row(q);
const float* ktm5_1 = kernel5_tm.row(q+1);
const float* ktm5_2 = kernel5_tm.row(q+2);
const float* ktm5_3 = kernel5_tm.row(q+3);
ktm2p[0] = ktm5_0[r];
ktm2p[1] = ktm5_1[r];
ktm2p[2] = ktm5_2[r];
ktm2p[3] = ktm5_3[r];
ktm2p += 4;
const float* ktm6_0 = kernel6_tm.row(q);
const float* ktm6_1 = kernel6_tm.row(q+1);
const float* ktm6_2 = kernel6_tm.row(q+2);
const float* ktm6_3 = kernel6_tm.row(q+3);
ktm2p[0] = ktm6_0[r];
ktm2p[1] = ktm6_1[r];
ktm2p[2] = ktm6_2[r];
ktm2p[3] = ktm6_3[r];
ktm2p += 4;
const float* ktm7_0 = kernel7_tm.row(q);
const float* ktm7_1 = kernel7_tm.row(q+1);
const float* ktm7_2 = kernel7_tm.row(q+2);
const float* ktm7_3 = kernel7_tm.row(q+3);
ktm2p[0] = ktm7_0[r];
ktm2p[1] = ktm7_1[r];
ktm2p[2] = ktm7_2[r];
ktm2p[3] = ktm7_3[r];
ktm2p += 4;
}
for (; q<inch; q++)
{
const float* ktm0_0 = kernel0_tm.row(q);
const float* ktm1_0 = kernel1_tm.row(q);
const float* ktm2_0 = kernel2_tm.row(q);
const float* ktm3_0 = kernel3_tm.row(q);
const float* ktm4_0 = kernel4_tm.row(q);
const float* ktm5_0 = kernel5_tm.row(q);
const float* ktm6_0 = kernel6_tm.row(q);
const float* ktm7_0 = kernel7_tm.row(q);
ktm2p[0] = ktm0_0[r];
ktm2p[1] = ktm1_0[r];
ktm2p[2] = ktm2_0[r];
ktm2p[3] = ktm3_0[r];
ktm2p[4] = ktm4_0[r];
ktm2p[5] = ktm5_0[r];
ktm2p[6] = ktm6_0[r];
ktm2p[7] = ktm7_0[r];
ktm2p += 8;
}
}
}
#endif // __aarch64__
for (; p+3<outch; p+=4)
{
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p+1);
const Mat kernel2_tm = kernel_tm.channel(p+2);
const Mat kernel3_tm = kernel_tm.channel(p+3);
#if __aarch64__
Mat ktm2 = kernel_tm2.channel(p/8+(p%8)/4);
#else
Mat ktm2 = kernel_tm2.channel(p/4);
#endif
for (int r=0; r<64; r++)
{
float* ktm2p = ktm2.row(r);
int q=0;
for (; q+3<inch; q+=4)
{
const float* ktm0_0 = kernel0_tm.row(q);
const float* ktm0_1 = kernel0_tm.row(q+1);
const float* ktm0_2 = kernel0_tm.row(q+2);
const float* ktm0_3 = kernel0_tm.row(q+3);
ktm2p[0] = ktm0_0[r];
ktm2p[1] = ktm0_1[r];
ktm2p[2] = ktm0_2[r];
ktm2p[3] = ktm0_3[r];
ktm2p += 4;
const float* ktm1_0 = kernel1_tm.row(q);
const float* ktm1_1 = kernel1_tm.row(q+1);
const float* ktm1_2 = kernel1_tm.row(q+2);
const float* ktm1_3 = kernel1_tm.row(q+3);
ktm2p[0] = ktm1_0[r];
ktm2p[1] = ktm1_1[r];
ktm2p[2] = ktm1_2[r];
ktm2p[3] = ktm1_3[r];
ktm2p += 4;
const float* ktm2_0 = kernel2_tm.row(q);
const float* ktm2_1 = kernel2_tm.row(q+1);
const float* ktm2_2 = kernel2_tm.row(q+2);
const float* ktm2_3 = kernel2_tm.row(q+3);
ktm2p[0] = ktm2_0[r];
ktm2p[1] = ktm2_1[r];
ktm2p[2] = ktm2_2[r];
ktm2p[3] = ktm2_3[r];
ktm2p += 4;
const float* ktm3_0 = kernel3_tm.row(q);
const float* ktm3_1 = kernel3_tm.row(q+1);
const float* ktm3_2 = kernel3_tm.row(q+2);
const float* ktm3_3 = kernel3_tm.row(q+3);
ktm2p[0] = ktm3_0[r];
ktm2p[1] = ktm3_1[r];
ktm2p[2] = ktm3_2[r];
ktm2p[3] = ktm3_3[r];
ktm2p += 4;
}
for (; q<inch; q++)
{
const float* ktm0_0 = kernel0_tm.row(q);
const float* ktm1_0 = kernel1_tm.row(q);
const float* ktm2_0 = kernel2_tm.row(q);
const float* ktm3_0 = kernel3_tm.row(q);
ktm2p[0] = ktm0_0[r];
ktm2p[1] = ktm1_0[r];
ktm2p[2] = ktm2_0[r];
ktm2p[3] = ktm3_0[r];
ktm2p += 4;
}
}
}
for (; p<outch; p++)
{
const Mat kernel0_tm = kernel_tm.channel(p);
#if __aarch64__
Mat ktm2 = kernel_tm2.channel(p/8+(p%8)/4+p%4);
#else
Mat ktm2 = kernel_tm2.channel(p/4+p%4);
#endif
for (int r=0; r<64; r++)
{
float* ktm2p = ktm2.row(r);
int q=0;
for (; q+3<inch; q+=4)
{
const float* ktm0_0 = kernel0_tm.row(q);
const float* ktm0_1 = kernel0_tm.row(q+1);
const float* ktm0_2 = kernel0_tm.row(q+2);
const float* ktm0_3 = kernel0_tm.row(q+3);
ktm2p[0] = ktm0_0[r];
ktm2p[1] = ktm0_1[r];
ktm2p[2] = ktm0_2[r];
ktm2p[3] = ktm0_3[r];
ktm2p += 4;
}
for (; q<inch; q++)
{
const float* ktm0_0 = kernel0_tm.row(q);
ktm2p[0] = ktm0_0[r];
ktm2p += 1;
}
}
}
kernel_tm = kernel_tm2;
}
#if 0//TODO remove old code sometime later
static void conv3x3s1_winograd64_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
bottom_blob_tm.create(8*8, w_tm/8 * h_tm/8, inch);
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for
for (int q = 0; q<inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8];
// tile
for (int i=0; i<h_tm/8; i++)
{
for (int j=0; j<w_tm/8; j++)
{
const float* r0 = img0.row(i * 6) + j * 6;
float* r0_tm = img0_tm.row(i * w_tm/8 + j);
// TODO neon optimize
for (int m=0; m<8; m++)
{
tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;
float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);
tmp[1][m] = tmp12a + tmp12b;
tmp[2][m] = tmp12a - tmp12b;
float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);
tmp[3][m] = tmp34a + tmp34b;
tmp[4][m] = tmp34a - tmp34b;
float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);
tmp[5][m] = tmp56a + tmp56b;
tmp[6][m] = tmp56a - tmp56b;
r0 += w;
}
for (int m=0; m<8; m++)
{
const float* tmp0 = tmp[m];
r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;
float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]);
r0_tm[1] = tmp12a + tmp12b;
r0_tm[2] = tmp12a - tmp12b;
float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);
r0_tm[3] = tmp34a + tmp34b;
r0_tm[4] = tmp34a - tmp34b;
float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);
r0_tm[5] = tmp56a + tmp56b;
r0_tm[6] = tmp56a - tmp56b;
r0_tm += 8;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
top_blob_tm.create(8*8, w_tm/8 * h_tm/8, outch);
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
#pragma omp parallel for
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 4;
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p+1);
Mat out2_tm = top_blob_tm.channel(p+2);
Mat out3_tm = top_blob_tm.channel(p+3);
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p+1);
const Mat kernel2_tm = kernel_tm.channel(p+2);
const Mat kernel3_tm = kernel_tm.channel(p+3);
out0_tm.fill(0.f);
out1_tm.fill(0.f);
out2_tm.fill(0.f);
out3_tm.fill(0.f);
int q = 0;
for (; q+3<inch; q+=4)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q+1);
const float* r2 = bottom_blob_tm.channel(q+2);
const float* r3 = bottom_blob_tm.channel(q+3);
const float* k00 = kernel0_tm.row(q);
const float* k10 = kernel1_tm.row(q);
const float* k20 = kernel2_tm.row(q);
const float* k30 = kernel3_tm.row(q);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
// tile
for (int i=0; i<h_tm/8 * w_tm/8; i++)
{
#if __ARM_NEON
#if __aarch64__
for (int m=0; m+7<64; m+=8)
{
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output1_tm = vld1q_f32(output1_tm);
float32x4_t _output2_tm = vld1q_f32(output2_tm);
float32x4_t _output3_tm = vld1q_f32(output3_tm);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r2 = vld1q_f32(r2);
float32x4_t _r3 = vld1q_f32(r3);
float32x4_t _k00 = vld1q_f32(k00);
k00 += 64;
float32x4_t _k01 = vld1q_f32(k00);
k00 += 64;
float32x4_t _k02 = vld1q_f32(k00);
k00 += 64;
float32x4_t _k03 = vld1q_f32(k00);
k00 += 64;
k00 -= 64*4;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tm = vmlaq_f32(_output0_tm, _r2, _k02);
_output0_tm = vmlaq_f32(_output0_tm, _r3, _k03);
float32x4_t _k10 = vld1q_f32(k10);
k10 += 64;
float32x4_t _k11 = vld1q_f32(k10);
k10 += 64;
float32x4_t _k12 = vld1q_f32(k10);
k10 += 64;
float32x4_t _k13 = vld1q_f32(k10);
k10 += 64;
k10 -= 64*4;
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tm = vmlaq_f32(_output1_tm, _r2, _k12);
_output1_tm = vmlaq_f32(_output1_tm, _r3, _k13);
float32x4_t _k20 = vld1q_f32(k20);
k20 += 64;
float32x4_t _k21 = vld1q_f32(k20);
k20 += 64;
float32x4_t _k22 = vld1q_f32(k20);
k20 += 64;
float32x4_t _k23 = vld1q_f32(k20);
k20 += 64;
k20 -= 64*4;
_output2_tm = vmlaq_f32(_output2_tm, _r0, _k20);
_output2_tm = vmlaq_f32(_output2_tm, _r1, _k21);
_output2_tm = vmlaq_f32(_output2_tm, _r2, _k22);
_output2_tm = vmlaq_f32(_output2_tm, _r3, _k23);
float32x4_t _k30 = vld1q_f32(k30);
k30 += 64;
float32x4_t _k31 = vld1q_f32(k30);
k30 += 64;
float32x4_t _k32 = vld1q_f32(k30);
k30 += 64;
float32x4_t _k33 = vld1q_f32(k30);
k30 += 64;
k30 -= 64*4;
_output3_tm = vmlaq_f32(_output3_tm, _r0, _k30);
_output3_tm = vmlaq_f32(_output3_tm, _r1, _k31);
_output3_tm = vmlaq_f32(_output3_tm, _r2, _k32);
_output3_tm = vmlaq_f32(_output3_tm, _r3, _k33);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output2_tm, _output2_tm);
vst1q_f32(output3_tm, _output3_tm);
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
k00 += 4;
k10 += 4;
k20 += 4;
k30 += 4;
float32x4_t _output0_tmn = vld1q_f32(output0_tm);
float32x4_t _output1_tmn = vld1q_f32(output1_tm);
float32x4_t _output2_tmn = vld1q_f32(output2_tm);
float32x4_t _output3_tmn = vld1q_f32(output3_tm);
float32x4_t _r0n = vld1q_f32(r0);
float32x4_t _r1n = vld1q_f32(r1);
float32x4_t _r2n = vld1q_f32(r2);
float32x4_t _r3n = vld1q_f32(r3);
float32x4_t _k00n = vld1q_f32(k00);
k00 += 64;
float32x4_t _k01n = vld1q_f32(k00);
k00 += 64;
float32x4_t _k02n = vld1q_f32(k00);
k00 += 64;
float32x4_t _k03n = vld1q_f32(k00);
k00 += 64;
k00 -= 64*4;
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
_output0_tmn = vmlaq_f32(_output0_tmn, _r2n, _k02n);
_output0_tmn = vmlaq_f32(_output0_tmn, _r3n, _k03n);
float32x4_t _k10n = vld1q_f32(k10);
k10 += 64;
float32x4_t _k11n = vld1q_f32(k10);
k10 += 64;
float32x4_t _k12n = vld1q_f32(k10);
k10 += 64;
float32x4_t _k13n = vld1q_f32(k10);
k10 += 64;
k10 -= 64*4;
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
_output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n);
_output1_tmn = vmlaq_f32(_output1_tmn, _r2n, _k12n);
_output1_tmn = vmlaq_f32(_output1_tmn, _r3n, _k13n);
float32x4_t _k20n = vld1q_f32(k20);
k20 += 64;
float32x4_t _k21n = vld1q_f32(k20);
k20 += 64;
float32x4_t _k22n = vld1q_f32(k20);
k20 += 64;
float32x4_t _k23n = vld1q_f32(k20);
k20 += 64;
k20 -= 64*4;
_output2_tmn = vmlaq_f32(_output2_tmn, _r0n, _k20n);
_output2_tmn = vmlaq_f32(_output2_tmn, _r1n, _k21n);
_output2_tmn = vmlaq_f32(_output2_tmn, _r2n, _k22n);
_output2_tmn = vmlaq_f32(_output2_tmn, _r3n, _k23n);
float32x4_t _k30n = vld1q_f32(k30);
k30 += 64;
float32x4_t _k31n = vld1q_f32(k30);
k30 += 64;
float32x4_t _k32n = vld1q_f32(k30);
k30 += 64;
float32x4_t _k33n = vld1q_f32(k30);
k30 += 64;
k30 -= 64*4;
_output3_tmn = vmlaq_f32(_output3_tmn, _r0n, _k30n);
_output3_tmn = vmlaq_f32(_output3_tmn, _r1n, _k31n);
_output3_tmn = vmlaq_f32(_output3_tmn, _r2n, _k32n);
_output3_tmn = vmlaq_f32(_output3_tmn, _r3n, _k33n);
vst1q_f32(output0_tm, _output0_tmn);
vst1q_f32(output1_tm, _output1_tmn);
vst1q_f32(output2_tm, _output2_tmn);
vst1q_f32(output3_tm, _output3_tmn);
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
k00 += 4;
k10 += 4;
k20 += 4;
k30 += 4;
}
#else // __aarch64__
asm volatile(
"mov r4, #8 \n"
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128]\n"//q8 q9 = _output0_tm
"0: \n"
"pld [%4, #256] \n"
"vld1.f32 {d0-d3}, [%4 :128]! \n"//q0 q1 = _r0
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k00
"add %8, %8, #256 \n"
"vmla.f32 q8, q0, q10 \n"
"vmla.f32 q9, q1, q11 \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]\n"//q12 q13 = _output1_tm
"pld [%9, #256] \n"
"vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k10
"add %9, %9, #256 \n"
"vmla.f32 q12, q0, q14 \n"
"vmla.f32 q13, q1, q15 \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"//q2 q3 = _r1
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k01
"add %8, %8, #256 \n"
"vmla.f32 q8, q2, q10 \n"
"vmla.f32 q9, q3, q11 \n"
"pld [%9, #256] \n"
"vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k11
"add %9, %9, #256 \n"
"vmla.f32 q12, q2, q14 \n"
"vmla.f32 q13, q3, q15 \n"
"pld [%6, #256] \n"
"vld1.f32 {d8-d11}, [%6 :128]!\n"//q4 q5 = _r2
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k02
"add %8, %8, #256 \n"
"vmla.f32 q8, q4, q10 \n"
"vmla.f32 q9, q5, q11 \n"
"pld [%9, #256] \n"
"vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k12
"add %9, %9, #256 \n"
"vmla.f32 q12, q4, q14 \n"
"vmla.f32 q13, q5, q15 \n"
"pld [%7, #256] \n"
"vld1.f32 {d12-d15}, [%7 :128]!\n"//q6 q7 = _r3
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]\n"//q10 q11 = _k03
"sub %8, %8, #736 \n"
"vmla.f32 q8, q6, q10 \n"
"vmla.f32 q9, q7, q11 \n"
"pld [%9, #256] \n"
"vld1.f32 {d28-d31}, [%9 :128]\n"//q14 q15 = _k13
"sub %9, %9, #736 \n"
"vmla.f32 q12, q6, q14 \n"
"vmla.f32 q13, q7, q15 \n"
"vst1.f32 {d16-d19}, [%0 :128]!\n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2 :128]\n"//q8 q9 = _output2_tm
"pld [%10, #256] \n"
"vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k20
"add %10, %10, #256 \n"
"vmla.f32 q8, q0, q10 \n"
"vmla.f32 q9, q1, q11 \n"
"vst1.f32 {d24-d27}, [%1 :128]!\n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]\n"//q12 q13 = _output3_tm
"pld [%11, #256] \n"
"vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k30
"add %11, %11, #256 \n"
"vmla.f32 q12, q0, q14 \n"
"vmla.f32 q13, q1, q15 \n"
"pld [%10, #256] \n"
"vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k21
"add %10, %10, #256 \n"
"vmla.f32 q8, q2, q10 \n"
"vmla.f32 q9, q3, q11 \n"
"pld [%11, #256] \n"
"vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k31
"add %11, %11, #256 \n"
"vmla.f32 q12, q2, q14 \n"
"vmla.f32 q13, q3, q15 \n"
"pld [%10, #256] \n"
"vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k22
"add %10, %10, #256 \n"
"vmla.f32 q8, q4, q10 \n"
"vmla.f32 q9, q5, q11 \n"
"pld [%11, #256] \n"
"vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k32
"add %11, %11, #256 \n"
"vmla.f32 q12, q4, q14 \n"
"vmla.f32 q13, q5, q15 \n"
"pld [%10, #256] \n"
"vld1.f32 {d20-d23}, [%10 :128]\n"//q10 q11 = _k23
"sub %10, %10, #736 \n"
"vmla.f32 q8, q6, q10 \n"
"vmla.f32 q9, q7, q11 \n"
"pld [%11, #256] \n"
"vld1.f32 {d28-d31}, [%11 :128]\n"//q14 q15 = _k33
"sub %11, %11, #736 \n"
"vmla.f32 q12, q6, q14 \n"
"vmla.f32 q13, q7, q15 \n"
"vst1.f32 {d16-d19}, [%2 :128]!\n"
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128]\n"//q8 q9 = _output0_tm
"subs r4, r4, #1 \n"
"vst1.f32 {d24-d27}, [%3 :128]!\n"
"bne 0b \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(r1), // %5
"=r"(r2), // %6
"=r"(r3), // %7
"=r"(k00), // %8
"=r"(k10), // %9
"=r"(k20), // %10
"=r"(k30) // %11
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(r1),
"6"(r2),
"7"(r3),
"8"(k00),
"9"(k10),
"10"(k20),
"11"(k30)
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
k00 -= 64;
k10 -= 64;
k20 -= 64;
k30 -= 64;
#else
for (int m=0; m<64; m++)
{
output0_tm[m] += r0[m] * k00[m];
k00 += 64;
output0_tm[m] += r1[m] * k00[m];
k00 += 64;
output0_tm[m] += r2[m] * k00[m];
k00 += 64;
output0_tm[m] += r3[m] * k00[m];
k00 += 64;
k00 -= 64 * 4;
output1_tm[m] += r0[m] * k10[m];
k10 += 64;
output1_tm[m] += r1[m] * k10[m];
k10 += 64;
output1_tm[m] += r2[m] * k10[m];
k10 += 64;
output1_tm[m] += r3[m] * k10[m];
k10 += 64;
k10 -= 64 * 4;
output2_tm[m] += r0[m] * k20[m];
k20 += 64;
output2_tm[m] += r1[m] * k20[m];
k20 += 64;
output2_tm[m] += r2[m] * k20[m];
k20 += 64;
output2_tm[m] += r3[m] * k20[m];
k20 += 64;
k20 -= 64 * 4;
output3_tm[m] += r0[m] * k30[m];
k30 += 64;
output3_tm[m] += r1[m] * k30[m];
k30 += 64;
output3_tm[m] += r2[m] * k30[m];
k30 += 64;
output3_tm[m] += r3[m] * k30[m];
k30 += 64;
k30 -= 64 * 4;
}
r0 += 64;
r1 += 64;
r2 += 64;
r3 += 64;
output0_tm += 64;
output1_tm += 64;
output2_tm += 64;
output3_tm += 64;
#endif // __ARM_NEON
}
}
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel1_tm.row(q);
const float* k2 = kernel2_tm.row(q);
const float* k3 = kernel3_tm.row(q);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
// tile
for (int i=0; i<h_tm/8 * w_tm/8; i++)
{
// TODO neon optimize
for (int m=0; m<64; m++)
{
output0_tm[m] += r0[m] * k0[m];
output1_tm[m] += r0[m] * k1[m];
output2_tm[m] += r0[m] * k2[m];
output3_tm[m] += r0[m] * k3[m];
}
r0 += 64;
output0_tm += 64;
output1_tm += 64;
output2_tm += 64;
output3_tm += 64;
}
}
}
#pragma omp parallel for
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
out0_tm.fill(0.f);
int q = 0;
for (; q+3<inch; q+=4)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q+1);
const float* r2 = bottom_blob_tm.channel(q+2);
const float* r3 = bottom_blob_tm.channel(q+3);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel0_tm.row(q+1);
const float* k2 = kernel0_tm.row(q+2);
const float* k3 = kernel0_tm.row(q+3);
float* output0_tm = out0_tm;
// tile
for (int i=0; i<h_tm/8 * w_tm/8; i++)
{
#if __ARM_NEON
#if __aarch64__
for (int m=0; m+7<64; m+=8)
{
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r2 = vld1q_f32(r2);
float32x4_t _r3 = vld1q_f32(r3);
float32x4_t _k0 = vld1q_f32(k0);
float32x4_t _k1 = vld1q_f32(k1);
float32x4_t _k2 = vld1q_f32(k2);
float32x4_t _k3 = vld1q_f32(k3);
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
_output0_tm = vmlaq_f32(_output0_tm, _r2, _k2);
_output0_tm = vmlaq_f32(_output0_tm, _r3, _k3);
vst1q_f32(output0_tm, _output0_tm);
output0_tm += 4;
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
k0 += 4;
k1 += 4;
k2 += 4;
k3 += 4;
float32x4_t _output0_tmn = vld1q_f32(output0_tm);
float32x4_t _r0n = vld1q_f32(r0);
float32x4_t _r1n = vld1q_f32(r1);
float32x4_t _r2n = vld1q_f32(r2);
float32x4_t _r3n = vld1q_f32(r3);
float32x4_t _k0n = vld1q_f32(k0);
float32x4_t _k1n = vld1q_f32(k1);
float32x4_t _k2n = vld1q_f32(k2);
float32x4_t _k3n = vld1q_f32(k3);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);
_output0_tmn = vmlaq_f32(_output0_tmn, _r2n, _k2n);
_output0_tmn = vmlaq_f32(_output0_tmn, _r3n, _k3n);
vst1q_f32(output0_tm, _output0_tmn);
output0_tm += 4;
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
k0 += 4;
k1 += 4;
k2 += 4;
k3 += 4;
}
#else
asm volatile(
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"
"mov r4, %0 \n"
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q12, q0, q2 \n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2 :128]!\n"
"vmla.f32 q13, q1, q3 \n"
"pld [%6, #256] \n"
"vld1.f32 {d20-d23}, [%6 :128]!\n"
"vmla.f32 q12, q8, q10 \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"
"vmla.f32 q13, q9, q11 \n"
"pld [%7, #256] \n"
"vld1.f32 {d4-d7}, [%7 :128]! \n"
"vmla.f32 q12, q0, q2 \n"
"pld [%4, #256] \n"
"vld1.f32 {d16-d19}, [%4 :128]!\n"
"vmla.f32 q13, q1, q3 \n"
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]!\n"
"vmla.f32 q12, q8, q10 \n"
"pld [%0, #256] \n"
"vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm
"vmla.f32 q13, q9, q11 \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q14, q0, q2 \n"
"vst1.f32 {d24-d27}, [r4 :128]!\n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2 :128]!\n"
"vmla.f32 q15, q1, q3 \n"
"pld [%6, #256] \n"
"vld1.f32 {d20-d23}, [%6 :128]!\n"
"vmla.f32 q14, q8, q10 \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"
"vmla.f32 q15, q9, q11 \n"
"pld [%7, #256] \n"
"vld1.f32 {d4-d7}, [%7 :128]! \n"
"vmla.f32 q14, q0, q2 \n"
"pld [%4, #256] \n"
"vld1.f32 {d16-d19}, [%4 :128]!\n"
"vmla.f32 q15, q1, q3 \n"
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]!\n"
"vmla.f32 q14, q8, q10 \n"
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm
"vmla.f32 q15, q9, q11 \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q12, q0, q2 \n"
"vst1.f32 {d28-d31}, [r4 :128]!\n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2 :128]!\n"
"vmla.f32 q13, q1, q3 \n"
"pld [%6, #256] \n"
"vld1.f32 {d20-d23}, [%6 :128]!\n"
"vmla.f32 q12, q8, q10 \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"
"vmla.f32 q13, q9, q11 \n"
"pld [%7, #256] \n"
"vld1.f32 {d4-d7}, [%7 :128]! \n"
"vmla.f32 q12, q0, q2 \n"
"pld [%4, #256] \n"
"vld1.f32 {d16-d19}, [%4 :128]!\n"
"vmla.f32 q13, q1, q3 \n"
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]!\n"
"vmla.f32 q12, q8, q10 \n"
"pld [%0, #256] \n"
"vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm
"vmla.f32 q13, q9, q11 \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q14, q0, q2 \n"
"vst1.f32 {d24-d27}, [r4 :128]!\n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2 :128]!\n"
"vmla.f32 q15, q1, q3 \n"
"pld [%6, #256] \n"
"vld1.f32 {d20-d23}, [%6 :128]!\n"
"vmla.f32 q14, q8, q10 \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"
"vmla.f32 q15, q9, q11 \n"
"pld [%7, #256] \n"
"vld1.f32 {d4-d7}, [%7 :128]! \n"
"vmla.f32 q14, q0, q2 \n"
"pld [%4, #256] \n"
"vld1.f32 {d16-d19}, [%4 :128]!\n"
"vmla.f32 q15, q1, q3 \n"
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]!\n"
"vmla.f32 q14, q8, q10 \n"
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm
"vmla.f32 q15, q9, q11 \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q12, q0, q2 \n"
"vst1.f32 {d28-d31}, [r4 :128]!\n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2 :128]!\n"
"vmla.f32 q13, q1, q3 \n"
"pld [%6, #256] \n"
"vld1.f32 {d20-d23}, [%6 :128]!\n"
"vmla.f32 q12, q8, q10 \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"
"vmla.f32 q13, q9, q11 \n"
"pld [%7, #256] \n"
"vld1.f32 {d4-d7}, [%7 :128]! \n"
"vmla.f32 q12, q0, q2 \n"
"pld [%4, #256] \n"
"vld1.f32 {d16-d19}, [%4 :128]!\n"
"vmla.f32 q13, q1, q3 \n"
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]!\n"
"vmla.f32 q12, q8, q10 \n"
"pld [%0, #256] \n"
"vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm
"vmla.f32 q13, q9, q11 \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q14, q0, q2 \n"
"vst1.f32 {d24-d27}, [r4 :128]!\n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2 :128]!\n"
"vmla.f32 q15, q1, q3 \n"
"pld [%6, #256] \n"
"vld1.f32 {d20-d23}, [%6 :128]!\n"
"vmla.f32 q14, q8, q10 \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"
"vmla.f32 q15, q9, q11 \n"
"pld [%7, #256] \n"
"vld1.f32 {d4-d7}, [%7 :128]! \n"
"vmla.f32 q14, q0, q2 \n"
"pld [%4, #256] \n"
"vld1.f32 {d16-d19}, [%4 :128]!\n"
"vmla.f32 q15, q1, q3 \n"
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]!\n"
"vmla.f32 q14, q8, q10 \n"
"pld [%0, #256] \n"
"vld1.f32 {d24-d27}, [%0 :128]!\n"//q12 q13 = output0_tm
"vmla.f32 q15, q9, q11 \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q12, q0, q2 \n"
"vst1.f32 {d28-d31}, [r4 :128]!\n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2 :128]!\n"
"vmla.f32 q13, q1, q3 \n"
"pld [%6, #256] \n"
"vld1.f32 {d20-d23}, [%6 :128]!\n"
"vmla.f32 q12, q8, q10 \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"
"vmla.f32 q13, q9, q11 \n"
"pld [%7, #256] \n"
"vld1.f32 {d4-d7}, [%7 :128]! \n"
"vmla.f32 q12, q0, q2 \n"
"pld [%4, #256] \n"
"vld1.f32 {d16-d19}, [%4 :128]!\n"
"vmla.f32 q13, q1, q3 \n"
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]!\n"
"vmla.f32 q12, q8, q10 \n"
"pld [%0, #256] \n"
"vld1.f32 {d28-d31}, [%0 :128]!\n"//q14 q15 = output0_tm
"vmla.f32 q13, q9, q11 \n"
"pld [%1, #256] \n"
"vld1.f32 {d0-d3}, [%1 :128]! \n"
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q14, q0, q2 \n"
"vst1.f32 {d24-d27}, [r4 :128]!\n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2 :128]!\n"
"vmla.f32 q15, q1, q3 \n"
"pld [%6, #256] \n"
"vld1.f32 {d20-d23}, [%6 :128]!\n"
"vmla.f32 q14, q8, q10 \n"
"pld [%3, #256] \n"
"vld1.f32 {d0-d3}, [%3 :128]! \n"
"vmla.f32 q15, q9, q11 \n"
"pld [%7, #256] \n"
"vld1.f32 {d4-d7}, [%7 :128]! \n"
"vmla.f32 q14, q0, q2 \n"
"pld [%4, #256] \n"
"vld1.f32 {d16-d19}, [%4 :128]!\n"
"vmla.f32 q15, q1, q3 \n"
"pld [%8, #256] \n"
"vld1.f32 {d20-d23}, [%8 :128]!\n"
"vmla.f32 q14, q8, q10 \n"
"vmla.f32 q15, q9, q11 \n"
"vst1.f32 {d28-d31}, [r4 :128]!\n"
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(r1), // %2
"=r"(r2), // %3
"=r"(r3), // %4
"=r"(k0), // %5
"=r"(k1), // %6
"=r"(k2), // %7
"=r"(k3) // %8
: "0"(output0_tm),
"1"(r0),
"2"(r1),
"3"(r2),
"4"(r3),
"5"(k0),
"6"(k1),
"7"(k2),
"8"(k3)
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
k0 -= 64;
k1 -= 64;
k2 -= 64;
k3 -= 64;
#else
for (int m=0; m<64; m++)
{
output0_tm[m] += r0[m] * k0[m];
output0_tm[m] += r1[m] * k1[m];
output0_tm[m] += r2[m] * k2[m];
output0_tm[m] += r3[m] * k3[m];
}
r0 += 64;
r1 += 64;
r2 += 64;
r3 += 64;
output0_tm += 64;
#endif // __ARM_NEON
}
}
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* k0 = kernel0_tm.row(q);
float* output0_tm = out0_tm;
// tile
for (int i=0; i<h_tm/8 * w_tm/8; i++)
{
// TODO neon optimize
for (int m=0; m<64; m++)
{
output0_tm[m] += r0[m] * k0[m];
}
r0 += 64;
output0_tm += 64;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch);
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
float tmp[6][8];
// tile
for (int i=0; i<outh/6; i++)
{
for (int j=0; j<outw/6; j++)
{
const float* output0_tm = out0_tm.row(i * w_tm/8 + j);
float* output0 = out0.row(i * 6) + j * 6;
// TODO neon optimize
for (int m=0; m<8; m++)
{
float tmp024a = output0_tm[1] + output0_tm[2];
float tmp135a = output0_tm[1] - output0_tm[2];
float tmp024b = output0_tm[3] + output0_tm[4];
float tmp135b = output0_tm[3] - output0_tm[4];
float tmp024c = output0_tm[5] + output0_tm[6];
float tmp135c = output0_tm[5] - output0_tm[6];
tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm += 8;
}
for (int m=0; m<6; m++)
{
const float* tmp0 = tmp[m];
float tmp024a = tmp0[1] + tmp0[2];
float tmp135a = tmp0[1] - tmp0[2];
float tmp024b = tmp0[3] + tmp0[4];
float tmp135b = tmp0[3] - tmp0[4];
float tmp024c = tmp0[5] + tmp0[6];
float tmp135c = tmp0[5] - tmp0[6];
output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w);
}
static void conv3x3s1_winograd64_neon2(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
bottom_blob_tm.create(2*8, 4 * w_tm/8 * h_tm/8, inch);
const int tiles = w_tm/8 * h_tm/8;
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for
for (int q = 0; q<inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8];
// tile
for (int i=0; i<h_tm/8; i++)
{
for (int j=0; j<w_tm/8; j++)
{
const float* r0 = img0.row(i * 6) + j * 6;
float* r0_tm01 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm23 = img0_tm.row(tiles + i * w_tm/8 + j);
float* r0_tm45 = img0_tm.row(tiles * 2 + i * w_tm/8 + j);
float* r0_tm67 = img0_tm.row(tiles * 3 + i * w_tm/8 + j);
for (int m=0; m<8; m++)
{
tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;
float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);
tmp[1][m] = tmp12a + tmp12b;
tmp[2][m] = tmp12a - tmp12b;
float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);
tmp[3][m] = tmp34a + tmp34b;
tmp[4][m] = tmp34a - tmp34b;
float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);
tmp[5][m] = tmp56a + tmp56b;
tmp[6][m] = tmp56a - tmp56b;
r0 += w;
}
float* r0_tms[4] = { r0_tm01, r0_tm23, r0_tm45, r0_tm67 };
for (int m=0; m<8; m++)
{
const float* tmp0 = tmp[m];
float* r0_tm = r0_tms[m/2] + (m%2) * 8;
r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;
float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]);
r0_tm[1] = tmp12a + tmp12b;
r0_tm[2] = tmp12a - tmp12b;
float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);
r0_tm[3] = tmp34a + tmp34b;
r0_tm[4] = tmp34a - tmp34b;
float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);
r0_tm[5] = tmp56a + tmp56b;
r0_tm[6] = tmp56a - tmp56b;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
top_blob_tm.create(2*8, 4 * w_tm/8 * h_tm/8, outch);
const int tiles = h_tm/8 * w_tm/8;
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
out0_tm.fill(0.f);
int q = 0;
for (; q+1<inch; q+=2)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q+1);
const float* k0 = kernel0_tm.row(q);
const float* k1 = kernel0_tm.row(q+1);
float* output0_tm = out0_tm;
for (int r=0; r<4; r++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _k0 = vld1q_f32(k0);
float32x4_t _k0n = vld1q_f32(k0+4);
float32x4_t _k0nn = vld1q_f32(k0+8);
float32x4_t _k0nnn = vld1q_f32(k0+12);
float32x4_t _k1 = vld1q_f32(k1);
float32x4_t _k1n = vld1q_f32(k1+4);
float32x4_t _k1nn = vld1q_f32(k1+8);
float32x4_t _k1nnn = vld1q_f32(k1+12);
#else
float32x4_t _k0;
float32x4_t _k0n;
float32x4_t _k0nn;
float32x4_t _k0nnn;
float32x4_t _k1;
float32x4_t _k1n;
float32x4_t _k1nn;
float32x4_t _k1nnn;
asm volatile(
"pld [%0, #512] \n"
"vld1.f32 {%e2-%f2}, [%0 :128]! \n"
"pld [%1, #512] \n"
"vld1.f32 {%e4-%f4}, [%1 :128]! \n"
"vld1.f32 {%e3-%f3}, [%0 :128]! \n"
"vld1.f32 {%e5-%f5}, [%1 :128]! \n"
"vld1.f32 {%e6-%f6}, [%0 :128]! \n"
"vld1.f32 {%e8-%f8}, [%1 :128]! \n"
"vld1.f32 {%e7-%f7}, [%0 :128]! \n"
"vld1.f32 {%e9-%f9}, [%1 :128]! \n"
: "=r"(k0), // %0
"=r"(k1), // %1
"=w"(_k0), // %2
"=w"(_k0n), // %3
"=w"(_k1), // %4
"=w"(_k1n), // %5
"=w"(_k0nn), // %6
"=w"(_k0nnn), // %7
"=w"(_k1nn), // %8
"=w"(_k1nnn) // %9
: "0"(k0),
"1"(k1)
: "cc", "memory"
);
#endif // __aarch64__
#endif // __ARM_NEON
// tile
#if __ARM_NEON
int nn = tiles >> 2;
int remain = tiles & 3;
#else
int remain = tiles;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
}
#else
if (nn > 0)
{
asm volatile(
"mov r4, %1 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"0: \n"
"pld [%1, #256] \n"
"vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm
"vmla.f32 q10, q12, %q12 \n"
"vmla.f32 q11, q13, %q13 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q14 \n"
"vmla.f32 q11, q15, %q15 \n"
"vst1.f32 {d16-d19}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d20-d23}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm
"vmla.f32 q10, q12, %q12 \n"
"vmla.f32 q11, q13, %q13 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q14 \n"
"vmla.f32 q11, q15, %q15 \n"
"vst1.f32 {d16-d19}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d20-d23}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm
"vmla.f32 q10, q12, %q12 \n"
"vmla.f32 q11, q13, %q13 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q14 \n"
"vmla.f32 q11, q15, %q15 \n"
"vst1.f32 {d16-d19}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d20-d23}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d20-d23}, [%1 :128]! \n"// q10 q11 = _output0_tm
"vmla.f32 q10, q12, %q12 \n"
"vmla.f32 q11, q13, %q13 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q14 \n"
"vmla.f32 q11, q15, %q15 \n"
"vst1.f32 {d16-d19}, [r4 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"subs %0, #1 \n"
"vst1.f32 {d20-d23}, [r4 :128]! \n"
"bne 0b \n"
"sub %1, #32 \n"
"sub %2, #64 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(r1) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(r1),
"w"(_k0), // %8
"w"(_k0n), // %9
"w"(_k1), // %10
"w"(_k1n), // %11
"w"(_k0nn), // %12
"w"(_k0nnn), // %13
"w"(_k1nn), // %14
"w"(_k1nnn) // %15
: "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k1nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k1nnn);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
#else
asm volatile(
"mov r4, %0 \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q6 \n"
"pld [%2, #256] \n"
"vld1.f32 {d28-d31}, [%2 :128]! \n"// q14 q15 = _r1
"vmla.f32 q9, q13, %q7 \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
"vmla.f32 q8, q14, %q8 \n"
"pld [%0, #256] \n"
"vld1.f32 {d20-d23}, [%0 :128] \n"// q10 q11 = _output0_tm
"vmla.f32 q9, q15, %q9 \n"
"vmla.f32 q10, q12, %q10 \n"
"vmla.f32 q11, q13, %q11 \n"
"vst1.f32 {d16-d19}, [r4 :128] \n"
"pld [%2, #256] \n"
"vld1.f32 {d28-d31}, [%2 :128]! \n"// q14 q15 = _r1
"vmla.f32 q10, q14, %q12 \n"
"vmla.f32 q11, q15, %q13 \n"
"vst1.f32 {d20-d23}, [%0 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(r1) // %2
: "0"(output0_tm),
"1"(r0),
"2"(r1),
"w"(_k0), // %6
"w"(_k0n), // %7
"w"(_k1), // %8
"w"(_k1n), // %9
"w"(_k0nn), // %10
"w"(_k0nnn), // %11
"w"(_k1nn), // %12
"w"(_k1nnn) // %13
: "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
for (int m=0; m<16; m++)
{
output0_tm[m] += r0[m] * k0[m];
output0_tm[m] += r1[m] * k1[m];
}
r0 += 16;
r1 += 16;
output0_tm += 16;
#endif // __ARM_NEON
}
#if __ARM_NEON
#if __aarch64__
k0 += 16;
k1 += 16;
#endif // __aarch64__
#else
k0 += 16;
k1 += 16;
#endif // __ARM_NEON
}
}
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* k0 = kernel0_tm.row(q);
float* output0_tm = out0_tm;
for (int r=0; r<4; r++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _k0 = vld1q_f32(k0);
float32x4_t _k0n = vld1q_f32(k0+4);
float32x4_t _k0nn = vld1q_f32(k0+8);
float32x4_t _k0nnn = vld1q_f32(k0+12);
#else
float32x4_t _k0;
float32x4_t _k0n;
float32x4_t _k0nn;
float32x4_t _k0nnn;
asm volatile(
"pld [%0, #512] \n"
"vld1.f32 {%e1-%f1}, [%0 :128]! \n"
"vld1.f32 {%e2-%f2}, [%0 :128]! \n"
"vld1.f32 {%e3-%f3}, [%0 :128]! \n"
"vld1.f32 {%e4-%f4}, [%0 :128]! \n"
: "=r"(k0), // %0
"=w"(_k0), // %1
"=w"(_k0n), // %2
"=w"(_k0nn), // %3
"=w"(_k0nnn) // %4
: "0"(k0)
: "cc", "memory"
);
#endif // __aarch64__
#endif // __ARM_NEON
// tile
for (int i=0; i<tiles; i++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k0nn);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k0nnn);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
#else
asm volatile(
"mov r4, %0 \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128]! \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q4 \n"
"vmla.f32 q9, q13, %q5 \n"
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d20-d23}, [%0 :128] \n"// q10 q11 = _output0_tm
"vmla.f32 q10, q12, %q6 \n"
"vst1.f32 {d16-d19}, [r4 :128] \n"
"vmla.f32 q11, q13, %q7 \n"
"vst1.f32 {d20-d23}, [%0 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(r0) // %1
: "0"(output0_tm),
"1"(r0),
"w"(_k0), // %4
"w"(_k0n), // %5
"w"(_k0nn), // %6
"w"(_k0nnn) // %7
: "cc", "memory", "r4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
for (int m=0; m<16; m++)
{
output0_tm[m] += r0[m] * k0[m];
}
r0 += 16;
output0_tm += 16;
#endif // __ARM_NEON
}
#if __ARM_NEON
#if __aarch64__
k0 += 16;
#endif // __aarch64__
#else
k0 += 16;
#endif // __ARM_NEON
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch);
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
float tmp[6][8];
// tile
for (int i=0; i<outh/6; i++)
{
for (int j=0; j<outw/6; j++)
{
const float* output0_tm01 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm23 = out0_tm.row(tiles + i * w_tm/8 + j);
const float* output0_tm45 = out0_tm.row(tiles * 2 + i * w_tm/8 + j);
const float* output0_tm67 = out0_tm.row(tiles * 3 + i * w_tm/8 + j);
float* output0 = out0.row(i * 6) + j * 6;
const float* output0_tms[4] = { output0_tm01, output0_tm23, output0_tm45, output0_tm67 };
for (int m=0; m<8; m++)
{
const float* output0_tm = output0_tms[m/2] + (m%2) * 8;
float tmp024a = output0_tm[1] + output0_tm[2];
float tmp135a = output0_tm[1] - output0_tm[2];
float tmp024b = output0_tm[3] + output0_tm[4];
float tmp135b = output0_tm[3] - output0_tm[4];
float tmp024c = output0_tm[5] + output0_tm[6];
float tmp135c = output0_tm[5] - output0_tm[6];
tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c;
}
for (int m=0; m<6; m++)
{
const float* tmp0 = tmp[m];
float tmp024a = tmp0[1] + tmp0[2];
float tmp135a = tmp0[1] - tmp0[2];
float tmp024b = tmp0[3] + tmp0[4];
float tmp135b = tmp0[3] - tmp0[4];
float tmp024c = tmp0[5] + tmp0[6];
float tmp135c = tmp0[5] - tmp0[6];
output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w);
}
// Winograd F(6x6, 3x3) convolution, stride 1 (variant 3).
// Computes top_blob = conv3x3(bottom_blob) + bias using 8x8 Winograd tiles:
//   1) pad the input up to a multiple of 6 plus a 2-pixel border,
//   2) transform each 8x8 input tile (B^T d B),
//   3) element-wise multiply-accumulate with the pre-transformed kernel
//      kernel_tm across input channels,
//   4) inverse-transform each tile to a 6x6 output block (A^T m A) and
//      crop the padding off.
// NOTE(review): kernel_tm is assumed to already hold the Winograd-domain
// kernel (8x8 per in/out channel pair, laid out 8 rows of 8 floats per
// input channel) produced by the matching kernel-transform routine --
// confirm against the caller.
static void conv3x3s1_winograd64_neon3(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
// round the output size up to a multiple of the 6x6 tile; the input needs
// 2 extra pixels (8x8 tiles overlap by 2 for the 3x3 kernel)
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
// layout: 8 floats per row, 8 rows per tile, tiles grouped so that row r
// of every tile for a given transform-row index lives tiles apart
bottom_blob_tm.create(8, 8 * w_tm/8 * h_tm/8, inch);
const int tiles = w_tm/8 * h_tm/8;
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#pragma omp parallel for
for (int q = 0; q<inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
// tmp holds the vertically-transformed tile (columns pass) before the
// horizontal pass writes the final rows
float tmp[8][8];
// tile
for (int i=0; i<h_tm/8; i++)
{
for (int j=0; j<w_tm/8; j++)
{
const float* r0 = img0.row(i * 6) + j * 6;
// the 8 transform rows of one tile are strided `tiles` rows apart
float* r0_tm0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm1 = img0_tm.row(i * w_tm/8 + j + tiles);
float* r0_tm2 = img0_tm.row(i * w_tm/8 + j + tiles * 2);
float* r0_tm3 = img0_tm.row(i * w_tm/8 + j + tiles * 3);
float* r0_tm4 = img0_tm.row(i * w_tm/8 + j + tiles * 4);
float* r0_tm5 = img0_tm.row(i * w_tm/8 + j + tiles * 5);
float* r0_tm6 = img0_tm.row(i * w_tm/8 + j + tiles * 6);
float* r0_tm7 = img0_tm.row(i * w_tm/8 + j + tiles * 7);
// vertical pass: apply the itm transform down each of the 8 columns
for (int m=0; m<8; m++)
{
tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25;
tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25;
float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25);
float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25);
tmp[1][m] = tmp12a + tmp12b;
tmp[2][m] = tmp12a - tmp12b;
float tmp34a = (r0[6] + r0[2] * 0.25 - r0[4] * 1.25);
float tmp34b = (r0[1] * 0.5 - r0[3] * 2.5 + r0[5] * 2);
tmp[3][m] = tmp34a + tmp34b;
tmp[4][m] = tmp34a - tmp34b;
float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25) * 4);
float tmp56b = (r0[1] * 2 - r0[3] * 2.5 + r0[5] * 0.5);
tmp[5][m] = tmp56a + tmp56b;
tmp[6][m] = tmp56a - tmp56b;
r0 += w;
}
float* r0_tms[8] = { r0_tm0, r0_tm1, r0_tm2, r0_tm3, r0_tm4, r0_tm5, r0_tm6, r0_tm7 };
// horizontal pass: same transform along each row of tmp
for (int m=0; m<8; m++)
{
const float* tmp0 = tmp[m];
float* r0_tm = r0_tms[m];
r0_tm[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25;
r0_tm[7] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25;
float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25);
float tmp12b = (tmp0[1] - tmp0[3] * 4.25 + tmp0[5]);
r0_tm[1] = tmp12a + tmp12b;
r0_tm[2] = tmp12a - tmp12b;
float tmp34a = (tmp0[6] + tmp0[2] * 0.25 - tmp0[4] * 1.25);
float tmp34b = (tmp0[1] * 0.5 - tmp0[3] * 2.5 + tmp0[5] * 2);
r0_tm[3] = tmp34a + tmp34b;
r0_tm[4] = tmp34a - tmp34b;
float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25) * 4);
float tmp56b = (tmp0[1] * 2 - tmp0[3] * 2.5 + tmp0[5] * 0.5);
r0_tm[5] = tmp56a + tmp56b;
r0_tm[6] = tmp56a - tmp56b;
}
}
}
}
}
// release the padded input copy as soon as its transform is done
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
// Winograd-domain multiply-accumulate: for every output channel,
// accumulate the element-wise product of the transformed input tiles with
// the transformed kernel across all input channels.
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
top_blob_tm.create(8, 8 * w_tm/8 * h_tm/8, outch);
const int tiles = h_tm/8 * w_tm/8;
// process output channels in pairs; the leftover (odd) channel is
// handled by the second parallel loop below
int nn_outch = outch >> 1;
int remain_outch_start = nn_outch << 1;
#pragma omp parallel for
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 2;
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p+1);
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p+1);
out0_tm.fill(0.f);
out1_tm.fill(0.f);
// input channels in pairs (k00/k01 feed out0, k10/k11 feed out1)
int q = 0;
for (; q+1<inch; q+=2)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q+1);
const float* k00 = kernel0_tm.row(q);
const float* k01 = kernel0_tm.row(q+1);
const float* k10 = kernel1_tm.row(q);
const float* k11 = kernel1_tm.row(q+1);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
// r indexes the 8 transform rows; each iteration streams over all
// tiles with a fixed 8-float kernel row broadcast
for (int r=0; r<8; r++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _k00 = vld1q_f32(k00);
float32x4_t _k00n = vld1q_f32(k00+4);
float32x4_t _k01 = vld1q_f32(k01);
float32x4_t _k01n = vld1q_f32(k01+4);
float32x4_t _k10 = vld1q_f32(k10);
float32x4_t _k10n = vld1q_f32(k10+4);
float32x4_t _k11 = vld1q_f32(k11);
float32x4_t _k11n = vld1q_f32(k11+4);
#else
// armv7: load the four 8-float kernel rows with post-increment, so the
// k pointers are advanced here (no explicit k += 8 at loop end below)
float32x4_t _k00;
float32x4_t _k00n;
float32x4_t _k01;
float32x4_t _k01n;
float32x4_t _k10;
float32x4_t _k10n;
float32x4_t _k11;
float32x4_t _k11n;
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {%e4-%f4}, [%0 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {%e6-%f6}, [%1 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {%e8-%f8}, [%2 :128]! \n"
"pld [%3, #256] \n"
"vld1.f32 {%e10-%f10}, [%3 :128]! \n"
"vld1.f32 {%e5-%f5}, [%0 :128]! \n"
"vld1.f32 {%e7-%f7}, [%1 :128]! \n"
"vld1.f32 {%e9-%f9}, [%2 :128]! \n"
"vld1.f32 {%e11-%f11}, [%3 :128]! \n"
: "=r"(k00), // %0
"=r"(k01), // %1
"=r"(k10), // %2
"=r"(k11), // %3
"=w"(_k00), // %4
"=w"(_k00n), // %5
"=w"(_k01), // %6
"=w"(_k01n), // %7
"=w"(_k10), // %8
"=w"(_k10n), // %9
"=w"(_k11), // %10
"=w"(_k11n) // %11
: "0"(k00),
"1"(k01),
"2"(k10),
"3"(k11)
: "cc", "memory"
);
#endif // __aarch64__
#endif // __ARM_NEON
// tile
#if __ARM_NEON
// process 4 tiles per iteration; `remain` handles the tail
int nn = tiles >> 2;
int remain = tiles & 3;
#else
int remain = tiles;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
// 4x-unrolled: each repetition is one tile (8 floats per output)
for (; nn>0; nn--)
{
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _output1_tm = vld1q_f32(output1_tm);
float32x4_t _output1_tmn = vld1q_f32(output1_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_output1_tm = vld1q_f32(output1_tm);
_output1_tmn = vld1q_f32(output1_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_output1_tm = vld1q_f32(output1_tm);
_output1_tmn = vld1q_f32(output1_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_output1_tm = vld1q_f32(output1_tm);
_output1_tmn = vld1q_f32(output1_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
}
#else
// armv7: same 4x-unrolled tile loop as the aarch64 path, hand-scheduled
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q10 \n"
"vmla.f32 q9, q13, %q11 \n"
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q12 \n"
"vmla.f32 q9, q15, %q13 \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q14 \n"
"vmla.f32 q11, q13, %q15 \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q16 \n"
"vmla.f32 q11, q15, %q17 \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q10 \n"
"vmla.f32 q9, q13, %q11 \n"
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q12 \n"
"vmla.f32 q9, q15, %q13 \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q14 \n"
"vmla.f32 q11, q13, %q15 \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q16 \n"
"vmla.f32 q11, q15, %q17 \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q10 \n"
"vmla.f32 q9, q13, %q11 \n"
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q12 \n"
"vmla.f32 q9, q15, %q13 \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q14 \n"
"vmla.f32 q11, q13, %q15 \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"vmla.f32 q10, q14, %q16 \n"
"vmla.f32 q11, q15, %q17 \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q10 \n"
"vmla.f32 q9, q13, %q11 \n"
"pld [%4, #256] \n"
"vld1.f32 {d28-d31}, [%4 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q12 \n"
"vmla.f32 q9, q15, %q13 \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q14 \n"
"vmla.f32 q11, q13, %q15 \n"
"vmla.f32 q10, q14, %q16 \n"
"vmla.f32 q11, q15, %q17 \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0), // %3
"=r"(r1) // %4
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"4"(r1),
"w"(_k00), // %10
"w"(_k00n), // %11
"w"(_k01), // %12
"w"(_k01n), // %13
"w"(_k10), // %14
"w"(_k10n), // %15
"w"(_k11), // %16
"w"(_k11n) // %17
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
// tail: one tile per iteration
for (; remain>0; remain--)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _output1_tm = vld1q_f32(output1_tm);
float32x4_t _output1_tmn = vld1q_f32(output1_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
_output1_tm = vmlaq_f32(_output1_tm, _r1, _k11);
_output1_tmn = vmlaq_f32(_output1_tmn, _r1n, _k11n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
#else
asm volatile(
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%1, #256] \n"
"vld1.f32 {d20-d23}, [%1 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q12 \n"
"vmla.f32 q11, q13, %q13 \n"
"vmla.f32 q10, q14, %q14 \n"
"vmla.f32 q11, q15, %q15 \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
"vst1.f32 {d20-d23}, [%1 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(r0), // %2
"=r"(r1) // %3
: "0"(output0_tm),
"1"(output1_tm),
"2"(r0),
"3"(r1),
"w"(_k00), // %8
"w"(_k00n), // %9
"w"(_k01), // %10
"w"(_k01n), // %11
"w"(_k10), // %12
"w"(_k10n), // %13
"w"(_k11), // %14
"w"(_k11n) // %15
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
// scalar fallback (no NEON)
for (int m=0; m<8; m++)
{
output0_tm[m] += r0[m] * k00[m];
output0_tm[m] += r1[m] * k01[m];
output1_tm[m] += r0[m] * k10[m];
output1_tm[m] += r1[m] * k11[m];
}
r0 += 8;
r1 += 8;
output0_tm += 8;
output1_tm += 8;
#endif // __ARM_NEON
}
// advance to the next kernel row -- except on armv7 NEON, where the
// load asm above already post-incremented the k pointers
#if __ARM_NEON
#if __aarch64__
k00 += 8;
k01 += 8;
k10 += 8;
k11 += 8;
#endif // __aarch64__
#else
k00 += 8;
k01 += 8;
k10 += 8;
k11 += 8;
#endif // __ARM_NEON
}
}
// leftover input channel (inch odd): single-r0 version of the loop above
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* k00 = kernel0_tm.row(q);
const float* k10 = kernel1_tm.row(q);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
for (int r=0; r<8; r++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _k00 = vld1q_f32(k00);
float32x4_t _k00n = vld1q_f32(k00+4);
float32x4_t _k10 = vld1q_f32(k10);
float32x4_t _k10n = vld1q_f32(k10+4);
#else
// armv7: kernel loads post-increment k00/k10 (see note above)
float32x4_t _k00;
float32x4_t _k00n;
float32x4_t _k10;
float32x4_t _k10n;
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {%e2-%f2}, [%0 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {%e4-%f4}, [%1 :128]! \n"
"vld1.f32 {%e3-%f3}, [%0 :128]! \n"
"vld1.f32 {%e5-%f5}, [%1 :128]! \n"
: "=r"(k00), // %0
"=r"(k10), // %1
"=w"(_k00), // %2
"=w"(_k00n), // %3
"=w"(_k10), // %4
"=w"(_k10n) // %5
: "0"(k00),
"1"(k10)
: "cc", "memory"
);
#endif // __aarch64__
#endif // __ARM_NEON
// tile
#if __ARM_NEON
int nn = tiles >> 2;
int remain = tiles & 3;
#else
int remain = tiles;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _output1_tm = vld1q_f32(output1_tm);
float32x4_t _output1_tmn = vld1q_f32(output1_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_output1_tm = vld1q_f32(output1_tm);
_output1_tmn = vld1q_f32(output1_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_output1_tm = vld1q_f32(output1_tm);
_output1_tmn = vld1q_f32(output1_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_output1_tm = vld1q_f32(output1_tm);
_output1_tmn = vld1q_f32(output1_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q10 \n"
"vmla.f32 q11, q13, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q10 \n"
"vmla.f32 q11, q13, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q10 \n"
"vmla.f32 q11, q13, %q11 \n"
"pld [%3, #256] \n"
"vld1.f32 {d24-d27}, [%3 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"pld [%2, #256] \n"
"vld1.f32 {d20-d23}, [%2 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q10 \n"
"vmla.f32 q11, q13, %q11 \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"vst1.f32 {d20-d23}, [%2 :128]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(r0) // %3
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(r0),
"w"(_k00), // %8
"w"(_k00n), // %9
"w"(_k10), // %10
"w"(_k10n) // %11
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _output1_tm = vld1q_f32(output1_tm);
float32x4_t _output1_tmn = vld1q_f32(output1_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output1_tm = vmlaq_f32(_output1_tm, _r0, _k10);
_output1_tmn = vmlaq_f32(_output1_tmn, _r0n, _k10n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
vst1q_f32(output1_tm, _output1_tm);
vst1q_f32(output1_tm+4, _output1_tmn);
output0_tm += 8;
output1_tm += 8;
#else
asm volatile(
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q6 \n"
"vmla.f32 q9, q13, %q7 \n"
"pld [%1, #256] \n"
"vld1.f32 {d20-d23}, [%1 :128] \n"// q10 q11 = _output1_tm
"vmla.f32 q10, q12, %q8 \n"
"vmla.f32 q11, q13, %q9 \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
"vst1.f32 {d20-d23}, [%1 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(r0) // %2
: "0"(output0_tm),
"1"(output1_tm),
"2"(r0),
"w"(_k00), // %6
"w"(_k00n), // %7
"w"(_k10), // %8
"w"(_k10n) // %9
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13"
);
#endif // __aarch64__
#else
for (int m=0; m<8; m++)
{
output0_tm[m] += r0[m] * k00[m];
output1_tm[m] += r0[m] * k10[m];
}
r0 += 8;
output0_tm += 8;
output1_tm += 8;
#endif // __ARM_NEON
}
// advance kernel rows (armv7 NEON already advanced in the load asm)
#if __ARM_NEON
#if __aarch64__
k00 += 8;
k10 += 8;
#endif // __aarch64__
#else
k00 += 8;
k10 += 8;
#endif // __ARM_NEON
}
}
}
// remaining (odd) output channel: single-output-channel version
#pragma omp parallel for
for (int p = remain_outch_start; p<outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
out0_tm.fill(0.f);
int q = 0;
for (; q+1<inch; q+=2)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q+1);
const float* k00 = kernel0_tm.row(q);
const float* k01 = kernel0_tm.row(q+1);
float* output0_tm = out0_tm;
for (int r=0; r<8; r++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _k00 = vld1q_f32(k00);
float32x4_t _k00n = vld1q_f32(k00+4);
float32x4_t _k01 = vld1q_f32(k01);
float32x4_t _k01n = vld1q_f32(k01+4);
#else
// armv7: kernel loads post-increment k00/k01
float32x4_t _k00;
float32x4_t _k00n;
float32x4_t _k01;
float32x4_t _k01n;
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {%e2-%f2}, [%0 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {%e4-%f4}, [%1 :128]! \n"
"vld1.f32 {%e3-%f3}, [%0 :128]! \n"
"vld1.f32 {%e5-%f5}, [%1 :128]! \n"
: "=r"(k00), // %0
"=r"(k01), // %1
"=w"(_k00), // %2
"=w"(_k00n), // %3
"=w"(_k01), // %4
"=w"(_k01n) // %5
: "0"(k00),
"1"(k01)
: "cc", "memory"
);
#endif // __aarch64__
#endif // __ARM_NEON
// tile
#if __ARM_NEON
int nn = tiles >> 2;
int remain = tiles & 3;
#else
int remain = tiles;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
for (; nn>0; nn--)
{
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
_output0_tm = vld1q_f32(output0_tm);
_output0_tmn = vld1q_f32(output0_tm+4);
_r0 = vld1q_f32(r0);
_r0n = vld1q_f32(r0+4);
_r1 = vld1q_f32(r1);
_r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
}
#else
if (nn > 0)
{
asm volatile(
"0: \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"pld [%2, #256] \n"
"vld1.f32 {d24-d27}, [%2 :128]! \n"// q12 q13 = _r0
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"pld [%1, #256] \n"
"vld1.f32 {d16-d19}, [%1 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q8 \n"
"vmla.f32 q9, q13, %q9 \n"
"pld [%3, #256] \n"
"vld1.f32 {d28-d31}, [%3 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q10 \n"
"vmla.f32 q9, q15, %q11 \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(r1) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(r1),
"w"(_k00), // %8
"w"(_k00n), // %9
"w"(_k01), // %10
"w"(_k01n) // %11
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
}
#endif // __aarch64__
#endif // __ARM_NEON
for (; remain>0; remain--)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _r1n = vld1q_f32(r1+4);
r0 += 8;
r1 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
_output0_tm = vmlaq_f32(_output0_tm, _r1, _k01);
_output0_tmn = vmlaq_f32(_output0_tmn, _r1n, _k01n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
#else
asm volatile(
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q6 \n"
"vmla.f32 q9, q13, %q7 \n"
"pld [%2, #256] \n"
"vld1.f32 {d28-d31}, [%2 :128]! \n"// q14 q15 = _r1
"vmla.f32 q8, q14, %q8 \n"
"vmla.f32 q9, q15, %q9 \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(r0), // %1
"=r"(r1) // %2
: "0"(output0_tm),
"1"(r0),
"2"(r1),
"w"(_k00), // %6
"w"(_k00n), // %7
"w"(_k01), // %8
"w"(_k01n) // %9
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
for (int m=0; m<8; m++)
{
output0_tm[m] += r0[m] * k00[m];
output0_tm[m] += r1[m] * k01[m];
}
r0 += 8;
r1 += 8;
output0_tm += 8;
#endif // __ARM_NEON
}
// advance kernel rows (armv7 NEON already advanced in the load asm)
#if __ARM_NEON
#if __aarch64__
k00 += 8;
k01 += 8;
#endif // __aarch64__
#else
k00 += 8;
k01 += 8;
#endif // __ARM_NEON
}
}
// leftover input channel for the odd output channel
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* k00 = kernel0_tm.row(q);
float* output0_tm = out0_tm;
for (int r=0; r<8; r++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _k00 = vld1q_f32(k00);
float32x4_t _k00n = vld1q_f32(k00+4);
#else
// armv7: kernel load post-increments k00
float32x4_t _k00;
float32x4_t _k00n;
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {%e1-%f1}, [%0 :128]! \n"
"vld1.f32 {%e2-%f2}, [%0 :128]! \n"
: "=r"(k00), // %0
"=w"(_k00), // %1
"=w"(_k00n) // %2
: "0"(k00)
: "cc", "memory"
);
#endif // __aarch64__
#endif // __ARM_NEON
// tile
for (int i=0; i<tiles; i++)
{
#if __ARM_NEON
#if __aarch64__
float32x4_t _output0_tm = vld1q_f32(output0_tm);
float32x4_t _output0_tmn = vld1q_f32(output0_tm+4);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
r0 += 8;
_output0_tm = vmlaq_f32(_output0_tm, _r0, _k00);
_output0_tmn = vmlaq_f32(_output0_tmn, _r0n, _k00n);
vst1q_f32(output0_tm, _output0_tm);
vst1q_f32(output0_tm+4, _output0_tmn);
output0_tm += 8;
#else
asm volatile(
"pld [%1, #256] \n"
"vld1.f32 {d24-d27}, [%1 :128]! \n"// q12 q13 = _r0
"pld [%0, #256] \n"
"vld1.f32 {d16-d19}, [%0 :128] \n"// q8 q9 = _output0_tm
"vmla.f32 q8, q12, %q4 \n"
"vmla.f32 q9, q13, %q5 \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(r0) // %1
: "0"(output0_tm),
"1"(r0),
"w"(_k00), // %4
"w"(_k00n) // %5
: "cc", "memory", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
for (int m=0; m<8; m++)
{
output0_tm[m] += r0[m] * k00[m];
}
r0 += 8;
output0_tm += 8;
#endif // __ARM_NEON
}
// advance kernel row (armv7 NEON already advanced in the load asm)
#if __ARM_NEON
#if __aarch64__
k00 += 8;
#endif // __aarch64__
#else
k00 += 8;
#endif // __ARM_NEON
}
}
}
}
// transformed input no longer needed
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
// inverse transform: each 8x8 Winograd-domain tile collapses to a 6x6
// spatial output block; bias is added here
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch);
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
// tmp holds the vertically inverse-transformed tile (6 rows x 8 cols)
float tmp[6][8];
// tile
for (int i=0; i<outh/6; i++)
{
for (int j=0; j<outw/6; j++)
{
const float* output0_tm0 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm1 = out0_tm.row(i * w_tm/8 + j + tiles);
const float* output0_tm2 = out0_tm.row(i * w_tm/8 + j + tiles * 2);
const float* output0_tm3 = out0_tm.row(i * w_tm/8 + j + tiles * 3);
const float* output0_tm4 = out0_tm.row(i * w_tm/8 + j + tiles * 4);
const float* output0_tm5 = out0_tm.row(i * w_tm/8 + j + tiles * 5);
const float* output0_tm6 = out0_tm.row(i * w_tm/8 + j + tiles * 6);
const float* output0_tm7 = out0_tm.row(i * w_tm/8 + j + tiles * 7);
float* output0 = out0.row(i * 6) + j * 6;
const float* output0_tms[8] = { output0_tm0, output0_tm1, output0_tm2, output0_tm3, output0_tm4, output0_tm5, output0_tm6, output0_tm7 };
// vertical pass of the otm transform (8 rows -> 6)
for (int m=0; m<8; m++)
{
const float* output0_tm = output0_tms[m];
float tmp024a = output0_tm[1] + output0_tm[2];
float tmp135a = output0_tm[1] - output0_tm[2];
float tmp024b = output0_tm[3] + output0_tm[4];
float tmp135b = output0_tm[3] - output0_tm[4];
float tmp024c = output0_tm[5] + output0_tm[6];
float tmp135c = output0_tm[5] - output0_tm[6];
tmp[0][m] = output0_tm[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm[7] + tmp135a + tmp135b * 32 + tmp135c;
}
// horizontal pass (8 cols -> 6) with bias added
for (int m=0; m<6; m++)
{
const float* tmp0 = tmp[m];
float tmp024a = tmp0[1] + tmp0[2];
float tmp135a = tmp0[1] - tmp0[2];
float tmp024b = tmp0[3] + tmp0[4];
float tmp135b = tmp0[3] - tmp0[4];
float tmp024c = tmp0[5] + tmp0[6];
float tmp135c = tmp0[5] - tmp0[6];
output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w);
}
#endif
static void conv3x3s1_winograd64_neon4(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
bottom_blob_tm.create(4, 16 * w_tm/8 * h_tm/8, inch);
const int tiles = w_tm/8 * h_tm/8;
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#if __ARM_NEON
const float coeff[8] = {
0.25f, 0.5f, -1.25f, 2.f,
-2.5f, 4.f, 4.25f, 5.25f
};
float32x4_t _coeff0 = vld1q_f32(coeff);
float32x4_t _coeff1 = vld1q_f32(coeff+4);
#endif // __ARM_NEON
#pragma omp parallel for
for (int q = 0; q<inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8];
// tile
for (int i=0; i<h_tm/8; i++)
{
for (int j=0; j<w_tm/8; j++)
{
#if __ARM_NEON
const float* r0 = img0.row(i * 6) + j * 6;
const float* r1 = r0 + w;
const float* r2 = r0 + w*2;
const float* r3 = r0 + w*3;
#if __aarch64__
for (int m=0; m+3<8; m+=4)
{
float32x4_t _r0_0123 = vld1q_f32(r0);
float32x4_t _r0_4567 = vld1q_f32(r0+4);
float32x4_t _r1_0123 = vld1q_f32(r1);
float32x4_t _r1_4567 = vld1q_f32(r1+4);
float32x4_t _r2_0123 = vld1q_f32(r2);
float32x4_t _r2_4567 = vld1q_f32(r2+4);
float32x4_t _r3_0123 = vld1q_f32(r3);
float32x4_t _r3_4567 = vld1q_f32(r3+4);
float32x4x2_t _r01_00221133 = vtrnq_f32(_r0_0123, _r1_0123);
float32x4x2_t _r01_44665577 = vtrnq_f32(_r0_4567, _r1_4567);
float32x4x2_t _r23_00221133 = vtrnq_f32(_r2_0123, _r3_0123);
float32x4x2_t _r23_44665577 = vtrnq_f32(_r2_4567, _r3_4567);
// no vswp intrinsic :(
float32x4_t _r_00 = vcombine_f32(vget_low_f32(_r01_00221133.val[0]), vget_low_f32(_r23_00221133.val[0]));
float32x4_t _r_11 = vcombine_f32(vget_low_f32(_r01_00221133.val[1]), vget_low_f32(_r23_00221133.val[1]));
float32x4_t _r_22 = vcombine_f32(vget_high_f32(_r01_00221133.val[0]), vget_high_f32(_r23_00221133.val[0]));
float32x4_t _r_33 = vcombine_f32(vget_high_f32(_r01_00221133.val[1]), vget_high_f32(_r23_00221133.val[1]));
float32x4_t _r_44 = vcombine_f32(vget_low_f32(_r01_44665577.val[0]), vget_low_f32(_r23_44665577.val[0]));
float32x4_t _r_55 = vcombine_f32(vget_low_f32(_r01_44665577.val[1]), vget_low_f32(_r23_44665577.val[1]));
float32x4_t _r_66 = vcombine_f32(vget_high_f32(_r01_44665577.val[0]), vget_high_f32(_r23_44665577.val[0]));
float32x4_t _r_77 = vcombine_f32(vget_high_f32(_r01_44665577.val[1]), vget_high_f32(_r23_44665577.val[1]));
float32x4_t _r_0_m_6 = vsubq_f32(_r_00, _r_66);
float32x4_t _r_7_m_1 = vsubq_f32(_r_77, _r_11);
float32x4_t _r_4_m_2 = vsubq_f32(_r_44, _r_22);
float32x4_t _r_3_m_5 = vsubq_f32(_r_33, _r_55);
float32x4_t _tmp0 = vmlaq_lane_f32(_r_0_m_6, _r_4_m_2, vget_high_f32(_coeff1), 1);
float32x4_t _tmp7 = vmlaq_lane_f32(_r_7_m_1, _r_3_m_5, vget_high_f32(_coeff1), 1);
vst1q_f32(&tmp[0][m], _tmp0);
vst1q_f32(&tmp[7][m], _tmp7);
float32x4_t _r_2_a_6 = vaddq_f32(_r_22, _r_66);
float32x4_t _r_1_a_5 = vaddq_f32(_r_11, _r_55);
float32x4_t _tmp12a = vmlsq_lane_f32(_r_2_a_6, _r_44, vget_high_f32(_coeff1), 0);
float32x4_t _tmp12b = vmlsq_lane_f32(_r_1_a_5, _r_33, vget_high_f32(_coeff1), 0);
float32x4_t _tmp1 = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _tmp2 = vsubq_f32(_tmp12a, _tmp12b);
vst1q_f32(&tmp[1][m], _tmp1);
vst1q_f32(&tmp[2][m], _tmp2);
float32x4_t _r_4_x_c = vmulq_lane_f32(_r_44, vget_high_f32(_coeff0), 0);
float32x4_t _r_3_x_c = vmulq_lane_f32(_r_33, vget_low_f32(_coeff1), 0);
float32x4_t _tmp34a = vaddq_f32(_r_66, _r_4_x_c);
_tmp34a = vmlaq_lane_f32(_tmp34a, _r_22, vget_low_f32(_coeff0), 0);
float32x4_t _tmp34b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_low_f32(_coeff0), 1);
_tmp34b = vmlaq_lane_f32(_tmp34b, _r_55, vget_high_f32(_coeff0), 1);
float32x4_t _tmp3 = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _tmp4 = vsubq_f32(_tmp34a, _tmp34b);
vst1q_f32(&tmp[3][m], _tmp3);
vst1q_f32(&tmp[4][m], _tmp4);
// reuse r04 * 1.25
// reuse r03 * 2.5
float32x4_t _r_2_a_4c = vaddq_f32(_r_22, _r_4_x_c);
float32x4_t _tmp56a = vmlaq_lane_f32(_r_66, _r_2_a_4c, vget_low_f32(_coeff1), 1);
float32x4_t _tmp56b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_high_f32(_coeff0), 1);
_tmp56b = vmlaq_lane_f32(_tmp56b, _r_55, vget_low_f32(_coeff0), 1);
float32x4_t _tmp5 = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _tmp6 = vsubq_f32(_tmp56a, _tmp56b);
vst1q_f32(&tmp[5][m], _tmp5);
vst1q_f32(&tmp[6][m], _tmp6);
r0 += w*4;
r1 += w*4;
r2 += w*4;
r3 += w*4;
}
const float* t0 = tmp[0];
const float* t1 = tmp[1];
const float* t2 = tmp[2];
const float* t3 = tmp[3];
float* r0_tm0_0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm0_4 = img0_tm.row(i * w_tm/8 + j + tiles);
float* r0_tm1_0 = img0_tm.row(i * w_tm/8 + j + tiles*2);
float* r0_tm1_4 = img0_tm.row(i * w_tm/8 + j + tiles*3);
float* r0_tm2_0 = img0_tm.row(i * w_tm/8 + j + tiles*4);
float* r0_tm2_4 = img0_tm.row(i * w_tm/8 + j + tiles*5);
float* r0_tm3_0 = img0_tm.row(i * w_tm/8 + j + tiles*6);
float* r0_tm3_4 = img0_tm.row(i * w_tm/8 + j + tiles*7);
for (int m=0; m+3<8; m+=4)
{
float32x4_t _t0_0123 = vld1q_f32(t0);
float32x4_t _t0_4567 = vld1q_f32(t0+4);
float32x4_t _t1_0123 = vld1q_f32(t1);
float32x4_t _t1_4567 = vld1q_f32(t1+4);
float32x4_t _t2_0123 = vld1q_f32(t2);
float32x4_t _t2_4567 = vld1q_f32(t2+4);
float32x4_t _t3_0123 = vld1q_f32(t3);
float32x4_t _t3_4567 = vld1q_f32(t3+4);
float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123);
float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567);
float32x4x2_t _t23_00221133 = vtrnq_f32(_t2_0123, _t3_0123);
float32x4x2_t _t23_44665577 = vtrnq_f32(_t2_4567, _t3_4567);
// no vswp intrinsic :(
float32x4_t _t_00 = vcombine_f32(vget_low_f32(_t01_00221133.val[0]), vget_low_f32(_t23_00221133.val[0]));
float32x4_t _t_11 = vcombine_f32(vget_low_f32(_t01_00221133.val[1]), vget_low_f32(_t23_00221133.val[1]));
float32x4_t _t_22 = vcombine_f32(vget_high_f32(_t01_00221133.val[0]), vget_high_f32(_t23_00221133.val[0]));
float32x4_t _t_33 = vcombine_f32(vget_high_f32(_t01_00221133.val[1]), vget_high_f32(_t23_00221133.val[1]));
float32x4_t _t_44 = vcombine_f32(vget_low_f32(_t01_44665577.val[0]), vget_low_f32(_t23_44665577.val[0]));
float32x4_t _t_55 = vcombine_f32(vget_low_f32(_t01_44665577.val[1]), vget_low_f32(_t23_44665577.val[1]));
float32x4_t _t_66 = vcombine_f32(vget_high_f32(_t01_44665577.val[0]), vget_high_f32(_t23_44665577.val[0]));
float32x4_t _t_77 = vcombine_f32(vget_high_f32(_t01_44665577.val[1]), vget_high_f32(_t23_44665577.val[1]));
float32x4_t _t_0_m_6 = vsubq_f32(_t_00, _t_66);
float32x4_t _t_7_m_1 = vsubq_f32(_t_77, _t_11);
float32x4_t _t_4_m_2 = vsubq_f32(_t_44, _t_22);
float32x4_t _t_3_m_5 = vsubq_f32(_t_33, _t_55);
float32x4_t _r0_tm_0_0 = vmlaq_lane_f32(_t_0_m_6, _t_4_m_2, vget_high_f32(_coeff1), 1);
float32x4_t _r0_tm_4_3 = vmlaq_lane_f32(_t_7_m_1, _t_3_m_5, vget_high_f32(_coeff1), 1);
r0_tm0_0[0] = vgetq_lane_f32(_r0_tm_0_0, 0);
r0_tm1_0[0] = vgetq_lane_f32(_r0_tm_0_0, 1);
r0_tm2_0[0] = vgetq_lane_f32(_r0_tm_0_0, 2);
r0_tm3_0[0] = vgetq_lane_f32(_r0_tm_0_0, 3);
r0_tm0_4[3] = vgetq_lane_f32(_r0_tm_4_3, 0);
r0_tm1_4[3] = vgetq_lane_f32(_r0_tm_4_3, 1);
r0_tm2_4[3] = vgetq_lane_f32(_r0_tm_4_3, 2);
r0_tm3_4[3] = vgetq_lane_f32(_r0_tm_4_3, 3);
float32x4_t _t_2_m_6 = vaddq_f32(_t_22, _t_66);
float32x4_t _t_1_m_5 = vaddq_f32(_t_11, _t_55);
float32x4_t _tmp12a = vmlsq_lane_f32(_t_2_m_6, _t_44, vget_high_f32(_coeff1), 0);
float32x4_t _tmp12b = vmlsq_lane_f32(_t_1_m_5, _t_33, vget_high_f32(_coeff1), 0);
float32x4_t _r0_tm_0_1 = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _r0_tm_0_2 = vsubq_f32(_tmp12a, _tmp12b);
r0_tm0_0[1] = vgetq_lane_f32(_r0_tm_0_1, 0);
r0_tm1_0[1] = vgetq_lane_f32(_r0_tm_0_1, 1);
r0_tm2_0[1] = vgetq_lane_f32(_r0_tm_0_1, 2);
r0_tm3_0[1] = vgetq_lane_f32(_r0_tm_0_1, 3);
r0_tm0_0[2] = vgetq_lane_f32(_r0_tm_0_2, 0);
r0_tm1_0[2] = vgetq_lane_f32(_r0_tm_0_2, 1);
r0_tm2_0[2] = vgetq_lane_f32(_r0_tm_0_2, 2);
r0_tm3_0[2] = vgetq_lane_f32(_r0_tm_0_2, 3);
float32x4_t _t_4_x_c = vmulq_lane_f32(_t_44, vget_high_f32(_coeff0), 0);
float32x4_t _t_3_x_c = vmulq_lane_f32(_t_33, vget_low_f32(_coeff1), 0);
float32x4_t _tmp34a = vaddq_f32(_t_66, _t_4_x_c);
_tmp34a = vmlaq_lane_f32(_tmp34a, _t_22, vget_low_f32(_coeff0), 0);
float32x4_t _tmp34b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_low_f32(_coeff0), 1);
_tmp34b = vmlaq_lane_f32(_tmp34b, _t_55, vget_high_f32(_coeff0), 1);
float32x4_t _r0_tm_0_3 = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _r0_tm_4_0 = vsubq_f32(_tmp34a, _tmp34b);
r0_tm0_0[3] = vgetq_lane_f32(_r0_tm_0_3, 0);
r0_tm1_0[3] = vgetq_lane_f32(_r0_tm_0_3, 1);
r0_tm2_0[3] = vgetq_lane_f32(_r0_tm_0_3, 2);
r0_tm3_0[3] = vgetq_lane_f32(_r0_tm_0_3, 3);
r0_tm0_4[0] = vgetq_lane_f32(_r0_tm_4_0, 0);
r0_tm1_4[0] = vgetq_lane_f32(_r0_tm_4_0, 1);
r0_tm2_4[0] = vgetq_lane_f32(_r0_tm_4_0, 2);
r0_tm3_4[0] = vgetq_lane_f32(_r0_tm_4_0, 3);
float32x4_t _t_2_a_4c = vaddq_f32(_t_22, _t_4_x_c);
float32x4_t _tmp56a = vmlaq_lane_f32(_t_66, _t_2_a_4c, vget_low_f32(_coeff1), 1);
float32x4_t _tmp56b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_high_f32(_coeff0), 1);
_tmp56b = vmlaq_lane_f32(_tmp56b, _t_55, vget_low_f32(_coeff0), 1);
float32x4_t _r0_tm_4_1 = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _r0_tm_4_2 = vsubq_f32(_tmp56a, _tmp56b);
r0_tm0_4[1] = vgetq_lane_f32(_r0_tm_4_1, 0);
r0_tm1_4[1] = vgetq_lane_f32(_r0_tm_4_1, 1);
r0_tm2_4[1] = vgetq_lane_f32(_r0_tm_4_1, 2);
r0_tm3_4[1] = vgetq_lane_f32(_r0_tm_4_1, 3);
r0_tm0_4[2] = vgetq_lane_f32(_r0_tm_4_2, 0);
r0_tm1_4[2] = vgetq_lane_f32(_r0_tm_4_2, 1);
r0_tm2_4[2] = vgetq_lane_f32(_r0_tm_4_2, 2);
r0_tm3_4[2] = vgetq_lane_f32(_r0_tm_4_2, 3);
t0 += 8*4;
t1 += 8*4;
t2 += 8*4;
t3 += 8*4;
r0_tm0_0 += img0_tm.w*tiles*2*4;
r0_tm0_4 += img0_tm.w*tiles*2*4;
r0_tm1_0 += img0_tm.w*tiles*2*4;
r0_tm1_4 += img0_tm.w*tiles*2*4;
r0_tm2_0 += img0_tm.w*tiles*2*4;
r0_tm2_4 += img0_tm.w*tiles*2*4;
r0_tm3_0 += img0_tm.w*tiles*2*4;
r0_tm3_4 += img0_tm.w*tiles*2*4;
}
#else // __aarch64__
float* t0 = tmp[0];
float* t1 = tmp[1];
float* t2 = tmp[2];
float* t3 = tmp[3];
float* t4 = tmp[4];
float* t5 = tmp[5];
float* t6 = tmp[6];
float* t7 = tmp[7];
int stepw = w*4*4;
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%8], %26 \n"
"vld1.f32 {d20-d23}, [%9], %26 \n"
"vld1.f32 {d24-d27}, [%10], %26 \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11], %26 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m]
"vmov q3, q7 \n"// use q7
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m]
"vmla.f32 q4, q6, %e25[1] \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m]
"vadd.f32 q8, q2, q3 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m]
"vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m]
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m]
"vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m]
"vst1.f32 {d12-d13}, [%7]! \n"// tmp[7][m]
// loop1
"vld1.f32 {d16-d19}, [%8] \n"
"vld1.f32 {d20-d23}, [%9] \n"
"vld1.f32 {d24-d27}, [%10] \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m]
"vmov q3, q7 \n"// use q7
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m]
"vmla.f32 q4, q6, %e25[1] \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m]
"vadd.f32 q8, q2, q3 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m]
"vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m]
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m]
"vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m]
"vst1.f32 {d12-d13}, [%7]! \n"// tmp[7][m]
: "=r"(t0), // %0
"=r"(t1), // %1
"=r"(t2), // %2
"=r"(t3), // %3
"=r"(t4), // %4
"=r"(t5), // %5
"=r"(t6), // %6
"=r"(t7), // %7
"=r"(r0), // %8
"=r"(r1), // %9
"=r"(r2), // %10
"=r"(r3) // %11
: "0"(t0),
"1"(t1),
"2"(t2),
"3"(t3),
"4"(t4),
"5"(t5),
"6"(t6),
"7"(t7),
"8"(r0),
"9"(r1),
"10"(r2),
"11"(r3),
"w"(_coeff0), // %24
"w"(_coeff1), // %25
"r"(stepw) // %26
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
t0 = tmp[0];
t1 = tmp[1];
t2 = tmp[2];
t3 = tmp[3];
float* r0_tm0_0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm0_4 = img0_tm.row(i * w_tm/8 + j + tiles);
float* r0_tm1_0 = img0_tm.row(i * w_tm/8 + j + tiles*2);
float* r0_tm1_4 = img0_tm.row(i * w_tm/8 + j + tiles*3);
float* r0_tm2_0 = img0_tm.row(i * w_tm/8 + j + tiles*4);
float* r0_tm2_4 = img0_tm.row(i * w_tm/8 + j + tiles*5);
float* r0_tm3_0 = img0_tm.row(i * w_tm/8 + j + tiles*6);
float* r0_tm3_4 = img0_tm.row(i * w_tm/8 + j + tiles*7);
int step = img0_tm.w*tiles*2*4*4;
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%8] \n"
"add %8, %8, #128 \n"
"vld1.f32 {d20-d23}, [%9] \n"
"add %9, %9, #128 \n"
"vld1.f32 {d24-d27}, [%10] \n"
"add %10, %10, #128 \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"add %11, %11, #128 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4[0]}, [%0]! \n"
"vst1.f32 {d4[1]}, [%2]! \n"
"vmov q3, q7 \n"// use q7
"vst1.f32 {d5[0]}, [%4]! \n"
"vst1.f32 {d5[1]}, [%6]! \n"
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16[0]}, [%0]! \n"
"vst1.f32 {d16[1]}, [%2]! \n"
"vmla.f32 q4, q6, %e25[1] \n"
"vst1.f32 {d17[0]}, [%4]! \n"
"vst1.f32 {d17[1]}, [%6]! \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18[0]}, [%0]! \n"
"vst1.f32 {d18[1]}, [%2]! \n"
"vadd.f32 q8, q2, q3 \n"
"vst1.f32 {d19[0]}, [%4]! \n"
"vst1.f32 {d19[1]}, [%6]! \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16[0]}, [%0], %26 \n"
"vst1.f32 {d16[1]}, [%2], %26 \n"
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d17[0]}, [%4], %26 \n"
"vst1.f32 {d17[1]}, [%6], %26 \n"
"vtrn.32 q9, q2 \n"
"vtrn.32 q3, q6 \n"
"sub %0, %0, #12 \n"
"sub %2, %2, #12 \n"
"sub %4, %4, #12 \n"
"sub %6, %6, #12 \n"
"vswp d19, d6 \n"
"vswp d5, d12 \n"
"vst1.f32 {d18-d19}, [%1], %26 \n"
"vst1.f32 {d4-d5}, [%3], %26 \n"
"vst1.f32 {d6-d7}, [%5], %26 \n"
"vst1.f32 {d12-d13}, [%7], %26 \n"
// loop1
"vld1.f32 {d16-d19}, [%8] \n"
"vld1.f32 {d20-d23}, [%9] \n"
"vld1.f32 {d24-d27}, [%10] \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4[0]}, [%0]! \n"
"vst1.f32 {d4[1]}, [%2]! \n"
"vmov q3, q7 \n"// use q7
"vst1.f32 {d5[0]}, [%4]! \n"
"vst1.f32 {d5[1]}, [%6]! \n"
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16[0]}, [%0]! \n"
"vst1.f32 {d16[1]}, [%2]! \n"
"vmla.f32 q4, q6, %e25[1] \n"
"vst1.f32 {d17[0]}, [%4]! \n"
"vst1.f32 {d17[1]}, [%6]! \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18[0]}, [%0]! \n"
"vst1.f32 {d18[1]}, [%2]! \n"
"vadd.f32 q8, q2, q3 \n"
"vst1.f32 {d19[0]}, [%4]! \n"
"vst1.f32 {d19[1]}, [%6]! \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16[0]}, [%0] \n"
"vst1.f32 {d16[1]}, [%2] \n"
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d17[0]}, [%4] \n"
"vst1.f32 {d17[1]}, [%6] \n"
"vtrn.32 q9, q2 \n"
"vtrn.32 q3, q6 \n"
"vswp d19, d6 \n"
"vswp d5, d12 \n"
"vst1.f32 {d18-d19}, [%1] \n"
"vst1.f32 {d4-d5}, [%3] \n"
"vst1.f32 {d6-d7}, [%5] \n"
"vst1.f32 {d12-d13}, [%7] \n"
: "=r"(r0_tm0_0), // %0
"=r"(r0_tm0_4), // %1
"=r"(r0_tm1_0), // %2
"=r"(r0_tm1_4), // %3
"=r"(r0_tm2_0), // %4
"=r"(r0_tm2_4), // %5
"=r"(r0_tm3_0), // %6
"=r"(r0_tm3_4), // %7
"=r"(t0), // %8
"=r"(t1), // %9
"=r"(t2), // %10
"=r"(t3) // %11
: "0"(r0_tm0_0),
"1"(r0_tm0_4),
"2"(r0_tm1_0),
"3"(r0_tm1_4),
"4"(r0_tm2_0),
"5"(r0_tm2_4),
"6"(r0_tm3_0),
"7"(r0_tm3_4),
"8"(t0),
"9"(t1),
"10"(t2),
"11"(t3),
"w"(_coeff0), // %24
"w"(_coeff1), // %25
"r"(step) // %26
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
const float* r0 = img0.row(i * 6) + j * 6;
for (int m=0; m<8; m++)
{
tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25f;
tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25f;
float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25f);
float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25f);
tmp[1][m] = tmp12a + tmp12b;
tmp[2][m] = tmp12a - tmp12b;
float tmp34a = (r0[6] + r0[2] * 0.25f - r0[4] * 1.25f);
float tmp34b = (r0[1] * 0.5f - r0[3] * 2.5f + r0[5] * 2.f);
tmp[3][m] = tmp34a + tmp34b;
tmp[4][m] = tmp34a - tmp34b;
float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25f) * 4.f);
float tmp56b = (r0[1] * 2.f - r0[3] * 2.5f + r0[5] * 0.5f);
tmp[5][m] = tmp56a + tmp56b;
tmp[6][m] = tmp56a - tmp56b;
r0 += w;
}
float* r0_tm_0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm_4 = img0_tm.row(i * w_tm/8 + j + tiles);
for (int m=0; m<8; m++)
{
const float* tmp0 = tmp[m];
r0_tm_0[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25f;
r0_tm_4[3] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25f;
float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25f);
float tmp12b = (tmp0[1] - tmp0[3] * 4.25f + tmp0[5]);
r0_tm_0[1] = tmp12a + tmp12b;
r0_tm_0[2] = tmp12a - tmp12b;
float tmp34a = (tmp0[6] + tmp0[2] * 0.25f - tmp0[4] * 1.25f);
float tmp34b = (tmp0[1] * 0.5f - tmp0[3] * 2.5f + tmp0[5] * 2.f);
r0_tm_0[3] = tmp34a + tmp34b;
r0_tm_4[0] = tmp34a - tmp34b;
float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25f) * 4.f);
float tmp56b = (tmp0[1] * 2.f - tmp0[3] * 2.5f + tmp0[5] * 0.5f);
r0_tm_4[1] = tmp56a + tmp56b;
r0_tm_4[2] = tmp56a - tmp56b;
r0_tm_0 += img0_tm.w * tiles * 2;
r0_tm_4 += img0_tm.w * tiles * 2;
}
#endif // __ARM_NEON
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
top_blob_tm.create(4, 16 * w_tm/8 * h_tm/8, outch);
const int tiles = h_tm/8 * w_tm/8;
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
#pragma omp parallel for
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 4;
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p+1);
Mat out2_tm = top_blob_tm.channel(p+2);
Mat out3_tm = top_blob_tm.channel(p+3);
const float* ktm = kernel_tm.channel(pp);
out0_tm.fill(0.f);
out1_tm.fill(0.f);
out2_tm.fill(0.f);
out3_tm.fill(0.f);
int q = 0;
#if __ARM_NEON && __aarch64__
for (; q+3<inch; q+=4)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q+1);
const float* r2 = bottom_blob_tm.channel(q+2);
const float* r3 = bottom_blob_tm.channel(q+3);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
asm volatile(
"mov w0, #16 \n"// w0 = r = 16
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%8], #64 \n"// v0 v1 v2 v3 = _k00 _k01 _k02 _k03
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%8], #64 \n"// v4 v5 v6 v7 = _k10 _k11 _k12 _k13
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"// v8 v9 v10 v11 = _k20 _k21 _k22 _k23
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n"// v12 v13 v14 v15 = _k30 _k31 _k32 _k33
// tile loop
"lsr w1, %w18, #2 \n"// w1 = nn = tiles >> 2
"cmp w1, #0 \n"
"beq 2f \n"
//BEGIN tile loop
"prfm pldl1keep, [%4, #128] \n"//
"ld1 {v16.4s}, [%4], #16 \n"
"1: \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v20.4s}, [%0] \n"
"add x4, %0, #16 \n"// x4 = %0 next
"fmla v20.4s, v16.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v21.4s}, [%1] \n"
"add x5, %1, #16 \n"// x5 = %1 next
"fmla v21.4s, v16.4s, v4.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v22.4s}, [%2] \n"
"add x6, %2, #16 \n"// x6 = %2 next
"fmla v22.4s, v16.4s, v8.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v23.4s}, [%3] \n"
"add x7, %3, #16 \n"// x7 = %3 next
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v17.4s}, [%5], #16 \n"
"fmla v23.4s, v16.4s, v12.4s \n"
"prfm pldl1keep, [x4, #128] \n"
"ld1 {v24.4s}, [x4] \n"
"fmla v20.4s, v17.4s, v1.4s \n"
"fmla v21.4s, v17.4s, v5.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v18.4s}, [%6], #16 \n"
"fmla v22.4s, v17.4s, v9.4s \n"
"fmla v23.4s, v17.4s, v13.4s \n"
"prfm pldl1keep, [x5, #128] \n"
"ld1 {v25.4s}, [x5] \n"
"fmla v20.4s, v18.4s, v2.4s \n"
"fmla v21.4s, v18.4s, v6.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v19.4s}, [%7], #16 \n"
"fmla v22.4s, v18.4s, v10.4s \n"
"fmla v23.4s, v18.4s, v14.4s \n"
"prfm pldl1keep, [x6, #128] \n"
"ld1 {v26.4s}, [x6] \n"
"fmla v20.4s, v19.4s, v3.4s \n"
"fmla v21.4s, v19.4s, v7.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4s}, [%4], #16 \n"
"fmla v22.4s, v19.4s, v11.4s \n"
"fmla v23.4s, v19.4s, v15.4s \n"
///////
"prfm pldl1keep, [x7, #128] \n"
"ld1 {v27.4s}, [x7] \n"
"st1 {v20.4s}, [%0] \n"
"add %0, %0, #32 \n"
"fmla v24.4s, v16.4s, v0.4s \n"
"fmla v25.4s, v16.4s, v4.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v17.4s}, [%5], #16 \n"
"fmla v26.4s, v16.4s, v8.4s \n"
"fmla v27.4s, v16.4s, v12.4s \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v20.4s}, [%0] \n"
"st1 {v21.4s}, [%1] \n"
"add %1, %1, #32 \n"
"fmla v24.4s, v17.4s, v1.4s \n"
"fmla v25.4s, v17.4s, v5.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v18.4s}, [%6], #16 \n"
"fmla v26.4s, v17.4s, v9.4s \n"
"fmla v27.4s, v17.4s, v13.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v21.4s}, [%1] \n"
"st1 {v22.4s}, [%2] \n"
"add %2, %2, #32 \n"
"fmla v24.4s, v18.4s, v2.4s \n"
"fmla v25.4s, v18.4s, v6.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v19.4s}, [%7], #16 \n"
"fmla v26.4s, v18.4s, v10.4s \n"
"fmla v27.4s, v18.4s, v14.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v22.4s}, [%2] \n"
"st1 {v23.4s}, [%3] \n"
"add %3, %3, #32 \n"
"fmla v24.4s, v19.4s, v3.4s \n"
"fmla v25.4s, v19.4s, v7.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4s}, [%4], #16 \n"
"fmla v26.4s, v19.4s, v11.4s \n"
"fmla v27.4s, v19.4s, v15.4s \n"
///////
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v23.4s}, [%3] \n"
"st1 {v24.4s}, [x4] \n"
"add x4, x4, #32 \n"
"fmla v20.4s, v16.4s, v0.4s \n"
"fmla v21.4s, v16.4s, v4.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v17.4s}, [%5], #16 \n"
"fmla v22.4s, v16.4s, v8.4s \n"
"fmla v23.4s, v16.4s, v12.4s \n"
"prfm pldl1keep, [x4, #128] \n"
"ld1 {v24.4s}, [x4] \n"
"st1 {v25.4s}, [x5] \n"
"add x5, x5, #32 \n"
"fmla v20.4s, v17.4s, v1.4s \n"
"fmla v21.4s, v17.4s, v5.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v18.4s}, [%6], #16 \n"
"fmla v22.4s, v17.4s, v9.4s \n"
"fmla v23.4s, v17.4s, v13.4s \n"
"prfm pldl1keep, [x5, #128] \n"
"ld1 {v25.4s}, [x5] \n"
"st1 {v26.4s}, [x6] \n"
"add x6, x6, #32 \n"
"fmla v20.4s, v18.4s, v2.4s \n"
"fmla v21.4s, v18.4s, v6.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v19.4s}, [%7], #16 \n"
"fmla v22.4s, v18.4s, v10.4s \n"
"fmla v23.4s, v18.4s, v14.4s \n"
"prfm pldl1keep, [x6, #128] \n"
"ld1 {v26.4s}, [x6] \n"
"st1 {v27.4s}, [x7] \n"
"add x7, x7, #32 \n"
"fmla v20.4s, v19.4s, v3.4s \n"
"fmla v21.4s, v19.4s, v7.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4s}, [%4], #16 \n"
"fmla v22.4s, v19.4s, v11.4s \n"
"fmla v23.4s, v19.4s, v15.4s \n"
///////
"prfm pldl1keep, [x7, #128] \n"
"ld1 {v27.4s}, [x7] \n"
"st1 {v20.4s}, [%0] \n"
"fmla v24.4s, v16.4s, v0.4s \n"
"fmla v25.4s, v16.4s, v4.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v17.4s}, [%5], #16 \n"
"fmla v26.4s, v16.4s, v8.4s \n"
"fmla v27.4s, v16.4s, v12.4s \n"
"st1 {v21.4s}, [%1] \n"
"fmla v24.4s, v17.4s, v1.4s \n"
"fmla v25.4s, v17.4s, v5.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v18.4s}, [%6], #16 \n"
"fmla v26.4s, v17.4s, v9.4s \n"
"fmla v27.4s, v17.4s, v13.4s \n"
"st1 {v22.4s}, [%2] \n"
"fmla v24.4s, v18.4s, v2.4s \n"
"fmla v25.4s, v18.4s, v6.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v19.4s}, [%7], #16 \n"
"fmla v26.4s, v18.4s, v10.4s \n"
"fmla v27.4s, v18.4s, v14.4s \n"
"st1 {v23.4s}, [%3] \n"
"fmla v24.4s, v19.4s, v3.4s \n"
"fmla v25.4s, v19.4s, v7.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4s}, [%4], #16 \n"
"fmla v26.4s, v19.4s, v11.4s \n"
"fmla v27.4s, v19.4s, v15.4s \n"
"st1 {v24.4s}, [x4], #16 \n"
"mov %0, x4 \n"
"st1 {v25.4s}, [x5], #16 \n"
"mov %1, x5 \n"
"subs w1, w1, #1 \n"
"st1 {v26.4s}, [x6], #16 \n"
"mov %2, x6 \n"
"st1 {v27.4s}, [x7], #16 \n"
"mov %3, x7 \n"
"bne 1b \n"
"sub %4, %4, #16 \n"
//END tile loop
"2: \n"
// remain loop
"and w1, %w18, #3 \n"// w1 = remain = tiles & 3;
"cmp w1, #0 \n"
"beq 4f \n"
//BEGIN remain loop
"3: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4s}, [%4], #16 \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v20.4s}, [%0] \n"
"fmla v20.4s, v16.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v21.4s}, [%1] \n"
"fmla v21.4s, v16.4s, v4.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v22.4s}, [%2] \n"
"fmla v22.4s, v16.4s, v8.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v23.4s}, [%3] \n"
"fmla v23.4s, v16.4s, v12.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v17.4s}, [%5], #16 \n"
"fmla v20.4s, v17.4s, v1.4s \n"
"fmla v21.4s, v17.4s, v5.4s \n"
"fmla v22.4s, v17.4s, v9.4s \n"
"fmla v23.4s, v17.4s, v13.4s \n"
"prfm pldl1keep, [%6, #128] \n"
"ld1 {v18.4s}, [%6], #16 \n"
"fmla v20.4s, v18.4s, v2.4s \n"
"fmla v21.4s, v18.4s, v6.4s \n"
"fmla v22.4s, v18.4s, v10.4s \n"
"fmla v23.4s, v18.4s, v14.4s \n"
"prfm pldl1keep, [%7, #128] \n"
"ld1 {v19.4s}, [%7], #16 \n"
"fmla v20.4s, v19.4s, v3.4s \n"
"fmla v21.4s, v19.4s, v7.4s \n"
"fmla v22.4s, v19.4s, v11.4s \n"
"fmla v23.4s, v19.4s, v15.4s \n"
"st1 {v20.4s}, [%0], #16 \n"
"st1 {v21.4s}, [%1], #16 \n"
"subs w1, w1, #1 \n"
"st1 {v22.4s}, [%2], #16 \n"
"st1 {v23.4s}, [%3], #16 \n"
"bne 3b \n"
//END remain loop
"4: \n"
"subs w0, w0, #1 \n"
"bne 0b \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(r1), // %5
"=r"(r2), // %6
"=r"(r3), // %7
"=r"(ktm) // %8
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(r1),
"6"(r2),
"7"(r3),
"8"(ktm),
"r"(tiles) // %18
: "cc", "memory", "x0", "x1", "x4", "x5", "x6", "x7", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"
);
}
#endif // __ARM_NEON && __aarch64__
for (; q+1<inch; q+=2)
{
const float* r0 = bottom_blob_tm.channel(q);
const float* r1 = bottom_blob_tm.channel(q+1);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
#if __ARM_NEON
#if __aarch64__
asm volatile(
"mov w0, #16 \n"// w0 = r = 16
"0: \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v0.4s, v1.4s}, [%6], #32 \n"// v0 v1 = _k00 _k01
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v2.4s, v3.4s}, [%6], #32 \n"// v2 v3 = _k10 _k11
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4s, v5.4s}, [%6], #32 \n"// v4 v5 = _k20 _k21
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v6.4s, v7.4s}, [%6], #32 \n"// v6 v7 = _k30 _k31
// tile loop
"lsr w1, %w14, #2 \n"// w1 = nn = tiles >> 2
"cmp w1, #0 \n"
"beq 2f \n"
//BEGIN tile loop
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v20.4s}, [%4], #16 \n"
"1: \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v20.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1] \n"
"fmla v17.4s, v20.4s, v2.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v18.4s}, [%2] \n"
"fmla v18.4s, v20.4s, v4.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v19.4s}, [%3] \n"
"fmla v19.4s, v20.4s, v6.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v21.4s}, [%5], #16 \n"
"fmla v16.4s, v21.4s, v1.4s \n"
"fmla v17.4s, v21.4s, v3.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v20.4s}, [%4], #16 \n"
"fmla v18.4s, v21.4s, v5.4s \n"
"fmla v19.4s, v21.4s, v7.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
////
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v20.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1] \n"
"fmla v17.4s, v20.4s, v2.4s \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v18.4s}, [%2] \n"
"fmla v18.4s, v20.4s, v4.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v19.4s}, [%3] \n"
"fmla v19.4s, v20.4s, v6.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v21.4s}, [%5], #16 \n"
"fmla v16.4s, v21.4s, v1.4s \n"
"fmla v17.4s, v21.4s, v3.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v20.4s}, [%4], #16 \n"
"fmla v18.4s, v21.4s, v5.4s \n"
"fmla v19.4s, v21.4s, v7.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
////
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v20.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1] \n"
"fmla v17.4s, v20.4s, v2.4s \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v18.4s}, [%2] \n"
"fmla v18.4s, v20.4s, v4.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v19.4s}, [%3] \n"
"fmla v19.4s, v20.4s, v6.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v21.4s}, [%5], #16 \n"
"fmla v16.4s, v21.4s, v1.4s \n"
"fmla v17.4s, v21.4s, v3.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v20.4s}, [%4], #16 \n"
"fmla v18.4s, v21.4s, v5.4s \n"
"fmla v19.4s, v21.4s, v7.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
////
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v20.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1] \n"
"fmla v17.4s, v20.4s, v2.4s \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v18.4s}, [%2] \n"
"fmla v18.4s, v20.4s, v4.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v19.4s}, [%3] \n"
"fmla v19.4s, v20.4s, v6.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v21.4s}, [%5], #16 \n"
"fmla v16.4s, v21.4s, v1.4s \n"
"fmla v17.4s, v21.4s, v3.4s \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v20.4s}, [%4], #16 \n"
"fmla v18.4s, v21.4s, v5.4s \n"
"fmla v19.4s, v21.4s, v7.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"subs w1, w1, #1 \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"bne 1b \n"
"sub %4, %4, #16 \n"
//END tile loop
"2: \n"
// remain loop
"and w1, %w14, #3 \n"// w1 = remain = tiles & 3;
"cmp w1, #0 \n"
"beq 4f \n"
//BEGIN remain loop
"3: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v20.4s}, [%4], #16 \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v20.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1] \n"
"fmla v17.4s, v20.4s, v2.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v18.4s}, [%2] \n"
"fmla v18.4s, v20.4s, v4.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v19.4s}, [%3] \n"
"fmla v19.4s, v20.4s, v6.4s \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v21.4s}, [%5], #16 \n"
"fmla v16.4s, v21.4s, v1.4s \n"
"fmla v17.4s, v21.4s, v3.4s \n"
"fmla v18.4s, v21.4s, v5.4s \n"
"fmla v19.4s, v21.4s, v7.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"subs w1, w1, #1 \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"bne 3b \n"
//END remain loop
"4: \n"
"subs w0, w0, #1 \n"
"bne 0b \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(r1), // %5
"=r"(ktm) // %6
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(r1),
"6"(ktm),
"r"(tiles) // %14
: "cc", "memory", "x0", "x1", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21"
);
#else
asm volatile(
"mov r0, #16 \n"// r0 = r = 16
"0: \n"
"pld [%6, #256] \n"
"vld1.f32 {d0-d3}, [%6 :128]! \n"// q0 q1 = _k00 _k01
"pld [%6, #256] \n"
"vld1.f32 {d4-d7}, [%6 :128]! \n"// q2 q3 = _k10 _k11
"pld [%6, #256] \n"
"vld1.f32 {d8-d11}, [%6 :128]! \n"// q4 q5 = _k20 _k21
"pld [%6, #256] \n"
"vld1.f32 {d12-d15}, [%6 :128]! \n"// q6 q7 = _k30 _k31
// tile loop
"lsr r1, %14, #2 \n"// r1 = nn = tiles >> 2
"cmp r1, #0 \n"
"beq 2f \n"
//BEGIN tile loop
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0
"1: \n"
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q12, q0 \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm
"vmla.f32 q9, q12, q2 \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm
"vmla.f32 q10, q12, q4 \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm
"vmla.f32 q11, q12, q6 \n"
"pld [%5, #128] \n"
"vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1
"vmla.f32 q8, q13, q1 \n"
"vmla.f32 q9, q13, q3 \n"
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0
"vmla.f32 q10, q13, q5 \n"
"vmla.f32 q11, q13, q7 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
////
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q12, q0 \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm
"vmla.f32 q9, q12, q2 \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm
"vmla.f32 q10, q12, q4 \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm
"vmla.f32 q11, q12, q6 \n"
"pld [%5, #128] \n"
"vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1
"vmla.f32 q8, q13, q1 \n"
"vmla.f32 q9, q13, q3 \n"
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0
"vmla.f32 q10, q13, q5 \n"
"vmla.f32 q11, q13, q7 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
////
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q12, q0 \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm
"vmla.f32 q9, q12, q2 \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm
"vmla.f32 q10, q12, q4 \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm
"vmla.f32 q11, q12, q6 \n"
"pld [%5, #128] \n"
"vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1
"vmla.f32 q8, q13, q1 \n"
"vmla.f32 q9, q13, q3 \n"
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0
"vmla.f32 q10, q13, q5 \n"
"vmla.f32 q11, q13, q7 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
////
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q12, q0 \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm
"vmla.f32 q9, q12, q2 \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm
"vmla.f32 q10, q12, q4 \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm
"vmla.f32 q11, q12, q6 \n"
"pld [%5, #128] \n"
"vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1
"vmla.f32 q8, q13, q1 \n"
"vmla.f32 q9, q13, q3 \n"
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0
"vmla.f32 q10, q13, q5 \n"
"vmla.f32 q11, q13, q7 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"subs r1, #1 \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
"bne 1b \n"
"sub %4, %4, #16 \n"
//END tile loop
"2: \n"
// remain loop
"and r1, %14, #3 \n"// r1 = remain = tiles & 3;
"cmp r1, #0 \n"
"beq 4f \n"
//BEGIN remain loop
"3: \n"
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q12, q0 \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm
"vmla.f32 q9, q12, q2 \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm
"vmla.f32 q10, q12, q4 \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm
"vmla.f32 q11, q12, q6 \n"
"pld [%5, #128] \n"
"vld1.f32 {d26-d27}, [%5 :128]! \n"// q13 = _r1
"vmla.f32 q8, q13, q1 \n"
"vmla.f32 q9, q13, q3 \n"
"vmla.f32 q10, q13, q5 \n"
"vmla.f32 q11, q13, q7 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"subs r1, #1 \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
"bne 3b \n"
//END remain loop
"4: \n"
"subs r0, #1 \n"
"bne 0b \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(r1), // %5
"=r"(ktm) // %6
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(r1),
"6"(ktm),
"r"(tiles) // %14
: "cc", "memory", "r0", "r1", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13"
);
#endif // __aarch64__
#else
for (int r=0; r<16; r++)
{
for (int t=0; t<tiles; t++)
{
for (int m=0; m<4; m++)
{
output0_tm[m] += r0[m] * ktm[0 +m];
output0_tm[m] += r1[m] * ktm[4 +m];
output1_tm[m] += r0[m] * ktm[8 +m];
output1_tm[m] += r1[m] * ktm[12+m];
output2_tm[m] += r0[m] * ktm[16+m];
output2_tm[m] += r1[m] * ktm[20+m];
output3_tm[m] += r0[m] * ktm[24+m];
output3_tm[m] += r1[m] * ktm[28+m];
}
r0 += 4;
r1 += 4;
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
}
ktm += 32;
}
#endif // __ARM_NEON
}
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
#if __ARM_NEON
#if __aarch64__
asm volatile(
"mov w0, #16 \n"// w0 = r = 16
"0: \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4s, v1.4s}, [%5], #32 \n"// v0 v1 = _k00 _k10
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v2.4s, v3.4s}, [%5], #32 \n"// v2 v3 = _k20 _k30
// tile loop
"mov w1, %w12 \n"// w1 = tiles
"cmp w1, #0 \n"
"beq 2f \n"
//BEGIN tile loop
"1: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v16.4s}, [%4], #16 \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v17.4s}, [%0] \n"
"fmla v17.4s, v16.4s, v0.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v18.4s}, [%1] \n"
"fmla v18.4s, v16.4s, v1.4s \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v19.4s}, [%2] \n"
"fmla v19.4s, v16.4s, v2.4s \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v20.4s}, [%3] \n"
"fmla v20.4s, v16.4s, v3.4s \n"
"st1 {v17.4s}, [%0], #16 \n"
"st1 {v18.4s}, [%1], #16 \n"
"subs w1, w1, #1 \n"
"st1 {v19.4s}, [%2], #16 \n"
"st1 {v20.4s}, [%3], #16 \n"
"bne 1b \n"
//END tile loop
"2: \n"
"subs w0, w0, #1 \n"
"bne 0b \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(ktm) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(ktm),
"r"(tiles) // %12
: "cc", "memory", "x0", "x1", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19", "v20"
);
#else
asm volatile(
"mov r0, #16 \n"// r0 = r = 16
"0: \n"
"pld [%5, #256] \n"
"vld1.f32 {d0-d3}, [%5 :128]! \n"// q0 q1 = _k00 _k10
"pld [%5, #256] \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"// q2 q3 = _k20 _k30
// tile loop
"mov r1, %12 \n"// r1 = tiles
"cmp r1, #0 \n"
"beq 2f \n"
//BEGIN tile loop
"1: \n"
"pld [%4, #128] \n"
"vld1.f32 {d24-d25}, [%4 :128]! \n"// q12 = _r0
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q12, q0 \n"
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128] \n"// q9 = _output1_tm
"vmla.f32 q9, q12, q1 \n"
"pld [%2, #128] \n"
"vld1.f32 {d20-d21}, [%2 :128] \n"// q10 = _output2_tm
"vmla.f32 q10, q12, q2 \n"
"pld [%3, #128] \n"
"vld1.f32 {d22-d23}, [%3 :128] \n"// q11 = _output3_tm
"vmla.f32 q11, q12, q3 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"subs r1, #1 \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
"bne 1b \n"
//END tile loop
"2: \n"
"subs r0, #1 \n"
"bne 0b \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(r0), // %4
"=r"(ktm) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(r0),
"5"(ktm),
"r"(tiles) // %12
: "cc", "memory", "r0", "r1", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13"
);
#endif // __aarch64__
#else
for (int r=0; r<16; r++)
{
for (int t=0; t<tiles; t++)
{
for (int m=0; m<4; m++)
{
output0_tm[m] += r0[m] * ktm[0 +m];
output1_tm[m] += r0[m] * ktm[4 +m];
output2_tm[m] += r0[m] * ktm[8 +m];
output3_tm[m] += r0[m] * ktm[12+m];
}
r0 += 4;
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
}
ktm += 16;
}
#endif // __ARM_NEON
}
}
#pragma omp parallel for
for (int p = remain_outch_start; p<outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const float* ktm = (const float*)kernel_tm.channel(nn_outch) + 8*8 * inch * (p-remain_outch_start);
out0_tm.fill(0.f);
int q = 0;
for (; q<inch; q++)
{
const float* r0 = bottom_blob_tm.channel(q);
float* output0_tm = out0_tm;
for (int r=0; r<16; r++)
{
#if __ARM_NEON
float32x4_t _k00 = vld1q_f32(ktm); ktm += 4;
#endif // __ARM_NEON
// tile
for (int i=0; i<tiles; i++)
{
#if __ARM_NEON
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v17.4s}, [%1], #16 \n"
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v16.4s}, [%0] \n"
"fmla v16.4s, v17.4s, %4.4s \n"
"st1 {v16.4s}, [%0], #16 \n"
: "=r"(output0_tm), // %0
"=r"(r0) // %1
: "0"(output0_tm),
"1"(r0),
"w"(_k00) // %4
: "cc", "memory", "v16", "v17"
);
#else
asm volatile(
"pld [%1, #128] \n"
"vld1.f32 {d18-d19}, [%1 :128]! \n"// q9 = _r0
"pld [%0, #128] \n"
"vld1.f32 {d16-d17}, [%0 :128] \n"// q8 = _output0_tm
"vmla.f32 q8, q9, %q4 \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
: "=r"(output0_tm), // %0
"=r"(r0) // %1
: "0"(output0_tm),
"1"(r0),
"w"(_k00) // %4
: "cc", "memory", "q8", "q9"
);
#endif // __aarch64__
#else
for (int m=0; m<4; m++)
{
output0_tm[m] += r0[m] * ktm[m];
}
r0 += 4;
output0_tm += 4;
#endif // __ARM_NEON
}
#if !__ARM_NEON
ktm += 4;
#endif // __ARM_NEON
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch);
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
#if __ARM_NEON
const float coeff[4] = { 4.f, 8.f, 16.f, 32.f };
float32x4_t _coeff = vld1q_f32(coeff);
#endif // __ARM_NEON
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
#if __ARM_NEON
float32x2_t _bias0 = vdup_n_f32(bias0);
#endif // __ARM_NEON
float tmp[6][8];
// tile
for (int i=0; i<outh/6; i++)
{
for (int j=0; j<outw/6; j++)
{
#if __ARM_NEON
const float* output0_tm0_0 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm0_4 = out0_tm.row(i * w_tm/8 + j + tiles);
const float* output0_tm1_0 = out0_tm.row(i * w_tm/8 + j + tiles*2);
const float* output0_tm1_4 = out0_tm.row(i * w_tm/8 + j + tiles*3);
const float* output0_tm2_0 = out0_tm.row(i * w_tm/8 + j + tiles*4);
const float* output0_tm2_4 = out0_tm.row(i * w_tm/8 + j + tiles*5);
const float* output0_tm3_0 = out0_tm.row(i * w_tm/8 + j + tiles*6);
const float* output0_tm3_4 = out0_tm.row(i * w_tm/8 + j + tiles*7);
#if __aarch64__
for (int m=0; m+3<8; m+=4)
{
float32x4_t _output0_tm0_0123 = vld1q_f32(output0_tm0_0);
float32x4_t _output0_tm0_4567 = vld1q_f32(output0_tm0_4);
float32x4_t _output0_tm1_0123 = vld1q_f32(output0_tm1_0);
float32x4_t _output0_tm1_4567 = vld1q_f32(output0_tm1_4);
float32x4_t _output0_tm2_0123 = vld1q_f32(output0_tm2_0);
float32x4_t _output0_tm2_4567 = vld1q_f32(output0_tm2_4);
float32x4_t _output0_tm3_0123 = vld1q_f32(output0_tm3_0);
float32x4_t _output0_tm3_4567 = vld1q_f32(output0_tm3_4);
float32x4x2_t _output0_tm01_00221133 = vtrnq_f32(_output0_tm0_0123, _output0_tm1_0123);
float32x4x2_t _output0_tm01_44665577 = vtrnq_f32(_output0_tm0_4567, _output0_tm1_4567);
float32x4x2_t _output0_tm23_00221133 = vtrnq_f32(_output0_tm2_0123, _output0_tm3_0123);
float32x4x2_t _output0_tm23_44665577 = vtrnq_f32(_output0_tm2_4567, _output0_tm3_4567);
// no vswp intrinsic :(
float32x4_t _output0_tm_00 = vcombine_f32(vget_low_f32(_output0_tm01_00221133.val[0]), vget_low_f32(_output0_tm23_00221133.val[0]));
float32x4_t _output0_tm_11 = vcombine_f32(vget_low_f32(_output0_tm01_00221133.val[1]), vget_low_f32(_output0_tm23_00221133.val[1]));
float32x4_t _output0_tm_22 = vcombine_f32(vget_high_f32(_output0_tm01_00221133.val[0]), vget_high_f32(_output0_tm23_00221133.val[0]));
float32x4_t _output0_tm_33 = vcombine_f32(vget_high_f32(_output0_tm01_00221133.val[1]), vget_high_f32(_output0_tm23_00221133.val[1]));
float32x4_t _output0_tm_44 = vcombine_f32(vget_low_f32(_output0_tm01_44665577.val[0]), vget_low_f32(_output0_tm23_44665577.val[0]));
float32x4_t _output0_tm_55 = vcombine_f32(vget_low_f32(_output0_tm01_44665577.val[1]), vget_low_f32(_output0_tm23_44665577.val[1]));
float32x4_t _output0_tm_66 = vcombine_f32(vget_high_f32(_output0_tm01_44665577.val[0]), vget_high_f32(_output0_tm23_44665577.val[0]));
float32x4_t _output0_tm_77 = vcombine_f32(vget_high_f32(_output0_tm01_44665577.val[1]), vget_high_f32(_output0_tm23_44665577.val[1]));
float32x4_t _tmp024a = vaddq_f32(_output0_tm_11, _output0_tm_22);
float32x4_t _tmp135a = vsubq_f32(_output0_tm_11, _output0_tm_22);
float32x4_t _tmp024b = vaddq_f32(_output0_tm_33, _output0_tm_44);
float32x4_t _tmp135b = vsubq_f32(_output0_tm_33, _output0_tm_44);
float32x4_t _tmp024c = vaddq_f32(_output0_tm_55, _output0_tm_66);
float32x4_t _tmp135c = vsubq_f32(_output0_tm_55, _output0_tm_66);
float32x4_t _tmp0 = vaddq_f32(_output0_tm_00, _tmp024a);
_tmp0 = vmlaq_lane_f32(_tmp0, _tmp024c, vget_high_f32(_coeff), 1);
_tmp0 = vaddq_f32(_tmp0, _tmp024b);
float32x4_t _tmp2 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0);
_tmp2 = vmlaq_lane_f32(_tmp2, _tmp024c, vget_low_f32(_coeff), 1);
float32x4_t _tmp4 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0);
_tmp4 = vaddq_f32(_tmp4, _tmp024c);
_tmp4 = vaddq_f32(_tmp4, _tmp024c);
vst1q_f32(&tmp[0][m], _tmp0);
vst1q_f32(&tmp[2][m], _tmp2);
vst1q_f32(&tmp[4][m], _tmp4);
float32x4_t _tmp1 = vmlaq_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0);
_tmp1 = vaddq_f32(_tmp1, _tmp135b);
_tmp1 = vaddq_f32(_tmp1, _tmp135b);
float32x4_t _tmp3 = vmlaq_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1);
_tmp3 = vmlaq_lane_f32(_tmp3, _tmp135c, vget_low_f32(_coeff), 0);
float32x4_t _tmp5 = vaddq_f32(_output0_tm_77, _tmp135a);
_tmp5 = vmlaq_lane_f32(_tmp5, _tmp135b, vget_high_f32(_coeff), 1);
_tmp5 = vaddq_f32(_tmp5, _tmp135c);
vst1q_f32(&tmp[1][m], _tmp1);
vst1q_f32(&tmp[3][m], _tmp3);
vst1q_f32(&tmp[5][m], _tmp5);
output0_tm0_0 += out0_tm.w * tiles * 2*4;
output0_tm0_4 += out0_tm.w * tiles * 2*4;
output0_tm1_0 += out0_tm.w * tiles * 2*4;
output0_tm1_4 += out0_tm.w * tiles * 2*4;
output0_tm2_0 += out0_tm.w * tiles * 2*4;
output0_tm2_4 += out0_tm.w * tiles * 2*4;
output0_tm3_0 += out0_tm.w * tiles * 2*4;
output0_tm3_4 += out0_tm.w * tiles * 2*4;
}
const float* t0 = tmp[0];
const float* t1 = tmp[1];
float* output0 = out0.row(i * 6) + j * 6;
float* output1 = output0 + outw;
for (int m=0; m+1<6; m+=2)
{
float32x4_t _t0_0123 = vld1q_f32(t0);
float32x4_t _t0_4567 = vld1q_f32(t0+4);
float32x4_t _t1_0123 = vld1q_f32(t1);
float32x4_t _t1_4567 = vld1q_f32(t1+4);
float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123);
float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567);
float32x2_t _t_00 = vget_low_f32(_t01_00221133.val[0]);
float32x2_t _t_11 = vget_low_f32(_t01_00221133.val[1]);
float32x2_t _t_22 = vget_high_f32(_t01_00221133.val[0]);
float32x2_t _t_33 = vget_high_f32(_t01_00221133.val[1]);
float32x2_t _t_44 = vget_low_f32(_t01_44665577.val[0]);
float32x2_t _t_55 = vget_low_f32(_t01_44665577.val[1]);
float32x2_t _t_66 = vget_high_f32(_t01_44665577.val[0]);
float32x2_t _t_77 = vget_high_f32(_t01_44665577.val[1]);
float32x2_t _tmp024a = vadd_f32(_t_11, _t_22);
float32x2_t _tmp135a = vsub_f32(_t_11, _t_22);
float32x2_t _tmp024b = vadd_f32(_t_33, _t_44);
float32x2_t _tmp135b = vsub_f32(_t_33, _t_44);
float32x2_t _tmp024c = vadd_f32(_t_55, _t_66);
float32x2_t _tmp135c = vsub_f32(_t_55, _t_66);
float32x2_t _output_0 = vadd_f32(_t_00, _tmp024a);
_output_0 = vmla_lane_f32(_output_0, _tmp024c, vget_high_f32(_coeff), 1);
_output_0 = vadd_f32(_output_0, _tmp024b);
_output_0 = vadd_f32(_output_0, _bias0);
float32x2_t _output_2 = vmla_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0);
_output_2 = vmla_lane_f32(_output_2, _tmp024c, vget_low_f32(_coeff), 1);
_output_2 = vadd_f32(_output_2, _bias0);
float32x2_t _output_4 = vmla_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0);
_output_4 = vadd_f32(_output_4, _tmp024c);
_output_4 = vadd_f32(_output_4, _tmp024c);
_output_4 = vadd_f32(_output_4, _bias0);
output0[0] = vget_lane_f32(_output_0, 0);
output1[0] = vget_lane_f32(_output_0, 1);
output0[2] = vget_lane_f32(_output_2, 0);
output1[2] = vget_lane_f32(_output_2, 1);
output0[4] = vget_lane_f32(_output_4, 0);
output1[4] = vget_lane_f32(_output_4, 1);
float32x2_t _output_1 = vmla_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0);
_output_1 = vadd_f32(_output_1, _tmp135b);
_output_1 = vadd_f32(_output_1, _tmp135b);
_output_1 = vadd_f32(_output_1, _bias0);
float32x2_t _output_3 = vmla_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1);
_output_3 = vmla_lane_f32(_output_3, _tmp135c, vget_low_f32(_coeff), 0);
_output_3 = vadd_f32(_output_3, _bias0);
float32x2_t _output_5 = vadd_f32(_t_77, _tmp135a);
_output_5 = vmla_lane_f32(_output_5, _tmp135b, vget_high_f32(_coeff), 1);
_output_5 = vadd_f32(_output_5, _tmp135c);
_output_5 = vadd_f32(_output_5, _bias0);
output0[1] = vget_lane_f32(_output_1, 0);
output1[1] = vget_lane_f32(_output_1, 1);
output0[3] = vget_lane_f32(_output_3, 0);
output1[3] = vget_lane_f32(_output_3, 1);
output0[5] = vget_lane_f32(_output_5, 0);
output1[5] = vget_lane_f32(_output_5, 1);
t0 += 8*2;
t1 += 8*2;
output0 += outw*2;
output1 += outw*2;
}
#else // __aarch64__
float* t0 = tmp[0];
float* t1 = tmp[1];
int step = out0_tm.w * tiles * 2*4 *4;
asm volatile(
// loop0
"vld1.f32 {d16-d17}, [%2], %21 \n"
"vld1.f32 {d18-d19}, [%3], %21 \n"
"vld1.f32 {d20-d21}, [%4], %21 \n"
"vld1.f32 {d22-d23}, [%5], %21 \n"
"vld1.f32 {d24-d25}, [%6], %21 \n"
"vld1.f32 {d26-d27}, [%7], %21 \n"
"vld1.f32 {d28-d29}, [%8], %21 \n"
"vld1.f32 {d30-d31}, [%9], %21 \n"
"vtrn.32 q8, q10 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vadd.f32 q2, q10, q12 \n"
"vsub.f32 q3, q10, q12 \n"
"vadd.f32 q4, q14, q9 \n"
"vsub.f32 q5, q14, q9 \n"
"vadd.f32 q6, q11, q13 \n"
"vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14
"vmov q9, q3 \n"
"vadd.f32 q8, q8, q2 \n"
"vmla.f32 q9, q7, %f20[0] \n"
"vmov q12, q2 \n"
"vmov q10, q2 \n"
"vmov q11, q3 \n"
"vmla.f32 q12, q4, %f20[0] \n"
"vadd.f32 q15, q15, q3 \n"
"vmla.f32 q8, q6, %f20[1] \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q4, %e20[0] \n"
"vmla.f32 q11, q5, %e20[1] \n"
"vadd.f32 q12, q12, q6 \n"
"vmla.f32 q15, q5, %f20[1] \n"
"vadd.f32 q8, q8, q4 \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q6, %e20[1] \n"
"vmla.f32 q11, q7, %e20[0] \n"
"vadd.f32 q12, q12, q6 \n"
"vadd.f32 q15, q15, q7 \n"
"vst1.f32 {d16-d17}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d18-d19}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d20-d21}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d22-d23}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d24-d25}, [%0] \n"
"sub %0, %0, #112 \n"
"vst1.f32 {d30-d31}, [%1] \n"
"sub %1, %1, #112 \n"
// loop1
"vld1.f32 {d16-d17}, [%2] \n"
"vld1.f32 {d18-d19}, [%3] \n"
"vld1.f32 {d20-d21}, [%4] \n"
"vld1.f32 {d22-d23}, [%5] \n"
"vld1.f32 {d24-d25}, [%6] \n"
"vld1.f32 {d26-d27}, [%7] \n"
"vld1.f32 {d28-d29}, [%8] \n"
"vld1.f32 {d30-d31}, [%9] \n"
"vtrn.32 q8, q10 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vadd.f32 q2, q10, q12 \n"
"vsub.f32 q3, q10, q12 \n"
"vadd.f32 q4, q14, q9 \n"
"vsub.f32 q5, q14, q9 \n"
"vadd.f32 q6, q11, q13 \n"
"vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14
"vmov q9, q3 \n"
"vadd.f32 q8, q8, q2 \n"
"vmla.f32 q9, q7, %f20[0] \n"
"vmov q12, q2 \n"
"vmov q10, q2 \n"
"vmov q11, q3 \n"
"vmla.f32 q12, q4, %f20[0] \n"
"vadd.f32 q15, q15, q3 \n"
"vmla.f32 q8, q6, %f20[1] \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q4, %e20[0] \n"
"vmla.f32 q11, q5, %e20[1] \n"
"vadd.f32 q12, q12, q6 \n"
"vmla.f32 q15, q5, %f20[1] \n"
"vadd.f32 q8, q8, q4 \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q6, %e20[1] \n"
"vmla.f32 q11, q7, %e20[0] \n"
"vadd.f32 q12, q12, q6 \n"
"vadd.f32 q15, q15, q7 \n"
"vst1.f32 {d16-d17}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d18-d19}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d20-d21}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d22-d23}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d24-d25}, [%0] \n"
"vst1.f32 {d30-d31}, [%1] \n"
: "=r"(t0), // %0
"=r"(t1), // %1
"=r"(output0_tm0_0), // %2
"=r"(output0_tm0_4), // %3
"=r"(output0_tm1_0), // %4
"=r"(output0_tm1_4), // %5
"=r"(output0_tm2_0), // %6
"=r"(output0_tm2_4), // %7
"=r"(output0_tm3_0), // %8
"=r"(output0_tm3_4) // %9
: "0"(t0),
"1"(t1),
"2"(output0_tm0_0),
"3"(output0_tm0_4),
"4"(output0_tm1_0),
"5"(output0_tm1_4),
"6"(output0_tm2_0),
"7"(output0_tm2_4),
"8"(output0_tm3_0),
"9"(output0_tm3_4),
"w"(_coeff), // %20
"r"(step) // %21
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
t0 = tmp[0];
t1 = tmp[1];
float* output0 = out0.row(i * 6) + j * 6;
float* output1 = output0 + outw;
int stepw = outw*2 * 4;
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n"// _bias0
"vadd.f32 d20, d20, %P9 \n"// _bias0
"vadd.f32 d17, d17, %P9 \n"// _bias0
"vadd.f32 d21, d21, %P9 \n"// _bias0
"vadd.f32 d18, d18, %P9 \n"// _bias0
"vadd.f32 d22, d22, %P9 \n"// _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
// loop1
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n"// _bias0
"vadd.f32 d20, d20, %P9 \n"// _bias0
"vadd.f32 d17, d17, %P9 \n"// _bias0
"vadd.f32 d21, d21, %P9 \n"// _bias0
"vadd.f32 d18, d18, %P9 \n"// _bias0
"vadd.f32 d22, d22, %P9 \n"// _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
// loop2
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n"// _bias0
"vadd.f32 d20, d20, %P9 \n"// _bias0
"vadd.f32 d17, d17, %P9 \n"// _bias0
"vadd.f32 d21, d21, %P9 \n"// _bias0
"vadd.f32 d18, d18, %P9 \n"// _bias0
"vadd.f32 d22, d22, %P9 \n"// _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
: "=r"(output0), // %0
"=r"(output1), // %1
"=r"(t0), // %2
"=r"(t1) // %3
: "0"(output0),
"1"(output1),
"2"(t0),
"3"(t1),
"w"(_coeff), // %8
"w"(_bias0), // %9
"r"(stepw) // %10
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
const float* output0_tm_0 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm_4 = out0_tm.row(i * w_tm/8 + j + tiles);
for (int m=0; m<8; m++)
{
float tmp024a = output0_tm_0[1] + output0_tm_0[2];
float tmp135a = output0_tm_0[1] - output0_tm_0[2];
float tmp024b = output0_tm_0[3] + output0_tm_4[0];
float tmp135b = output0_tm_0[3] - output0_tm_4[0];
float tmp024c = output0_tm_4[1] + output0_tm_4[2];
float tmp135c = output0_tm_4[1] - output0_tm_4[2];
tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm_4[3] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm_0 += out0_tm.w * tiles * 2;
output0_tm_4 += out0_tm.w * tiles * 2;
}
float* output0 = out0.row(i * 6) + j * 6;
for (int m=0; m<6; m++)
{
const float* tmp0 = tmp[m];
float tmp024a = tmp0[1] + tmp0[2];
float tmp135a = tmp0[1] - tmp0[2];
float tmp024b = tmp0[3] + tmp0[4];
float tmp135b = tmp0[3] - tmp0[4];
float tmp024c = tmp0[5] + tmp0[6];
float tmp135c = tmp0[5] - tmp0[6];
output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw;
}
#endif // __ARM_NEON
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w);
}
#if !__aarch64__
// Horizontal sum of all four lanes of a float32x4_t.
// armv7 NEON has no vaddvq_f32 instruction (it is native on aarch64),
// so emulate it: fold the high half onto the low half, then pairwise-add.
// TODO drop it once armv7 support is no longer needed
static inline float vaddvq_f32(float32x4_t _v)
{
    float32x2_t _sum = vadd_f32(vget_low_f32(_v), vget_high_f32(_v));
    return vget_lane_f32(vpadd_f32(_sum, _sum), 0);
}
#endif // !__aarch64__
static void conv3x3s1_winograd64_neon5(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& _bias)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f);
const float* bias = _bias;
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
bottom_blob_tm.create(1, 64 * tiles, inch);
// bottom_blob_tm.create(inch, tiles, 64);
// const float itm[8][8] = {
// {1.0f, 0.0f, -5.25f, 0.00f, 5.25f, 0.00f, -1.0f, 0.0f},
//
// {0.0f, 1.0f, 1.00f, -4.25f, -4.25f, 1.00f, 1.0f, 0.0f},
// {0.0f, -1.0f, 1.00f, 4.25f, -4.25f, -1.00f, 1.0f, 0.0f},
//
// {0.0f, 0.5f, 0.25f, -2.50f, -1.25f, 2.00f, 1.0f, 0.0f},
// {0.0f, -0.5f, 0.25f, 2.50f, -1.25f, -2.00f, 1.0f, 0.0f},
//
// {0.0f, 2.0f, 4.00f, -2.50f, -5.00f, 0.50f, 1.0f, 0.0f},
// {0.0f, -2.0f, 4.00f, 2.50f, -5.00f, -0.50f, 1.0f, 0.0f},
//
// {0.0f, -1.0f, 0.00f, 5.25f, 0.00f, -5.25f, 0.0f, 1.0f}
// };
// 0 = r00 - r06 + (r04 - r02) * 5.25
// 7 = r07 - r01 + (r03 - r05) * 5.25
// 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
// 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
// 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
// reuse r04 * 1.25
// reuse r03 * 2.5
// 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
// 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
#if __ARM_NEON
const float coeff[8] = {
0.25f, 0.5f, -1.25f, 2.f,
-2.5f, 4.f, 4.25f, 5.25f
};
float32x4_t _coeff0 = vld1q_f32(coeff);
float32x4_t _coeff1 = vld1q_f32(coeff+4);
#endif // __ARM_NEON
#pragma omp parallel for
for (int q = 0; q<inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
float tmp[8][8];
// tile
for (int i=0; i<h_tm/8; i++)
{
for (int j=0; j<w_tm/8; j++)
{
#if __ARM_NEON
const float* r0 = img0.row(i * 6) + j * 6;
const float* r1 = r0 + w;
const float* r2 = r0 + w*2;
const float* r3 = r0 + w*3;
#if __aarch64__
for (int m=0; m+3<8; m+=4)
{
float32x4_t _r0_0123 = vld1q_f32(r0);
float32x4_t _r0_4567 = vld1q_f32(r0+4);
float32x4_t _r1_0123 = vld1q_f32(r1);
float32x4_t _r1_4567 = vld1q_f32(r1+4);
float32x4_t _r2_0123 = vld1q_f32(r2);
float32x4_t _r2_4567 = vld1q_f32(r2+4);
float32x4_t _r3_0123 = vld1q_f32(r3);
float32x4_t _r3_4567 = vld1q_f32(r3+4);
float32x4x2_t _r01_00221133 = vtrnq_f32(_r0_0123, _r1_0123);
float32x4x2_t _r01_44665577 = vtrnq_f32(_r0_4567, _r1_4567);
float32x4x2_t _r23_00221133 = vtrnq_f32(_r2_0123, _r3_0123);
float32x4x2_t _r23_44665577 = vtrnq_f32(_r2_4567, _r3_4567);
// no vswp intrinsic :(
float32x4_t _r_00 = vcombine_f32(vget_low_f32(_r01_00221133.val[0]), vget_low_f32(_r23_00221133.val[0]));
float32x4_t _r_11 = vcombine_f32(vget_low_f32(_r01_00221133.val[1]), vget_low_f32(_r23_00221133.val[1]));
float32x4_t _r_22 = vcombine_f32(vget_high_f32(_r01_00221133.val[0]), vget_high_f32(_r23_00221133.val[0]));
float32x4_t _r_33 = vcombine_f32(vget_high_f32(_r01_00221133.val[1]), vget_high_f32(_r23_00221133.val[1]));
float32x4_t _r_44 = vcombine_f32(vget_low_f32(_r01_44665577.val[0]), vget_low_f32(_r23_44665577.val[0]));
float32x4_t _r_55 = vcombine_f32(vget_low_f32(_r01_44665577.val[1]), vget_low_f32(_r23_44665577.val[1]));
float32x4_t _r_66 = vcombine_f32(vget_high_f32(_r01_44665577.val[0]), vget_high_f32(_r23_44665577.val[0]));
float32x4_t _r_77 = vcombine_f32(vget_high_f32(_r01_44665577.val[1]), vget_high_f32(_r23_44665577.val[1]));
float32x4_t _r_0_m_6 = vsubq_f32(_r_00, _r_66);
float32x4_t _r_7_m_1 = vsubq_f32(_r_77, _r_11);
float32x4_t _r_4_m_2 = vsubq_f32(_r_44, _r_22);
float32x4_t _r_3_m_5 = vsubq_f32(_r_33, _r_55);
float32x4_t _tmp0 = vmlaq_lane_f32(_r_0_m_6, _r_4_m_2, vget_high_f32(_coeff1), 1);
float32x4_t _tmp7 = vmlaq_lane_f32(_r_7_m_1, _r_3_m_5, vget_high_f32(_coeff1), 1);
vst1q_f32(&tmp[0][m], _tmp0);
vst1q_f32(&tmp[7][m], _tmp7);
float32x4_t _r_2_a_6 = vaddq_f32(_r_22, _r_66);
float32x4_t _r_1_a_5 = vaddq_f32(_r_11, _r_55);
float32x4_t _tmp12a = vmlsq_lane_f32(_r_2_a_6, _r_44, vget_high_f32(_coeff1), 0);
float32x4_t _tmp12b = vmlsq_lane_f32(_r_1_a_5, _r_33, vget_high_f32(_coeff1), 0);
float32x4_t _tmp1 = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _tmp2 = vsubq_f32(_tmp12a, _tmp12b);
vst1q_f32(&tmp[1][m], _tmp1);
vst1q_f32(&tmp[2][m], _tmp2);
float32x4_t _r_4_x_c = vmulq_lane_f32(_r_44, vget_high_f32(_coeff0), 0);
float32x4_t _r_3_x_c = vmulq_lane_f32(_r_33, vget_low_f32(_coeff1), 0);
float32x4_t _tmp34a = vaddq_f32(_r_66, _r_4_x_c);
_tmp34a = vmlaq_lane_f32(_tmp34a, _r_22, vget_low_f32(_coeff0), 0);
float32x4_t _tmp34b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_low_f32(_coeff0), 1);
_tmp34b = vmlaq_lane_f32(_tmp34b, _r_55, vget_high_f32(_coeff0), 1);
float32x4_t _tmp3 = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _tmp4 = vsubq_f32(_tmp34a, _tmp34b);
vst1q_f32(&tmp[3][m], _tmp3);
vst1q_f32(&tmp[4][m], _tmp4);
// reuse r04 * 1.25
// reuse r03 * 2.5
float32x4_t _r_2_a_4c = vaddq_f32(_r_22, _r_4_x_c);
float32x4_t _tmp56a = vmlaq_lane_f32(_r_66, _r_2_a_4c, vget_low_f32(_coeff1), 1);
float32x4_t _tmp56b = vmlaq_lane_f32(_r_3_x_c, _r_11, vget_high_f32(_coeff0), 1);
_tmp56b = vmlaq_lane_f32(_tmp56b, _r_55, vget_low_f32(_coeff0), 1);
float32x4_t _tmp5 = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _tmp6 = vsubq_f32(_tmp56a, _tmp56b);
vst1q_f32(&tmp[5][m], _tmp5);
vst1q_f32(&tmp[6][m], _tmp6);
r0 += w*4;
r1 += w*4;
r2 += w*4;
r3 += w*4;
}
const float* t0 = tmp[0];
const float* t1 = tmp[1];
const float* t2 = tmp[2];
const float* t3 = tmp[3];
float* r0_tm0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm1 = img0_tm.row(i * w_tm/8 + j + tiles*8);
float* r0_tm2 = img0_tm.row(i * w_tm/8 + j + tiles*16);
float* r0_tm3 = img0_tm.row(i * w_tm/8 + j + tiles*24);
for (int m=0; m+3<8; m+=4)
{
float32x4_t _t0_0123 = vld1q_f32(t0);
float32x4_t _t0_4567 = vld1q_f32(t0+4);
float32x4_t _t1_0123 = vld1q_f32(t1);
float32x4_t _t1_4567 = vld1q_f32(t1+4);
float32x4_t _t2_0123 = vld1q_f32(t2);
float32x4_t _t2_4567 = vld1q_f32(t2+4);
float32x4_t _t3_0123 = vld1q_f32(t3);
float32x4_t _t3_4567 = vld1q_f32(t3+4);
float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123);
float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567);
float32x4x2_t _t23_00221133 = vtrnq_f32(_t2_0123, _t3_0123);
float32x4x2_t _t23_44665577 = vtrnq_f32(_t2_4567, _t3_4567);
// no vswp intrinsic :(
float32x4_t _t_00 = vcombine_f32(vget_low_f32(_t01_00221133.val[0]), vget_low_f32(_t23_00221133.val[0]));
float32x4_t _t_11 = vcombine_f32(vget_low_f32(_t01_00221133.val[1]), vget_low_f32(_t23_00221133.val[1]));
float32x4_t _t_22 = vcombine_f32(vget_high_f32(_t01_00221133.val[0]), vget_high_f32(_t23_00221133.val[0]));
float32x4_t _t_33 = vcombine_f32(vget_high_f32(_t01_00221133.val[1]), vget_high_f32(_t23_00221133.val[1]));
float32x4_t _t_44 = vcombine_f32(vget_low_f32(_t01_44665577.val[0]), vget_low_f32(_t23_44665577.val[0]));
float32x4_t _t_55 = vcombine_f32(vget_low_f32(_t01_44665577.val[1]), vget_low_f32(_t23_44665577.val[1]));
float32x4_t _t_66 = vcombine_f32(vget_high_f32(_t01_44665577.val[0]), vget_high_f32(_t23_44665577.val[0]));
float32x4_t _t_77 = vcombine_f32(vget_high_f32(_t01_44665577.val[1]), vget_high_f32(_t23_44665577.val[1]));
float32x4_t _t_0_m_6 = vsubq_f32(_t_00, _t_66);
float32x4_t _t_7_m_1 = vsubq_f32(_t_77, _t_11);
float32x4_t _t_4_m_2 = vsubq_f32(_t_44, _t_22);
float32x4_t _t_3_m_5 = vsubq_f32(_t_33, _t_55);
float32x4_t _r0_tm_0_0 = vmlaq_lane_f32(_t_0_m_6, _t_4_m_2, vget_high_f32(_coeff1), 1);
float32x4_t _r0_tm_4_3 = vmlaq_lane_f32(_t_7_m_1, _t_3_m_5, vget_high_f32(_coeff1), 1);
r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_0, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_0, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_0, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_0, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
float32x4_t _t_2_m_6 = vaddq_f32(_t_22, _t_66);
float32x4_t _t_1_m_5 = vaddq_f32(_t_11, _t_55);
float32x4_t _tmp12a = vmlsq_lane_f32(_t_2_m_6, _t_44, vget_high_f32(_coeff1), 0);
float32x4_t _tmp12b = vmlsq_lane_f32(_t_1_m_5, _t_33, vget_high_f32(_coeff1), 0);
float32x4_t _r0_tm_0_1 = vaddq_f32(_tmp12a, _tmp12b);
float32x4_t _r0_tm_0_2 = vsubq_f32(_tmp12a, _tmp12b);
r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_1, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_1, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_1, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_1, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_2, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_2, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_2, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_2, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
float32x4_t _t_4_x_c = vmulq_lane_f32(_t_44, vget_high_f32(_coeff0), 0);
float32x4_t _t_3_x_c = vmulq_lane_f32(_t_33, vget_low_f32(_coeff1), 0);
float32x4_t _tmp34a = vaddq_f32(_t_66, _t_4_x_c);
_tmp34a = vmlaq_lane_f32(_tmp34a, _t_22, vget_low_f32(_coeff0), 0);
float32x4_t _tmp34b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_low_f32(_coeff0), 1);
_tmp34b = vmlaq_lane_f32(_tmp34b, _t_55, vget_high_f32(_coeff0), 1);
float32x4_t _r0_tm_0_3 = vaddq_f32(_tmp34a, _tmp34b);
float32x4_t _r0_tm_4_0 = vsubq_f32(_tmp34a, _tmp34b);
r0_tm0[0] = vgetq_lane_f32(_r0_tm_0_3, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_0_3, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_0_3, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_0_3, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_0, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_0, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_0, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_0, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
float32x4_t _t_2_a_4c = vaddq_f32(_t_22, _t_4_x_c);
float32x4_t _tmp56a = vmlaq_lane_f32(_t_66, _t_2_a_4c, vget_low_f32(_coeff1), 1);
float32x4_t _tmp56b = vmlaq_lane_f32(_t_3_x_c, _t_11, vget_high_f32(_coeff0), 1);
_tmp56b = vmlaq_lane_f32(_tmp56b, _t_55, vget_low_f32(_coeff0), 1);
float32x4_t _r0_tm_4_1 = vaddq_f32(_tmp56a, _tmp56b);
float32x4_t _r0_tm_4_2 = vsubq_f32(_tmp56a, _tmp56b);
r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_1, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_1, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_1, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_1, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_2, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_2, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_2, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_2, 3);
r0_tm0 += img0_tm.w*tiles;
r0_tm1 += img0_tm.w*tiles;
r0_tm2 += img0_tm.w*tiles;
r0_tm3 += img0_tm.w*tiles;
r0_tm0[0] = vgetq_lane_f32(_r0_tm_4_3, 0);
r0_tm1[0] = vgetq_lane_f32(_r0_tm_4_3, 1);
r0_tm2[0] = vgetq_lane_f32(_r0_tm_4_3, 2);
r0_tm3[0] = vgetq_lane_f32(_r0_tm_4_3, 3);
t0 += 8*4;
t1 += 8*4;
t2 += 8*4;
t3 += 8*4;
r0_tm0 += img0_tm.w*tiles*25;
r0_tm1 += img0_tm.w*tiles*25;
r0_tm2 += img0_tm.w*tiles*25;
r0_tm3 += img0_tm.w*tiles*25;
}
#else // __aarch64__
float* t0 = tmp[0];
float* t1 = tmp[1];
float* t2 = tmp[2];
float* t3 = tmp[3];
float* t4 = tmp[4];
float* t5 = tmp[5];
float* t6 = tmp[6];
float* t7 = tmp[7];
int stepw = w*4*4;
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%8], %26 \n"
"vld1.f32 {d20-d23}, [%9], %26 \n"
"vld1.f32 {d24-d27}, [%10], %26 \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11], %26 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m]
"vmov q3, q7 \n"// use q7
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m]
"vmla.f32 q4, q6, %e25[1] \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m]
"vadd.f32 q8, q2, q3 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m]
"vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m]
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m]
"vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m]
"vst1.f32 {d12-d13}, [%7]! \n"// tmp[7][m]
// loop1
"vld1.f32 {d16-d19}, [%8] \n"
"vld1.f32 {d20-d23}, [%9] \n"
"vld1.f32 {d24-d27}, [%10] \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4-d5}, [%0]! \n"// tmp[0][m]
"vmov q3, q7 \n"// use q7
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16-d17}, [%1]! \n"// tmp[1][m]
"vmla.f32 q4, q6, %e25[1] \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18-d19}, [%2]! \n"// tmp[2][m]
"vadd.f32 q8, q2, q3 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vadd.f32 q2, q4, q5 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d16-d17}, [%3]! \n"// tmp[3][m]
"vst1.f32 {d18-d19}, [%4]! \n"// tmp[4][m]
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d4-d5}, [%5]! \n"// tmp[5][m]
"vst1.f32 {d6-d7}, [%6]! \n"// tmp[6][m]
"vst1.f32 {d12-d13}, [%7]! \n"// tmp[7][m]
: "=r"(t0), // %0
"=r"(t1), // %1
"=r"(t2), // %2
"=r"(t3), // %3
"=r"(t4), // %4
"=r"(t5), // %5
"=r"(t6), // %6
"=r"(t7), // %7
"=r"(r0), // %8
"=r"(r1), // %9
"=r"(r2), // %10
"=r"(r3) // %11
: "0"(t0),
"1"(t1),
"2"(t2),
"3"(t3),
"4"(t4),
"5"(t5),
"6"(t6),
"7"(t7),
"8"(r0),
"9"(r1),
"10"(r2),
"11"(r3),
"w"(_coeff0), // %24
"w"(_coeff1), // %25
"r"(stepw) // %26
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
t0 = tmp[0];
t1 = tmp[1];
t2 = tmp[2];
t3 = tmp[3];
float* r0_tm0_0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm1_0 = img0_tm.row(i * w_tm/8 + j + tiles*8);
float* r0_tm2_0 = img0_tm.row(i * w_tm/8 + j + tiles*16);
float* r0_tm3_0 = img0_tm.row(i * w_tm/8 + j + tiles*24);
float* r0_tm0_4 = img0_tm.row(i * w_tm/8 + j + tiles*32);
float* r0_tm1_4 = img0_tm.row(i * w_tm/8 + j + tiles*40);
float* r0_tm2_4 = img0_tm.row(i * w_tm/8 + j + tiles*48);
float* r0_tm3_4 = img0_tm.row(i * w_tm/8 + j + tiles*56);
int step = img0_tm.w*tiles*4;
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%8] \n"
"add %8, %8, #128 \n"
"vld1.f32 {d20-d23}, [%9] \n"
"add %9, %9, #128 \n"
"vld1.f32 {d24-d27}, [%10] \n"
"add %10, %10, #128 \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"add %11, %11, #128 \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4[0]}, [%0], %26 \n"
"vst1.f32 {d4[1]}, [%1], %26 \n"
"vmov q3, q7 \n"// use q7
"vst1.f32 {d5[0]}, [%2], %26 \n"
"vst1.f32 {d5[1]}, [%3], %26 \n"
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16[0]}, [%0], %26 \n"
"vst1.f32 {d16[1]}, [%1], %26 \n"
"vmla.f32 q4, q6, %e25[1] \n"
"vst1.f32 {d17[0]}, [%2], %26 \n"
"vst1.f32 {d17[1]}, [%3], %26 \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18[0]}, [%0], %26 \n"
"vst1.f32 {d18[1]}, [%1], %26 \n"
"vadd.f32 q8, q2, q3 \n"
"vst1.f32 {d19[0]}, [%2], %26 \n"
"vst1.f32 {d19[1]}, [%3], %26 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vst1.f32 {d16[0]}, [%0], %26 \n"
"vst1.f32 {d16[1]}, [%1], %26 \n"
"vst1.f32 {d17[0]}, [%2], %26 \n"
"vst1.f32 {d17[1]}, [%3], %26 \n"
"vadd.f32 q2, q4, q5 \n"
"vst1.f32 {d18[0]}, [%0], %26 \n"
"vst1.f32 {d18[1]}, [%1], %26 \n"
"vst1.f32 {d19[0]}, [%2], %26 \n"
"vst1.f32 {d19[1]}, [%3], %26 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d4[0]}, [%0], %26 \n"
"vst1.f32 {d4[1]}, [%1], %26 \n"
"vst1.f32 {d5[0]}, [%2], %26 \n"
"vst1.f32 {d5[1]}, [%3], %26 \n"
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d6[0]}, [%0], %26 \n"
"vst1.f32 {d6[1]}, [%1], %26 \n"
"vst1.f32 {d7[0]}, [%2], %26 \n"
"vst1.f32 {d7[1]}, [%3], %26 \n"
"vst1.f32 {d12[0]}, [%0] \n"
"vst1.f32 {d12[1]}, [%1] \n"
"vst1.f32 {d13[0]}, [%2] \n"
"vst1.f32 {d13[1]}, [%3] \n"
// loop1
"vld1.f32 {d16-d19}, [%8] \n"
"vld1.f32 {d20-d23}, [%9] \n"
"vld1.f32 {d24-d27}, [%10] \n"
"vtrn.32 q8, q10 \n"
"vld1.f32 {d28-d31}, [%11] \n"
"vtrn.32 q9, q11 \n"
"vtrn.32 q12, q14 \n"
"vtrn.32 q13, q15 \n"
"vswp d17, d24 \n"
"vswp d19, d26 \n"
"vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
"vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vsub.f32 q2, q8, q13 \n"
"vsub.f32 q3, q9, q12 \n"
"vadd.f32 q4, q12, q13 \n"
"vadd.f32 q5, q10, q11 \n"
"vmla.f32 q2, q3, %f25[1] \n"
"vmul.f32 q7, q14, %e25[0] \n"// q7 = _r_3_x_c
"vmul.f32 q6, q9, %f24[0] \n"// q6 = _r_4_x_c
"vmls.f32 q4, q9, %f25[0] \n"
"vmls.f32 q5, q14, %f25[0] \n"
"vst1.f32 {d4[0]}, [%4], %26 \n"
"vst1.f32 {d4[1]}, [%5], %26 \n"
"vmov q3, q7 \n"// use q7
"vst1.f32 {d5[0]}, [%6], %26 \n"
"vst1.f32 {d5[1]}, [%7], %26 \n"
"vadd.f32 q2, q13, q6 \n"// use q6
"vmla.f32 q3, q10, %e24[1] \n"
"vadd.f32 q8, q4, q5 \n"
"vsub.f32 q9, q4, q5 \n"
"vmov q5, q7 \n"// use q7
"vadd.f32 q6, q12, q6 \n"// use q6
"vmla.f32 q5, q10, %f24[1] \n"
"vmov q4, q13 \n"
"vmla.f32 q2, q12, %e24[0] \n"
"vmla.f32 q3, q11, %f24[1] \n"
"vst1.f32 {d16[0]}, [%4], %26 \n"
"vst1.f32 {d16[1]}, [%5], %26 \n"
"vmla.f32 q4, q6, %e25[1] \n"
"vst1.f32 {d17[0]}, [%6], %26 \n"
"vst1.f32 {d17[1]}, [%7], %26 \n"
"vmla.f32 q5, q11, %e24[1] \n"
"vst1.f32 {d18[0]}, [%4], %26 \n"
"vst1.f32 {d18[1]}, [%5], %26 \n"
"vadd.f32 q8, q2, q3 \n"
"vst1.f32 {d19[0]}, [%6], %26 \n"
"vst1.f32 {d19[1]}, [%7], %26 \n"
"vsub.f32 q9, q2, q3 \n"
"vsub.f32 q6, q15, q10 \n"
"vsub.f32 q7, q14, q11 \n"
"vst1.f32 {d16[0]}, [%4], %26 \n"
"vst1.f32 {d16[1]}, [%5], %26 \n"
"vst1.f32 {d17[0]}, [%6], %26 \n"
"vst1.f32 {d17[1]}, [%7], %26 \n"
"vadd.f32 q2, q4, q5 \n"
"vst1.f32 {d18[0]}, [%4], %26 \n"
"vst1.f32 {d18[1]}, [%5], %26 \n"
"vst1.f32 {d19[0]}, [%6], %26 \n"
"vst1.f32 {d19[1]}, [%7], %26 \n"
"vsub.f32 q3, q4, q5 \n"
"vst1.f32 {d4[0]}, [%4], %26 \n"
"vst1.f32 {d4[1]}, [%5], %26 \n"
"vst1.f32 {d5[0]}, [%6], %26 \n"
"vst1.f32 {d5[1]}, [%7], %26 \n"
"vmla.f32 q6, q7, %f25[1] \n"
"vst1.f32 {d6[0]}, [%4], %26 \n"
"vst1.f32 {d6[1]}, [%5], %26 \n"
"vst1.f32 {d7[0]}, [%6], %26 \n"
"vst1.f32 {d7[1]}, [%7], %26 \n"
"vst1.f32 {d12[0]}, [%4] \n"
"vst1.f32 {d12[1]}, [%5] \n"
"vst1.f32 {d13[0]}, [%6] \n"
"vst1.f32 {d13[1]}, [%7] \n"
: "=r"(r0_tm0_0), // %0
"=r"(r0_tm1_0), // %1
"=r"(r0_tm2_0), // %2
"=r"(r0_tm3_0), // %3
"=r"(r0_tm0_4), // %4
"=r"(r0_tm1_4), // %5
"=r"(r0_tm2_4), // %6
"=r"(r0_tm3_4), // %7
"=r"(t0), // %8
"=r"(t1), // %9
"=r"(t2), // %10
"=r"(t3) // %11
: "0"(r0_tm0_0),
"1"(r0_tm1_0),
"2"(r0_tm2_0),
"3"(r0_tm3_0),
"4"(r0_tm0_4),
"5"(r0_tm1_4),
"6"(r0_tm2_4),
"7"(r0_tm3_4),
"8"(t0),
"9"(t1),
"10"(t2),
"11"(t3),
"w"(_coeff0), // %24
"w"(_coeff1), // %25
"r"(step) // %26
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
const float* r0 = img0.row(i * 6) + j * 6;
for (int m=0; m<8; m++)
{
tmp[0][m] = r0[0] - r0[6] + (r0[4] - r0[2]) * 5.25f;
tmp[7][m] = r0[7] - r0[1] + (r0[3] - r0[5]) * 5.25f;
float tmp12a = (r0[2] + r0[6] - r0[4] * 4.25f);
float tmp12b = (r0[1] + r0[5] - r0[3] * 4.25f);
tmp[1][m] = tmp12a + tmp12b;
tmp[2][m] = tmp12a - tmp12b;
float tmp34a = (r0[6] + r0[2] * 0.25f - r0[4] * 1.25f);
float tmp34b = (r0[1] * 0.5f - r0[3] * 2.5f + r0[5] * 2.f);
tmp[3][m] = tmp34a + tmp34b;
tmp[4][m] = tmp34a - tmp34b;
float tmp56a = (r0[6] + (r0[2] - r0[4] * 1.25f) * 4.f);
float tmp56b = (r0[1] * 2.f - r0[3] * 2.5f + r0[5] * 0.5f);
tmp[5][m] = tmp56a + tmp56b;
tmp[6][m] = tmp56a - tmp56b;
r0 += w;
}
float* r0_tm_0 = img0_tm.row(i * w_tm/8 + j);
float* r0_tm_1 = img0_tm.row(i * w_tm/8 + j + tiles);
float* r0_tm_2 = img0_tm.row(i * w_tm/8 + j + tiles*2);
float* r0_tm_3 = img0_tm.row(i * w_tm/8 + j + tiles*3);
float* r0_tm_4 = img0_tm.row(i * w_tm/8 + j + tiles*4);
float* r0_tm_5 = img0_tm.row(i * w_tm/8 + j + tiles*5);
float* r0_tm_6 = img0_tm.row(i * w_tm/8 + j + tiles*6);
float* r0_tm_7 = img0_tm.row(i * w_tm/8 + j + tiles*7);
for (int m=0; m<8; m++)
{
const float* tmp0 = tmp[m];
r0_tm_0[0] = tmp0[0] - tmp0[6] + (tmp0[4] - tmp0[2]) * 5.25f;
r0_tm_7[0] = tmp0[7] - tmp0[1] + (tmp0[3] - tmp0[5]) * 5.25f;
float tmp12a = (tmp0[2] + tmp0[6] - tmp0[4] * 4.25f);
float tmp12b = (tmp0[1] - tmp0[3] * 4.25f + tmp0[5]);
r0_tm_1[0] = tmp12a + tmp12b;
r0_tm_2[0] = tmp12a - tmp12b;
float tmp34a = (tmp0[6] + tmp0[2] * 0.25f - tmp0[4] * 1.25f);
float tmp34b = (tmp0[1] * 0.5f - tmp0[3] * 2.5f + tmp0[5] * 2.f);
r0_tm_3[0] = tmp34a + tmp34b;
r0_tm_4[0] = tmp34a - tmp34b;
float tmp56a = (tmp0[6] + (tmp0[2] - tmp0[4] * 1.25f) * 4.f);
float tmp56b = (tmp0[1] * 2.f - tmp0[3] * 2.5f + tmp0[5] * 0.5f);
r0_tm_5[0] = tmp56a + tmp56b;
r0_tm_6[0] = tmp56a - tmp56b;
r0_tm_0 += img0_tm.w * tiles * 8;
r0_tm_1 += img0_tm.w * tiles * 8;
r0_tm_2 += img0_tm.w * tiles * 8;
r0_tm_3 += img0_tm.w * tiles * 8;
r0_tm_4 += img0_tm.w * tiles * 8;
r0_tm_5 += img0_tm.w * tiles * 8;
r0_tm_6 += img0_tm.w * tiles * 8;
r0_tm_7 += img0_tm.w * tiles * 8;
}
#endif // __ARM_NEON
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
// permute
// bottom_blob_tm.create(1, 64 * tiles, inch);
// Mat bottom_blob_tm2(inch, tiles, 64);
Mat bottom_blob_tm2(8*inch, tiles/8 + (tiles%8)/4 + tiles%4, 64);
#pragma omp parallel for
for (int r=0; r<64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i=0;
for (; i+7<tiles; i+=8)
{
float* tm2p = tm2.row(i/8);
const float* r0 = bottom_blob_tm;
r0 += r*tiles + i;
int q=0;
for (; q<inch; q++)
{
// asm volatile("prfm pldl1keep, [%0, #256] \n" : :"r"(r0) :);
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _r0n = vld1q_f32(r0+4);
vst1q_f32(tm2p, _r0);
vst1q_f32(tm2p+4, _r0n);
// tm2p[0] = r0[0];
// tm2p[1] = r0[1];
// tm2p[2] = r0[2];
// tm2p[3] = r0[3];
// tm2p[4] = r0[4];
// tm2p[5] = r0[5];
// tm2p[6] = r0[6];
// tm2p[7] = r0[7];
r0 += bottom_blob_tm.cstep;
tm2p += 8;
}
}
for (; i+3<tiles; i+=4)
{
float* tm2p = tm2.row(i/8+(i%8)/4);
const float* r0 = bottom_blob_tm;
r0 += r*tiles + i;
int q=0;
for (; q<inch; q++)
{
// asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(r0) :);
float32x4_t _r0 = vld1q_f32(r0);
vst1q_f32(tm2p, _r0);
// tm2p[0] = r0[0];
// tm2p[1] = r0[1];
// tm2p[2] = r0[2];
// tm2p[3] = r0[3];
r0 += bottom_blob_tm.cstep;
tm2p += 4;
}
}
for (; i<tiles; i++)
{
float* tm2p = tm2.row(i/8+(i%8)/4+i%4);
const float* r0 = bottom_blob_tm;
r0 += r*tiles + i;
int q=0;
for (; q<inch; q++)
{
tm2p[0] = r0[0];
r0 += bottom_blob_tm.cstep;
tm2p += 1;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(1, 64 * tiles, outch);
int nn_outch = 0;
int remain_outch_start = 0;
#if __aarch64__
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
#pragma omp parallel for
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 8;
const Mat kernel_tm0 = kernel_tm.channel(p/8);
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p+1);
Mat out2_tm = top_blob_tm.channel(p+2);
Mat out3_tm = top_blob_tm.channel(p+3);
Mat out4_tm = top_blob_tm.channel(p+4);
Mat out5_tm = top_blob_tm.channel(p+5);
Mat out6_tm = top_blob_tm.channel(p+6);
Mat out7_tm = top_blob_tm.channel(p+7);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
float* output4_tm = out4_tm;
float* output5_tm = out5_tm;
float* output6_tm = out6_tm;
float* output7_tm = out7_tm;
for (int r=0; r<64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
// tile
int i=0;
for (; i+7<tiles; i+=8)
{
const float* bb2p0 = bb2.row(i/8);
const float* ktm0 = kernel_tm0.row(r);
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
// inch loop
"lsr w4, %w20, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v1.s[0] \n"
"fmla v19.4s, v9.4s, v1.s[0] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"fmla v20.4s, v8.4s, v2.s[0] \n"
"fmla v21.4s, v9.4s, v2.s[0] \n"
"fmla v22.4s, v8.4s, v3.s[0] \n"
"fmla v23.4s, v9.4s, v3.s[0] \n"
"fmla v24.4s, v8.4s, v4.s[0] \n"
"fmla v25.4s, v9.4s, v4.s[0] \n"
"fmla v26.4s, v8.4s, v5.s[0] \n"
"fmla v27.4s, v9.4s, v5.s[0] \n"
"fmla v28.4s, v8.4s, v6.s[0] \n"
"fmla v29.4s, v9.4s, v6.s[0] \n"
"fmla v30.4s, v8.4s, v7.s[0] \n"
"fmla v31.4s, v9.4s, v7.s[0] \n"
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v11.4s, v0.s[1] \n"
"fmla v18.4s, v10.4s, v1.s[1] \n"
"fmla v19.4s, v11.4s, v1.s[1] \n"
"fmla v20.4s, v10.4s, v2.s[1] \n"
"fmla v21.4s, v11.4s, v2.s[1] \n"
"fmla v22.4s, v10.4s, v3.s[1] \n"
"fmla v23.4s, v11.4s, v3.s[1] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n"
"fmla v24.4s, v10.4s, v4.s[1] \n"
"fmla v25.4s, v11.4s, v4.s[1] \n"
"fmla v26.4s, v10.4s, v5.s[1] \n"
"fmla v27.4s, v11.4s, v5.s[1] \n"
"fmla v28.4s, v10.4s, v6.s[1] \n"
"fmla v29.4s, v11.4s, v6.s[1] \n"
"fmla v30.4s, v10.4s, v7.s[1] \n"
"fmla v31.4s, v11.4s, v7.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v13.4s, v0.s[2] \n"
"fmla v18.4s, v12.4s, v1.s[2] \n"
"fmla v19.4s, v13.4s, v1.s[2] \n"
"fmla v20.4s, v12.4s, v2.s[2] \n"
"fmla v21.4s, v13.4s, v2.s[2] \n"
"fmla v22.4s, v12.4s, v3.s[2] \n"
"fmla v23.4s, v13.4s, v3.s[2] \n"
"fmla v24.4s, v12.4s, v4.s[2] \n"
"fmla v25.4s, v13.4s, v4.s[2] \n"
"fmla v26.4s, v12.4s, v5.s[2] \n"
"fmla v27.4s, v13.4s, v5.s[2] \n"
"fmla v28.4s, v12.4s, v6.s[2] \n"
"fmla v29.4s, v13.4s, v6.s[2] \n"
"fmla v30.4s, v12.4s, v7.s[2] \n"
"fmla v31.4s, v13.4s, v7.s[2] \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v15.4s, v0.s[3] \n"
"fmla v18.4s, v14.4s, v1.s[3] \n"
"fmla v19.4s, v15.4s, v1.s[3] \n"
"fmla v20.4s, v14.4s, v2.s[3] \n"
"fmla v21.4s, v15.4s, v2.s[3] \n"
"fmla v22.4s, v14.4s, v3.s[3] \n"
"fmla v23.4s, v15.4s, v3.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v14.4s, v4.s[3] \n"
"fmla v25.4s, v15.4s, v4.s[3] \n"
"fmla v26.4s, v14.4s, v5.s[3] \n"
"fmla v27.4s, v15.4s, v5.s[3] \n"
"fmla v28.4s, v14.4s, v6.s[3] \n"
"fmla v29.4s, v15.4s, v6.s[3] \n"
"fmla v30.4s, v14.4s, v7.s[3] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w20, #3 \n"// w4 = remain = tiles & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #256] \n"
"ld1 {v8.4s, v9.4s}, [%8], #32 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s, v17.4s}, [%0], #32 \n"
"st1 {v18.4s, v19.4s}, [%1], #32 \n"
"st1 {v20.4s, v21.4s}, [%2], #32 \n"
"st1 {v22.4s, v23.4s}, [%3], #32 \n"
"st1 {v24.4s, v25.4s}, [%4], #32 \n"
"st1 {v26.4s, v27.4s}, [%5], #32 \n"
"st1 {v28.4s, v29.4s}, [%6], #32 \n"
"st1 {v30.4s, v31.4s}, [%7], #32 \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(bb2p0), // %8
"=r"(ktm0) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(bb2p0),
"9"(ktm0),
"r"(inch) // %20
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; i+3<tiles; i+=4)
{
const float* bb2p0 = bb2.row(i/8+(i%8)/4);
const float* ktm0 = kernel_tm0.row(r);
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
// inch loop
"lsr w4, %w20, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v20.4s, v9.4s, v4.s[1] \n"
"fmla v21.4s, v9.4s, v5.s[1] \n"
"fmla v22.4s, v9.4s, v6.s[1] \n"
"fmla v23.4s, v9.4s, v7.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"fmla v20.4s, v10.4s, v4.s[2] \n"
"fmla v21.4s, v10.4s, v5.s[2] \n"
"fmla v22.4s, v10.4s, v6.s[2] \n"
"fmla v23.4s, v10.4s, v7.s[2] \n"
"subs w4, w4, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"fmla v20.4s, v11.4s, v4.s[3] \n"
"fmla v21.4s, v11.4s, v5.s[3] \n"
"fmla v22.4s, v11.4s, v6.s[3] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w20, #3 \n"// w4 = remain = tiles & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.4s}, [%8], #16 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"st1 {v20.4s}, [%4], #16 \n"
"st1 {v21.4s}, [%5], #16 \n"
"st1 {v22.4s}, [%6], #16 \n"
"st1 {v23.4s}, [%7], #16 \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(output4_tm), // %4
"=r"(output5_tm), // %5
"=r"(output6_tm), // %6
"=r"(output7_tm), // %7
"=r"(bb2p0), // %8
"=r"(ktm0) // %9
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(output4_tm),
"5"(output5_tm),
"6"(output6_tm),
"7"(output7_tm),
"8"(bb2p0),
"9"(ktm0),
"r"(inch) // %20
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
}
for (; i<tiles; i++)
{
const float* bb2p0 = bb2.row(i/8+(i%8)/4+i%4);
const float* ktm0 = kernel_tm0.row(r);
float32x4_t _sum0 = vdupq_n_f32(0.f);
float32x4_t _sum1 = vdupq_n_f32(0.f);
float32x4_t _sum2 = vdupq_n_f32(0.f);
float32x4_t _sum3 = vdupq_n_f32(0.f);
float32x4_t _sum4 = vdupq_n_f32(0.f);
float32x4_t _sum5 = vdupq_n_f32(0.f);
float32x4_t _sum6 = vdupq_n_f32(0.f);
float32x4_t _sum7 = vdupq_n_f32(0.f);
int q=0;
for (; q+3<inch; q+=4)
{
// asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :);
float32x4_t _bb2p0 = vld1q_f32(bb2p0);
bb2p0 += 4;
// asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :);
float32x4_t _ktm0 = vld1q_f32(ktm0 + 0);
float32x4_t _ktm1 = vld1q_f32(ktm0 + 4);
float32x4_t _ktm2 = vld1q_f32(ktm0 + 8);
float32x4_t _ktm3 = vld1q_f32(ktm0 + 12);
ktm0 += 16;
_sum0 = vmlaq_f32(_sum0, _bb2p0, _ktm0);
_sum1 = vmlaq_f32(_sum1, _bb2p0, _ktm1);
_sum2 = vmlaq_f32(_sum2, _bb2p0, _ktm2);
_sum3 = vmlaq_f32(_sum3, _bb2p0, _ktm3);
// asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :);
float32x4_t _ktm4 = vld1q_f32(ktm0 + 0);
float32x4_t _ktm5 = vld1q_f32(ktm0 + 4);
float32x4_t _ktm6 = vld1q_f32(ktm0 + 8);
float32x4_t _ktm7 = vld1q_f32(ktm0 + 12);
ktm0 += 16;
_sum4 = vmlaq_f32(_sum4, _bb2p0, _ktm4);
_sum5 = vmlaq_f32(_sum5, _bb2p0, _ktm5);
_sum6 = vmlaq_f32(_sum6, _bb2p0, _ktm6);
_sum7 = vmlaq_f32(_sum7, _bb2p0, _ktm7);
}
// TODO transpose and acc
float sum0 = vaddvq_f32(_sum0);
float sum1 = vaddvq_f32(_sum1);
float sum2 = vaddvq_f32(_sum2);
float sum3 = vaddvq_f32(_sum3);
float sum4 = vaddvq_f32(_sum4);
float sum5 = vaddvq_f32(_sum5);
float sum6 = vaddvq_f32(_sum6);
float sum7 = vaddvq_f32(_sum7);
for (; q<inch; q++)
{
sum0 += bb2p0[0] * ktm0[0];
sum1 += bb2p0[0] * ktm0[1];
sum2 += bb2p0[0] * ktm0[2];
sum3 += bb2p0[0] * ktm0[3];
sum4 += bb2p0[0] * ktm0[4];
sum5 += bb2p0[0] * ktm0[5];
sum6 += bb2p0[0] * ktm0[6];
sum7 += bb2p0[0] * ktm0[7];
bb2p0 += 1;
ktm0 += 8;
}
output0_tm[0] = sum0;
output1_tm[0] = sum1;
output2_tm[0] = sum2;
output3_tm[0] = sum3;
output4_tm[0] = sum4;
output5_tm[0] = sum5;
output6_tm[0] = sum6;
output7_tm[0] = sum7;
output0_tm += 1;
output1_tm += 1;
output2_tm += 1;
output3_tm += 1;
output4_tm += 1;
output5_tm += 1;
output6_tm += 1;
output7_tm += 1;
}
}
}
#endif // __aarch64__
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for
for (int pp=0; pp<nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
#if __aarch64__
const Mat kernel_tm0 = kernel_tm.channel(p/8+(p%8)/4);
#else
const Mat kernel_tm0 = kernel_tm.channel(p/4);
#endif
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p+1);
Mat out2_tm = top_blob_tm.channel(p+2);
Mat out3_tm = top_blob_tm.channel(p+3);
float* output0_tm = out0_tm;
float* output1_tm = out1_tm;
float* output2_tm = out2_tm;
float* output3_tm = out3_tm;
for (int r=0; r<64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
// tile
int i=0;
for (; i+7<tiles; i+=8)
{
const float* bb2p0 = bb2.row(i/8);
const float* ktm0 = kernel_tm0.row(r);
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
// inch loop
"lsr w4, %w12, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v1.s[0] \n"
"fmla v11.4s, v5.4s, v1.s[0] \n"
"fmla v12.4s, v4.4s, v2.s[0] \n"
"fmla v13.4s, v5.4s, v2.s[0] \n"
"fmla v14.4s, v4.4s, v3.s[0] \n"
"fmla v15.4s, v5.4s, v3.s[0] \n"
"fmla v8.4s, v6.4s, v0.s[1] \n"
"fmla v9.4s, v7.4s, v0.s[1] \n"
"fmla v10.4s, v6.4s, v1.s[1] \n"
"fmla v11.4s, v7.4s, v1.s[1] \n"
"fmla v12.4s, v6.4s, v2.s[1] \n"
"fmla v13.4s, v7.4s, v2.s[1] \n"
"fmla v14.4s, v6.4s, v3.s[1] \n"
"fmla v15.4s, v7.4s, v3.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v8.4s, v16.4s, v0.s[2] \n"
"fmla v9.4s, v17.4s, v0.s[2] \n"
"fmla v10.4s, v16.4s, v1.s[2] \n"
"fmla v11.4s, v17.4s, v1.s[2] \n"
"fmla v12.4s, v16.4s, v2.s[2] \n"
"fmla v13.4s, v17.4s, v2.s[2] \n"
"fmla v14.4s, v16.4s, v3.s[2] \n"
"fmla v15.4s, v17.4s, v3.s[2] \n"
"fmla v8.4s, v18.4s, v0.s[3] \n"
"fmla v9.4s, v19.4s, v0.s[3] \n"
"fmla v10.4s, v18.4s, v1.s[3] \n"
"fmla v11.4s, v19.4s, v1.s[3] \n"
"fmla v12.4s, v18.4s, v2.s[3] \n"
"fmla v13.4s, v19.4s, v2.s[3] \n"
"fmla v14.4s, v18.4s, v3.s[3] \n"
"fmla v15.4s, v19.4s, v3.s[3] \n"
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w12, #3 \n"// w4 = remain = tiles & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4s, v5.4s}, [%4], #32 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
"st1 {v10.4s, v11.4s}, [%1], #32 \n"
"st1 {v12.4s, v13.4s}, [%2], #32 \n"
"st1 {v14.4s, v15.4s}, [%3], #32 \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(bb2p0), // %4
"=r"(ktm0) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(bb2p0),
"5"(ktm0),
"r"(inch) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"
);
#else // __aarch64__
asm volatile(
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"veor q10, q10, q10 \n"
"veor q11, q11, q11 \n"
"veor q12, q12, q12 \n"
"veor q13, q13, q13 \n"
"veor q14, q14, q14 \n"
"veor q15, q15, q15 \n"
// inch loop
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
// "vldm %4!, {d8-d15} \n"
"vld1.f32 {d8-d11}, [%4 :128]! \n"
"vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #512] \n"
// "vldm %5!, {d0-d7} \n"
"vld1.f32 {d0-d3}, [%5 :128]! \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q10, q4, d2[0] \n"
"vmla.f32 q11, q5, d2[0] \n"
"vmla.f32 q12, q4, d4[0] \n"
"vmla.f32 q13, q5, d4[0] \n"
"vmla.f32 q14, q4, d6[0] \n"
"vmla.f32 q15, q5, d6[0] \n"
"vmla.f32 q8, q6, d0[1] \n"
"vmla.f32 q9, q7, d0[1] \n"
"vmla.f32 q10, q6, d2[1] \n"
"vmla.f32 q11, q7, d2[1] \n"
"vmla.f32 q12, q6, d4[1] \n"
"vmla.f32 q13, q7, d4[1] \n"
"vmla.f32 q14, q6, d6[1] \n"
"vmla.f32 q15, q7, d6[1] \n"
"pld [%4, #512] \n"
// "vldm %4!, {d8-d15} \n"
"vld1.f32 {d8-d11}, [%4 :128]! \n"
"vld1.f32 {d12-d15}, [%4 :128]! \n"
"vmla.f32 q8, q4, d1[0] \n"
"vmla.f32 q9, q5, d1[0] \n"
"vmla.f32 q10, q4, d3[0] \n"
"vmla.f32 q11, q5, d3[0] \n"
"vmla.f32 q12, q4, d5[0] \n"
"vmla.f32 q13, q5, d5[0] \n"
"vmla.f32 q14, q4, d7[0] \n"
"vmla.f32 q15, q5, d7[0] \n"
"vmla.f32 q8, q6, d1[1] \n"
"vmla.f32 q9, q7, d1[1] \n"
"vmla.f32 q10, q6, d3[1] \n"
"vmla.f32 q11, q7, d3[1] \n"
"vmla.f32 q12, q6, d5[1] \n"
"vmla.f32 q13, q7, d5[1] \n"
"vmla.f32 q14, q6, d7[1] \n"
"vmla.f32 q15, q7, d7[1] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = tiles & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #256] \n"
"vld1.f32 {d8-d11}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q11, q5, d0[1] \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"vmla.f32 q15, q5, d1[1] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d19}, [%0]! \n"
"vst1.f32 {d20-d23}, [%1]! \n"
"vst1.f32 {d24-d27}, [%2]! \n"
"vst1.f32 {d28-d31}, [%3]! \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(bb2p0), // %4
"=r"(ktm0) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(bb2p0),
"5"(ktm0),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
for (; i+3<tiles; i+=4)
{
const float* bb2p0 = bb2.row(i/8+(i%8)/4);
const float* ktm0 = kernel_tm0.row(r);
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
// inch loop
"lsr w4, %w12, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v1.s[0] \n"
"fmla v10.4s, v4.4s, v2.s[0] \n"
"fmla v11.4s, v4.4s, v3.s[0] \n"
"fmla v8.4s, v5.4s, v0.s[1] \n"
"fmla v9.4s, v5.4s, v1.s[1] \n"
"fmla v10.4s, v5.4s, v2.s[1] \n"
"fmla v11.4s, v5.4s, v3.s[1] \n"
"fmla v8.4s, v6.4s, v0.s[2] \n"
"fmla v9.4s, v6.4s, v1.s[2] \n"
"fmla v10.4s, v6.4s, v2.s[2] \n"
"fmla v11.4s, v6.4s, v3.s[2] \n"
"fmla v8.4s, v7.4s, v0.s[3] \n"
"fmla v9.4s, v7.4s, v1.s[3] \n"
"fmla v10.4s, v7.4s, v2.s[3] \n"
"fmla v11.4s, v7.4s, v3.s[3] \n"
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w12, #3 \n"// w4 = remain = tiles & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4s}, [%4], #16 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s}, [%0], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
"st1 {v10.4s}, [%2], #16 \n"
"st1 {v11.4s}, [%3], #16 \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(bb2p0), // %4
"=r"(ktm0) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(bb2p0),
"5"(ktm0),
"r"(inch) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"
);
#else // __aarch64__
asm volatile(
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"veor q10, q10, q10 \n"
"veor q11, q11, q11 \n"
// inch loop
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
// "vldm %4!, {d8-d15} \n"
"vld1.f32 {d8-d11}, [%4 :128]! \n"
"vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #512] \n"
// "vldm %5!, {d0-d7} \n"
"vld1.f32 {d0-d3}, [%5 :128]! \n"
"vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q10, q4, d4[0] \n"
"vmla.f32 q11, q4, d6[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d4[1] \n"
"vmla.f32 q11, q5, d6[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d7[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"vmla.f32 q10, q7, d5[1] \n"
"vmla.f32 q11, q7, d7[1] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = tiles & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #128] \n"
"vld1.f32 {d8-d9}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d17}, [%0]! \n"
"vst1.f32 {d18-d19}, [%1]! \n"
"vst1.f32 {d20-d21}, [%2]! \n"
"vst1.f32 {d22-d23}, [%3]! \n"
: "=r"(output0_tm), // %0
"=r"(output1_tm), // %1
"=r"(output2_tm), // %2
"=r"(output3_tm), // %3
"=r"(bb2p0), // %4
"=r"(ktm0) // %5
: "0"(output0_tm),
"1"(output1_tm),
"2"(output2_tm),
"3"(output3_tm),
"4"(bb2p0),
"5"(ktm0),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"
);
#endif // __aarch64__
}
for (; i<tiles; i++)
{
const float* bb2p0 = bb2.row(i/8+(i%8)/4+i%4);
const float* ktm0 = kernel_tm0.row(r);
float32x4_t _sum0 = vdupq_n_f32(0.f);
float32x4_t _sum1 = vdupq_n_f32(0.f);
float32x4_t _sum2 = vdupq_n_f32(0.f);
float32x4_t _sum3 = vdupq_n_f32(0.f);
int q=0;
for (; q+3<inch; q+=4)
{
// asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :);
float32x4_t _bb2p0 = vld1q_f32(bb2p0);
bb2p0 += 4;
// asm volatile("prfm pldl1keep, [%0, #512] \n" : :"r"(ktm0) :);
float32x4_t _ktm0 = vld1q_f32(ktm0 + 0);
float32x4_t _ktm1 = vld1q_f32(ktm0 + 4);
float32x4_t _ktm2 = vld1q_f32(ktm0 + 8);
float32x4_t _ktm3 = vld1q_f32(ktm0 + 12);
ktm0 += 16;
_sum0 = vmlaq_f32(_sum0, _bb2p0, _ktm0);
_sum1 = vmlaq_f32(_sum1, _bb2p0, _ktm1);
_sum2 = vmlaq_f32(_sum2, _bb2p0, _ktm2);
_sum3 = vmlaq_f32(_sum3, _bb2p0, _ktm3);
}
// TODO transpose and acc
float sum0 = vaddvq_f32(_sum0);
float sum1 = vaddvq_f32(_sum1);
float sum2 = vaddvq_f32(_sum2);
float sum3 = vaddvq_f32(_sum3);
for (; q<inch; q++)
{
sum0 += bb2p0[0] * ktm0[0];
sum1 += bb2p0[0] * ktm0[1];
sum2 += bb2p0[0] * ktm0[2];
sum3 += bb2p0[0] * ktm0[3];
bb2p0 += 1;
ktm0 += 4;
}
output0_tm[0] = sum0;
output1_tm[0] = sum1;
output2_tm[0] = sum2;
output3_tm[0] = sum3;
output0_tm += 1;
output1_tm += 1;
output2_tm += 1;
output3_tm += 1;
}
}
}
remain_outch_start += nn_outch << 2;
for (int p=remain_outch_start; p<outch; p++)
{
#if __aarch64__
const Mat kernel_tm0 = kernel_tm.channel(p/8+(p%8)/4+p%4);
#else
const Mat kernel_tm0 = kernel_tm.channel(p/4+p%4);
#endif
Mat out0_tm = top_blob_tm.channel(p);
float* output0_tm = out0_tm;
for (int r=0; r<64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
// tile
int i=0;
for (; i+7<tiles; i+=8)
{
const float* bb2p0 = bb2.row(i/8);
const float* ktm0 = kernel_tm0.row(r);
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
// inch loop
"lsr w4, %w6, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v8.4s, v6.4s, v0.s[1] \n"
"fmla v9.4s, v7.4s, v0.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n"
"fmla v8.4s, v12.4s, v0.s[2] \n"
"fmla v9.4s, v13.4s, v0.s[2] \n"
"fmla v8.4s, v14.4s, v0.s[3] \n"
"fmla v9.4s, v15.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w6, #3 \n"// w4 = remain = tiles & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4s, v5.4s}, [%4], #32 \n"
"prfm pldl1keep, [%5, #32] \n"
"ld1r {v0.4s}, [%5], #4 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"fmla v9.4s, v5.4s, v0.4s \n"
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
: "=r"(output0_tm), // %0
"=r"(bb2p0), // %1
"=r"(ktm0) // %2
: "0"(output0_tm),
"1"(bb2p0),
"2"(ktm0),
"r"(inch) // %6
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v12", "v13", "v14", "v15"
);
#else // __aarch64__
asm volatile(
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
// inch loop
"lsr r4, %6, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
// "vldm %4!, {d8-d15} \n"
"vld1.f32 {d8-d11}, [%4 :128]! \n"
"vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q8, q6, d0[1] \n"
"vmla.f32 q9, q7, d0[1] \n"
"pld [%4, #512] \n"
// "vldm %4!, {d24-d31} \n"
"vld1.f32 {d24-d27}, [%4 :128]! \n"
"vld1.f32 {d28-d31}, [%4 :128]! \n"
"vmla.f32 q8, q12, d1[0] \n"
"vmla.f32 q9, q13, d1[0] \n"
"vmla.f32 q8, q14, d1[1] \n"
"vmla.f32 q9, q15, d1[1] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %6, #3 \n"// r4 = remain = tiles & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #256] \n"
"vld1.f32 {d8-d11}, [%4 :128]! \n"
"pld [%5, #32] \n"
"vld1.f32 {d0[],d1[]}, [%5]! \n"
"vmla.f32 q8, q4, q0 \n"
"vmla.f32 q9, q5, q0 \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d19}, [%0]! \n"
: "=r"(output0_tm), // %0
"=r"(bb2p0), // %1
"=r"(ktm0) // %2
: "0"(output0_tm),
"1"(bb2p0),
"2"(ktm0),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
}
for (; i+3<tiles; i+=4)
{
const float* bb2p0 = bb2.row(i/8+(i%8)/4);
const float* ktm0 = kernel_tm0.row(r);
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
// inch loop
"lsr w4, %w6, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v8.4s, v5.4s, v0.s[1] \n"
"fmla v8.4s, v6.4s, v0.s[2] \n"
"fmla v8.4s, v7.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w6, #3 \n"// w4 = remain = tiles & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4s}, [%4], #16 \n"
"prfm pldl1keep, [%5, #32] \n"
"ld1r {v0.4s}, [%5], #4 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s}, [%0], #16 \n"
: "=r"(output0_tm), // %0
"=r"(bb2p0), // %1
"=r"(ktm0) // %2
: "0"(output0_tm),
"1"(bb2p0),
"2"(ktm0),
"r"(inch) // %6
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8"
);
#else // __aarch64__
asm volatile(
"veor q8, q8, q8 \n"
// inch loop
"lsr r4, %6, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
// "vldm %4!, {d8-d15} \n"
"vld1.f32 {d8-d11}, [%4 :128]! \n"
"vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %6, #3 \n"// r4 = remain = tiles & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #128] \n"
"vld1.f32 {d8-d9}, [%4]! \n"
"pld [%5, #32] \n"
"vld1.f32 {d0[],d1[]}, [%5]! \n"
"vmla.f32 q8, q4, q0 \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d17}, [%0]! \n"
: "=r"(output0_tm), // %0
"=r"(bb2p0), // %1
"=r"(ktm0) // %2
: "0"(output0_tm),
"1"(bb2p0),
"2"(ktm0),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8"
);
#endif // __aarch64__
}
for (; i<tiles; i++)
{
const float* bb2p0 = bb2.row(i/8+(i%8)/4+i%4);
const float* ktm0 = kernel_tm0.row(r);
float32x4_t _sum0 = vdupq_n_f32(0.f);
int q=0;
for (; q+3<inch; q+=4)
{
// asm volatile("prfm pldl1keep, [%0, #128] \n" : :"r"(bb2p0) :);
float32x4_t _bb2p0 = vld1q_f32(bb2p0);
bb2p0 += 4;
float32x4_t _ktm0 = vld1q_f32(ktm0);
ktm0 += 4;
_sum0 = vmlaq_f32(_sum0, _bb2p0, _ktm0);
}
float sum0 = vaddvq_f32(_sum0);
for (; q<inch; q++)
{
sum0 += bb2p0[0] * ktm0[0];
bb2p0 += 1;
ktm0 += 1;
}
output0_tm[0] = sum0;
output0_tm += 1;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch);
{
// const float otm[6][8] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 32.0f, 32.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 8.0f, 8.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 4.0f, -4.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 16.0f, 16.0f, 2.0f, 2.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 32.0f, -32.0f, 1.0f, -1.0f, 1.0f}
// };
// 0 = r0 + (r1 + r2) + (r3 + r4) + (r5 + r6) * 32
// 1 = (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
// 2 = (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
// 3 = (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
// 4 = (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
// 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
#if __ARM_NEON
const float coeff[4] = { 4.f, 8.f, 16.f, 32.f };
float32x4_t _coeff = vld1q_f32(coeff);
#endif // __ARM_NEON
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = w_tm/8 * h_tm/8;
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
#if __ARM_NEON
float32x2_t _bias0 = vdup_n_f32(bias0);
#endif // __ARM_NEON
float tmp[6][8];
// tile
for (int i=0; i<outh/6; i++)
{
for (int j=0; j<outw/6; j++)
{
#if __ARM_NEON
#if __aarch64__
const float* output0_tm0 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm1 = out0_tm.row(i * w_tm/8 + j + tiles*8);
const float* output0_tm2 = out0_tm.row(i * w_tm/8 + j + tiles*16);
const float* output0_tm3 = out0_tm.row(i * w_tm/8 + j + tiles*24);
for (int m=0; m+3<8; m+=4)
{
float32x4_t _output0_tm_00;
float32x4_t _output0_tm_11;
float32x4_t _output0_tm_22;
float32x4_t _output0_tm_33;
float32x4_t _output0_tm_44;
float32x4_t _output0_tm_55;
float32x4_t _output0_tm_66;
float32x4_t _output0_tm_77;
_output0_tm_00 = vsetq_lane_f32(output0_tm0[0], _output0_tm_00, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_00 = vsetq_lane_f32(output0_tm1[0], _output0_tm_00, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_00 = vsetq_lane_f32(output0_tm2[0], _output0_tm_00, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_00 = vsetq_lane_f32(output0_tm3[0], _output0_tm_00, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_11 = vsetq_lane_f32(output0_tm0[0], _output0_tm_11, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_11 = vsetq_lane_f32(output0_tm1[0], _output0_tm_11, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_11 = vsetq_lane_f32(output0_tm2[0], _output0_tm_11, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_11 = vsetq_lane_f32(output0_tm3[0], _output0_tm_11, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_22 = vsetq_lane_f32(output0_tm0[0], _output0_tm_22, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_22 = vsetq_lane_f32(output0_tm1[0], _output0_tm_22, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_22 = vsetq_lane_f32(output0_tm2[0], _output0_tm_22, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_22 = vsetq_lane_f32(output0_tm3[0], _output0_tm_22, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_33 = vsetq_lane_f32(output0_tm0[0], _output0_tm_33, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_33 = vsetq_lane_f32(output0_tm1[0], _output0_tm_33, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_33 = vsetq_lane_f32(output0_tm2[0], _output0_tm_33, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_33 = vsetq_lane_f32(output0_tm3[0], _output0_tm_33, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_44 = vsetq_lane_f32(output0_tm0[0], _output0_tm_44, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_44 = vsetq_lane_f32(output0_tm1[0], _output0_tm_44, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_44 = vsetq_lane_f32(output0_tm2[0], _output0_tm_44, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_44 = vsetq_lane_f32(output0_tm3[0], _output0_tm_44, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_55 = vsetq_lane_f32(output0_tm0[0], _output0_tm_55, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_55 = vsetq_lane_f32(output0_tm1[0], _output0_tm_55, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_55 = vsetq_lane_f32(output0_tm2[0], _output0_tm_55, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_55 = vsetq_lane_f32(output0_tm3[0], _output0_tm_55, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_66 = vsetq_lane_f32(output0_tm0[0], _output0_tm_66, 0);
output0_tm0 += out0_tm.w * tiles;
_output0_tm_66 = vsetq_lane_f32(output0_tm1[0], _output0_tm_66, 1);
output0_tm1 += out0_tm.w * tiles;
_output0_tm_66 = vsetq_lane_f32(output0_tm2[0], _output0_tm_66, 2);
output0_tm2 += out0_tm.w * tiles;
_output0_tm_66 = vsetq_lane_f32(output0_tm3[0], _output0_tm_66, 3);
output0_tm3 += out0_tm.w * tiles;
_output0_tm_77 = vsetq_lane_f32(output0_tm0[0], _output0_tm_77, 0);
_output0_tm_77 = vsetq_lane_f32(output0_tm1[0], _output0_tm_77, 1);
_output0_tm_77 = vsetq_lane_f32(output0_tm2[0], _output0_tm_77, 2);
_output0_tm_77 = vsetq_lane_f32(output0_tm3[0], _output0_tm_77, 3);
float32x4_t _tmp024a = vaddq_f32(_output0_tm_11, _output0_tm_22);
float32x4_t _tmp135a = vsubq_f32(_output0_tm_11, _output0_tm_22);
float32x4_t _tmp024b = vaddq_f32(_output0_tm_33, _output0_tm_44);
float32x4_t _tmp135b = vsubq_f32(_output0_tm_33, _output0_tm_44);
float32x4_t _tmp024c = vaddq_f32(_output0_tm_55, _output0_tm_66);
float32x4_t _tmp135c = vsubq_f32(_output0_tm_55, _output0_tm_66);
float32x4_t _tmp0 = vaddq_f32(_output0_tm_00, _tmp024a);
_tmp0 = vmlaq_lane_f32(_tmp0, _tmp024c, vget_high_f32(_coeff), 1);
_tmp0 = vaddq_f32(_tmp0, _tmp024b);
float32x4_t _tmp2 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0);
_tmp2 = vmlaq_lane_f32(_tmp2, _tmp024c, vget_low_f32(_coeff), 1);
float32x4_t _tmp4 = vmlaq_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0);
_tmp4 = vaddq_f32(_tmp4, _tmp024c);
_tmp4 = vaddq_f32(_tmp4, _tmp024c);
vst1q_f32(&tmp[0][m], _tmp0);
vst1q_f32(&tmp[2][m], _tmp2);
vst1q_f32(&tmp[4][m], _tmp4);
float32x4_t _tmp1 = vmlaq_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0);
_tmp1 = vaddq_f32(_tmp1, _tmp135b);
_tmp1 = vaddq_f32(_tmp1, _tmp135b);
float32x4_t _tmp3 = vmlaq_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1);
_tmp3 = vmlaq_lane_f32(_tmp3, _tmp135c, vget_low_f32(_coeff), 0);
float32x4_t _tmp5 = vaddq_f32(_output0_tm_77, _tmp135a);
_tmp5 = vmlaq_lane_f32(_tmp5, _tmp135b, vget_high_f32(_coeff), 1);
_tmp5 = vaddq_f32(_tmp5, _tmp135c);
vst1q_f32(&tmp[1][m], _tmp1);
vst1q_f32(&tmp[3][m], _tmp3);
vst1q_f32(&tmp[5][m], _tmp5);
output0_tm0 += out0_tm.w*tiles*25;
output0_tm1 += out0_tm.w*tiles*25;
output0_tm2 += out0_tm.w*tiles*25;
output0_tm3 += out0_tm.w*tiles*25;
}
const float* t0 = tmp[0];
const float* t1 = tmp[1];
float* output0 = out0.row(i * 6) + j * 6;
float* output1 = output0 + outw;
for (int m=0; m+1<6; m+=2)
{
float32x4_t _t0_0123 = vld1q_f32(t0);
float32x4_t _t0_4567 = vld1q_f32(t0+4);
float32x4_t _t1_0123 = vld1q_f32(t1);
float32x4_t _t1_4567 = vld1q_f32(t1+4);
float32x4x2_t _t01_00221133 = vtrnq_f32(_t0_0123, _t1_0123);
float32x4x2_t _t01_44665577 = vtrnq_f32(_t0_4567, _t1_4567);
float32x2_t _t_00 = vget_low_f32(_t01_00221133.val[0]);
float32x2_t _t_11 = vget_low_f32(_t01_00221133.val[1]);
float32x2_t _t_22 = vget_high_f32(_t01_00221133.val[0]);
float32x2_t _t_33 = vget_high_f32(_t01_00221133.val[1]);
float32x2_t _t_44 = vget_low_f32(_t01_44665577.val[0]);
float32x2_t _t_55 = vget_low_f32(_t01_44665577.val[1]);
float32x2_t _t_66 = vget_high_f32(_t01_44665577.val[0]);
float32x2_t _t_77 = vget_high_f32(_t01_44665577.val[1]);
float32x2_t _tmp024a = vadd_f32(_t_11, _t_22);
float32x2_t _tmp135a = vsub_f32(_t_11, _t_22);
float32x2_t _tmp024b = vadd_f32(_t_33, _t_44);
float32x2_t _tmp135b = vsub_f32(_t_33, _t_44);
float32x2_t _tmp024c = vadd_f32(_t_55, _t_66);
float32x2_t _tmp135c = vsub_f32(_t_55, _t_66);
float32x2_t _output_0 = vadd_f32(_t_00, _tmp024a);
_output_0 = vmla_lane_f32(_output_0, _tmp024c, vget_high_f32(_coeff), 1);
_output_0 = vadd_f32(_output_0, _tmp024b);
_output_0 = vadd_f32(_output_0, _bias0);
float32x2_t _output_2 = vmla_lane_f32(_tmp024a, _tmp024b, vget_low_f32(_coeff), 0);
_output_2 = vmla_lane_f32(_output_2, _tmp024c, vget_low_f32(_coeff), 1);
_output_2 = vadd_f32(_output_2, _bias0);
float32x2_t _output_4 = vmla_lane_f32(_tmp024a, _tmp024b, vget_high_f32(_coeff), 0);
_output_4 = vadd_f32(_output_4, _tmp024c);
_output_4 = vadd_f32(_output_4, _tmp024c);
_output_4 = vadd_f32(_output_4, _bias0);
output0[0] = vget_lane_f32(_output_0, 0);
output1[0] = vget_lane_f32(_output_0, 1);
output0[2] = vget_lane_f32(_output_2, 0);
output1[2] = vget_lane_f32(_output_2, 1);
output0[4] = vget_lane_f32(_output_4, 0);
output1[4] = vget_lane_f32(_output_4, 1);
float32x2_t _output_1 = vmla_lane_f32(_tmp135a, _tmp135c, vget_high_f32(_coeff), 0);
_output_1 = vadd_f32(_output_1, _tmp135b);
_output_1 = vadd_f32(_output_1, _tmp135b);
_output_1 = vadd_f32(_output_1, _bias0);
float32x2_t _output_3 = vmla_lane_f32(_tmp135a, _tmp135b, vget_low_f32(_coeff), 1);
_output_3 = vmla_lane_f32(_output_3, _tmp135c, vget_low_f32(_coeff), 0);
_output_3 = vadd_f32(_output_3, _bias0);
float32x2_t _output_5 = vadd_f32(_t_77, _tmp135a);
_output_5 = vmla_lane_f32(_output_5, _tmp135b, vget_high_f32(_coeff), 1);
_output_5 = vadd_f32(_output_5, _tmp135c);
_output_5 = vadd_f32(_output_5, _bias0);
output0[1] = vget_lane_f32(_output_1, 0);
output1[1] = vget_lane_f32(_output_1, 1);
output0[3] = vget_lane_f32(_output_3, 0);
output1[3] = vget_lane_f32(_output_3, 1);
output0[5] = vget_lane_f32(_output_5, 0);
output1[5] = vget_lane_f32(_output_5, 1);
t0 += 8*2;
t1 += 8*2;
output0 += outw*2;
output1 += outw*2;
}
#else // __aarch64__
const float* output0_tm0_0 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm1_0 = out0_tm.row(i * w_tm/8 + j + tiles*8);
const float* output0_tm2_0 = out0_tm.row(i * w_tm/8 + j + tiles*16);
const float* output0_tm3_0 = out0_tm.row(i * w_tm/8 + j + tiles*24);
const float* output0_tm0_4 = out0_tm.row(i * w_tm/8 + j + tiles*32);
const float* output0_tm1_4 = out0_tm.row(i * w_tm/8 + j + tiles*40);
const float* output0_tm2_4 = out0_tm.row(i * w_tm/8 + j + tiles*48);
const float* output0_tm3_4 = out0_tm.row(i * w_tm/8 + j + tiles*56);
float* t0 = tmp[0];
float* t1 = tmp[1];
// int step = out0_tm.w * tiles * 2*4 *4;
int step = out0_tm.w * tiles *4;
asm volatile(
// loop0
// "vld1.f32 {d16-d17}, [%2], %21 \n"
// "vld1.f32 {d18-d19}, [%3], %21 \n"
// "vld1.f32 {d20-d21}, [%4], %21 \n"
// "vld1.f32 {d22-d23}, [%5], %21 \n"
// "vld1.f32 {d24-d25}, [%6], %21 \n"
// "vld1.f32 {d26-d27}, [%7], %21 \n"
// "vld1.f32 {d28-d29}, [%8], %21 \n"
// "vld1.f32 {d30-d31}, [%9], %21 \n"
// "vtrn.32 q8, q10 \n"
// "vtrn.32 q9, q11 \n"
// "vtrn.32 q12, q14 \n"
// "vtrn.32 q13, q15 \n"
// "vswp d17, d24 \n"
// "vswp d19, d26 \n"
// "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
// "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vld1.f32 {d16[0]}, [%2], %21 \n"
"vld1.f32 {d16[1]}, [%3], %21 \n"
"vld1.f32 {d17[0]}, [%4], %21 \n"
"vld1.f32 {d17[1]}, [%5], %21 \n"
"vld1.f32 {d20[0]}, [%2], %21 \n"
"vld1.f32 {d20[1]}, [%3], %21 \n"
"vld1.f32 {d21[0]}, [%4], %21 \n"
"vld1.f32 {d21[1]}, [%5], %21 \n"
"vld1.f32 {d24[0]}, [%2], %21 \n"
"vld1.f32 {d24[1]}, [%3], %21 \n"
"vld1.f32 {d25[0]}, [%4], %21 \n"
"vld1.f32 {d25[1]}, [%5], %21 \n"
"vadd.f32 q2, q10, q12 \n"
"vsub.f32 q3, q10, q12 \n"
"vld1.f32 {d28[0]}, [%2], %21 \n"
"vld1.f32 {d28[1]}, [%3], %21 \n"
"vld1.f32 {d29[0]}, [%4], %21 \n"
"vld1.f32 {d29[1]}, [%5], %21 \n"
"vld1.f32 {d18[0]}, [%2], %21 \n"
"vld1.f32 {d18[1]}, [%3], %21 \n"
"vld1.f32 {d19[0]}, [%4], %21 \n"
"vld1.f32 {d19[1]}, [%5], %21 \n"
"vadd.f32 q4, q14, q9 \n"
"vsub.f32 q5, q14, q9 \n"
"vld1.f32 {d22[0]}, [%2], %21 \n"
"vld1.f32 {d22[1]}, [%3], %21 \n"
"vld1.f32 {d23[0]}, [%4], %21 \n"
"vld1.f32 {d23[1]}, [%5], %21 \n"
"vld1.f32 {d26[0]}, [%2], %21 \n"
"vld1.f32 {d26[1]}, [%3], %21 \n"
"vld1.f32 {d27[0]}, [%4], %21 \n"
"vld1.f32 {d27[1]}, [%5], %21 \n"
"vadd.f32 q6, q11, q13 \n"
"vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14
"vld1.f32 {d30[0]}, [%2] \n"
"vld1.f32 {d30[1]}, [%3] \n"
"vld1.f32 {d31[0]}, [%4] \n"
"vld1.f32 {d31[1]}, [%5] \n"
"vmov q9, q3 \n"
"vadd.f32 q8, q8, q2 \n"
"vmla.f32 q9, q7, %f20[0] \n"
"vmov q12, q2 \n"
"vmov q10, q2 \n"
"vmov q11, q3 \n"
"vmla.f32 q12, q4, %f20[0] \n"
"vadd.f32 q15, q15, q3 \n"
"vmla.f32 q8, q6, %f20[1] \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q4, %e20[0] \n"
"vmla.f32 q11, q5, %e20[1] \n"
"vadd.f32 q12, q12, q6 \n"
"vmla.f32 q15, q5, %f20[1] \n"
"vadd.f32 q8, q8, q4 \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q6, %e20[1] \n"
"vmla.f32 q11, q7, %e20[0] \n"
"vadd.f32 q12, q12, q6 \n"
"vadd.f32 q15, q15, q7 \n"
"vst1.f32 {d16-d17}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d18-d19}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d20-d21}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d22-d23}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d24-d25}, [%0] \n"
"sub %0, %0, #112 \n"
"vst1.f32 {d30-d31}, [%1] \n"
"sub %1, %1, #112 \n"
// loop1
// "vld1.f32 {d16-d17}, [%2] \n"
// "vld1.f32 {d18-d19}, [%3] \n"
// "vld1.f32 {d20-d21}, [%4] \n"
// "vld1.f32 {d22-d23}, [%5] \n"
// "vld1.f32 {d24-d25}, [%6] \n"
// "vld1.f32 {d26-d27}, [%7] \n"
// "vld1.f32 {d28-d29}, [%8] \n"
// "vld1.f32 {d30-d31}, [%9] \n"
// "vtrn.32 q8, q10 \n"
// "vtrn.32 q9, q11 \n"
// "vtrn.32 q12, q14 \n"
// "vtrn.32 q13, q15 \n"
// "vswp d17, d24 \n"
// "vswp d19, d26 \n"
// "vswp d21, d28 \n"// q8 = 00 q9 = 44 q10 = 11 q11 = 55
// "vswp d23, d30 \n"// q12 = 22 q13 = 66 q14 = 33 q15 = 77
"vld1.f32 {d16[0]}, [%6], %21 \n"
"vld1.f32 {d16[1]}, [%7], %21 \n"
"vld1.f32 {d17[0]}, [%8], %21 \n"
"vld1.f32 {d17[1]}, [%9], %21 \n"
"vld1.f32 {d20[0]}, [%6], %21 \n"
"vld1.f32 {d20[1]}, [%7], %21 \n"
"vld1.f32 {d21[0]}, [%8], %21 \n"
"vld1.f32 {d21[1]}, [%9], %21 \n"
"vld1.f32 {d24[0]}, [%6], %21 \n"
"vld1.f32 {d24[1]}, [%7], %21 \n"
"vld1.f32 {d25[0]}, [%8], %21 \n"
"vld1.f32 {d25[1]}, [%9], %21 \n"
"vadd.f32 q2, q10, q12 \n"
"vsub.f32 q3, q10, q12 \n"
"vld1.f32 {d28[0]}, [%6], %21 \n"
"vld1.f32 {d28[1]}, [%7], %21 \n"
"vld1.f32 {d29[0]}, [%8], %21 \n"
"vld1.f32 {d29[1]}, [%9], %21 \n"
"vld1.f32 {d18[0]}, [%6], %21 \n"
"vld1.f32 {d18[1]}, [%7], %21 \n"
"vld1.f32 {d19[0]}, [%8], %21 \n"
"vld1.f32 {d19[1]}, [%9], %21 \n"
"vadd.f32 q4, q14, q9 \n"
"vsub.f32 q5, q14, q9 \n"
"vld1.f32 {d22[0]}, [%6], %21 \n"
"vld1.f32 {d22[1]}, [%7], %21 \n"
"vld1.f32 {d23[0]}, [%8], %21 \n"
"vld1.f32 {d23[1]}, [%9], %21 \n"
"vld1.f32 {d26[0]}, [%6], %21 \n"
"vld1.f32 {d26[1]}, [%7], %21 \n"
"vld1.f32 {d27[0]}, [%8], %21 \n"
"vld1.f32 {d27[1]}, [%9], %21 \n"
"vadd.f32 q6, q11, q13 \n"
"vsub.f32 q7, q11, q13 \n"// spare q9 q10 q11 q12 q13 q14
"vld1.f32 {d30[0]}, [%6] \n"
"vld1.f32 {d30[1]}, [%7] \n"
"vld1.f32 {d31[0]}, [%8] \n"
"vld1.f32 {d31[1]}, [%9] \n"
"vmov q9, q3 \n"
"vadd.f32 q8, q8, q2 \n"
"vmla.f32 q9, q7, %f20[0] \n"
"vmov q12, q2 \n"
"vmov q10, q2 \n"
"vmov q11, q3 \n"
"vmla.f32 q12, q4, %f20[0] \n"
"vadd.f32 q15, q15, q3 \n"
"vmla.f32 q8, q6, %f20[1] \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q4, %e20[0] \n"
"vmla.f32 q11, q5, %e20[1] \n"
"vadd.f32 q12, q12, q6 \n"
"vmla.f32 q15, q5, %f20[1] \n"
"vadd.f32 q8, q8, q4 \n"
"vadd.f32 q9, q9, q5 \n"
"vmla.f32 q10, q6, %e20[1] \n"
"vmla.f32 q11, q7, %e20[0] \n"
"vadd.f32 q12, q12, q6 \n"
"vadd.f32 q15, q15, q7 \n"
"vst1.f32 {d16-d17}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d18-d19}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d20-d21}, [%0] \n"
"add %0, %0, #64 \n"
"vst1.f32 {d22-d23}, [%1] \n"
"add %1, %1, #64 \n"
"vst1.f32 {d24-d25}, [%0] \n"
"vst1.f32 {d30-d31}, [%1] \n"
: "=r"(t0), // %0
"=r"(t1), // %1
"=r"(output0_tm0_0), // %2
"=r"(output0_tm1_0), // %3
"=r"(output0_tm2_0), // %4
"=r"(output0_tm3_0), // %5
"=r"(output0_tm0_4), // %6
"=r"(output0_tm1_4), // %7
"=r"(output0_tm2_4), // %8
"=r"(output0_tm3_4) // %9
: "0"(t0),
"1"(t1),
"2"(output0_tm0_0),
"3"(output0_tm1_0),
"4"(output0_tm2_0),
"5"(output0_tm3_0),
"6"(output0_tm0_4),
"7"(output0_tm1_4),
"8"(output0_tm2_4),
"9"(output0_tm3_4),
"w"(_coeff), // %20
"r"(step) // %21
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
t0 = tmp[0];
t1 = tmp[1];
float* output0 = out0.row(i * 6) + j * 6;
float* output1 = output0 + outw;
int stepw = outw*2 * 4;
asm volatile(
// loop0
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n"// _bias0
"vadd.f32 d20, d20, %P9 \n"// _bias0
"vadd.f32 d17, d17, %P9 \n"// _bias0
"vadd.f32 d21, d21, %P9 \n"// _bias0
"vadd.f32 d18, d18, %P9 \n"// _bias0
"vadd.f32 d22, d22, %P9 \n"// _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
// loop1
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n"// _bias0
"vadd.f32 d20, d20, %P9 \n"// _bias0
"vadd.f32 d17, d17, %P9 \n"// _bias0
"vadd.f32 d21, d21, %P9 \n"// _bias0
"vadd.f32 d18, d18, %P9 \n"// _bias0
"vadd.f32 d22, d22, %P9 \n"// _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
// loop2
"vld1.f32 {d16-d19}, [%2] \n"
"vld1.f32 {d20-d23}, [%3] \n"
"add %2, %2, #64 \n"
"add %3, %3, #64 \n"
"vtrn.32 q8, q10 \n"// q8 = 0 2 q10 = 1 3
"vtrn.32 q9, q11 \n"// q9 = 4 6 q11 = 5 7
"vadd.f32 d4, d20, d17 \n"
"vsub.f32 d5, d20, d17 \n"
"vadd.f32 d6, d21, d18 \n"
"vsub.f32 d7, d21, d18 \n"
"vadd.f32 d8, d22, d19 \n"
"vsub.f32 d9, d22, d19 \n"// spare d17 ~ d22
"vmov d20, d5 \n"
"vmov d18, d4 \n"
"vadd.f32 d16, d16, d4 \n"
"vmla.f32 d20, d9, %f8[0] \n"
"vmov d17, d4 \n"
"vmov d21, d5 \n"
"vmla.f32 d18, d6, %f8[0] \n"
"vadd.f32 d22, d23, d5 \n"
"vmla.f32 d16, d8, %f8[1] \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d6, %e8[0] \n"
"vmla.f32 d21, d7, %e8[1] \n"
"vadd.f32 d18, d18, d8 \n"
"vmla.f32 d22, d7, %f8[1] \n"
"vadd.f32 d16, d16, d6 \n"
"vadd.f32 d20, d20, d7 \n"
"vmla.f32 d17, d8, %e8[1] \n"
"vmla.f32 d21, d9, %e8[0] \n"
"vadd.f32 d18, d18, d8 \n"
"vadd.f32 d22, d22, d9 \n"
"vadd.f32 d16, d16, %P9 \n"// _bias0
"vadd.f32 d20, d20, %P9 \n"// _bias0
"vadd.f32 d17, d17, %P9 \n"// _bias0
"vadd.f32 d21, d21, %P9 \n"// _bias0
"vadd.f32 d18, d18, %P9 \n"// _bias0
"vadd.f32 d22, d22, %P9 \n"// _bias0
"vtrn.f32 q8, q10 \n"
"vtrn.f32 d18, d22 \n"
"vst1.f32 {d16-d18}, [%0], %10 \n"
"vst1.f32 {d20-d22}, [%1], %10 \n"
: "=r"(output0), // %0
"=r"(output1), // %1
"=r"(t0), // %2
"=r"(t1) // %3
: "0"(output0),
"1"(output1),
"2"(t0),
"3"(t1),
"w"(_coeff), // %8
"w"(_bias0), // %9
"r"(stepw) // %10
: "memory", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
const float* output0_tm_0 = out0_tm.row(i * w_tm/8 + j);
const float* output0_tm_1 = out0_tm.row(i * w_tm/8 + j + tiles);
const float* output0_tm_2 = out0_tm.row(i * w_tm/8 + j + tiles*2);
const float* output0_tm_3 = out0_tm.row(i * w_tm/8 + j + tiles*3);
const float* output0_tm_4 = out0_tm.row(i * w_tm/8 + j + tiles*4);
const float* output0_tm_5 = out0_tm.row(i * w_tm/8 + j + tiles*5);
const float* output0_tm_6 = out0_tm.row(i * w_tm/8 + j + tiles*6);
const float* output0_tm_7 = out0_tm.row(i * w_tm/8 + j + tiles*7);
for (int m=0; m<8; m++)
{
float tmp024a = output0_tm_1[0] + output0_tm_2[0];
float tmp135a = output0_tm_1[0] - output0_tm_2[0];
float tmp024b = output0_tm_3[0] + output0_tm_4[0];
float tmp135b = output0_tm_3[0] - output0_tm_4[0];
float tmp024c = output0_tm_5[0] + output0_tm_6[0];
float tmp135c = output0_tm_5[0] - output0_tm_6[0];
tmp[0][m] = output0_tm_0[0] + tmp024a + tmp024b + tmp024c * 32;
tmp[2][m] = tmp024a + tmp024b * 4 + tmp024c * 8;
tmp[4][m] = tmp024a + tmp024b * 16 + tmp024c + tmp024c;
tmp[1][m] = tmp135a + tmp135b + tmp135b + tmp135c * 16;
tmp[3][m] = tmp135a + tmp135b * 8 + tmp135c * 4;
tmp[5][m] = output0_tm_7[0] + tmp135a + tmp135b * 32 + tmp135c;
output0_tm_0 += out0_tm.w * tiles * 8;
output0_tm_1 += out0_tm.w * tiles * 8;
output0_tm_2 += out0_tm.w * tiles * 8;
output0_tm_3 += out0_tm.w * tiles * 8;
output0_tm_4 += out0_tm.w * tiles * 8;
output0_tm_5 += out0_tm.w * tiles * 8;
output0_tm_6 += out0_tm.w * tiles * 8;
output0_tm_7 += out0_tm.w * tiles * 8;
}
float* output0 = out0.row(i * 6) + j * 6;
for (int m=0; m<6; m++)
{
const float* tmp0 = tmp[m];
float tmp024a = tmp0[1] + tmp0[2];
float tmp135a = tmp0[1] - tmp0[2];
float tmp024b = tmp0[3] + tmp0[4];
float tmp135b = tmp0[3] - tmp0[4];
float tmp024c = tmp0[5] + tmp0[6];
float tmp135c = tmp0[5] - tmp0[6];
output0[0] = bias0 + tmp0[0] + tmp024a + tmp024b + tmp024c * 32;
output0[2] = bias0 + tmp024a + tmp024b * 4 + tmp024c * 8;
output0[4] = bias0 + tmp024a + tmp024b * 16 + tmp024c + tmp024c;
output0[1] = bias0 + tmp135a + tmp135b + tmp135b + tmp135c * 16;
output0[3] = bias0 + tmp135a + tmp135b * 8 + tmp135c * 4;
output0[5] = bias0 + tmp0[7] + tmp135a + tmp135b * 32 + tmp135c;
output0 += outw;
}
#endif // __ARM_NEON
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w);
}
// conv3x3s2_neon: 3x3 convolution with stride 2, NEON-accelerated.
// Accumulates into top_blob (which is pre-filled with the bias), processing
// output channels two at a time and falling back to a single-channel loop for
// an odd trailing channel. The inner NEON paths compute 4 output columns per
// iteration; a scalar/horizontal-add path handles the remainder.
static void conv3x3s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // After one output row the input row pointers have advanced 2*outw floats;
    // skip the rest of that input row plus one whole row (vertical stride 2).
    const int tailstep = w - 2*outw + w;

    const float* kernel = _kernel;
    const float* bias = _bias;

    // Pair up output channels; remain_outch_start is the first unpaired channel.
    int nn_outch = outch >> 1;
    int remain_outch_start = nn_outch << 1;

    #pragma omp parallel for
    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 2;

        Mat out0 = top_blob.channel(p);
        Mat out1 = top_blob.channel(p+1);

        const float bias0 = bias ? bias[p] : 0.f;
        const float bias1 = bias ? bias[p+1] : 0.f;

        // Seed the outputs with the bias; the loops below accumulate on top.
        out0.fill(bias0);
        out1.fill(bias1);

        // 9 kernel floats per input channel, for each of the two output channels.
        const float* k0 = kernel + p*inch*9;
        const float* k1 = kernel + (p+1)*inch*9;

        for (int q=0; q<inch; q++)
        {
            float* outptr0 = out0;
            float* outptr1 = out1;

            const float* img0 = bottom_blob.channel(q);

            // Three consecutive input rows feed one output row.
            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;

#if __ARM_NEON
            // Kernel rows loaded as 4-float vectors; lane 3 is unused (each
            // load reads one float past the 3-wide kernel row).
            float32x4_t _k00 = vld1q_f32(k0);
            float32x4_t _k03 = vld1q_f32(k0+3);
            float32x4_t _k06 = vld1q_f32(k0+6);

            float32x4_t _k10 = vld1q_f32(k1);
            float32x4_t _k13 = vld1q_f32(k1+3);
            float32x4_t _k16 = vld1q_f32(k1+6);
#endif // __ARM_NEON

            int i = 0;

            for (; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;      // NEON iterations: 4 outputs each
                int remain = outw & 3;   // leftover outputs done below
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                    // ld2 deinterleaves even/odd input columns (stride-2 gather);
                    // ext builds the "+2 offset" column needed by kernel tap 2.
                    asm volatile(
                        "prfm pldl1keep, [%3, #256] \n"
                        "ld2 {v8.4s, v9.4s}, [%3], #32 \n"// v8 v9 = r0
                        "0: \n"
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v6.4s}, [%1] \n"// v6 = _sum0
                        "fmul v12.4s, v8.4s, %12.s[0] \n"
                        "prfm pldl1keep, [%2, #128] \n"
                        "ld1 {v7.4s}, [%2] \n"// v7 = _sum1
                        "fmul v13.4s, v8.4s, %15.s[0] \n"
                        "prfm pldl1keep, [%3, #128] \n"
                        "ld2 {v10.4s, v11.4s}, [%3] \n"// v10
                        "fmla v6.4s, v9.4s, %12.s[1] \n"
                        "ext v14.16b, v8.16b, v10.16b, #4\n"
                        "fmla v7.4s, v9.4s, %15.s[1] \n"
                        "prfm pldl1keep, [%4, #256] \n"
                        "ld2 {v8.4s, v9.4s}, [%4], #32 \n"// r1
                        "fmla v12.4s, v14.4s, %12.s[2] \n"
                        "fmla v13.4s, v14.4s, %15.s[2] \n"
                        "prfm pldl1keep, [%4, #128] \n"
                        "ld2 {v10.4s, v11.4s}, [%4] \n"
                        "fmla v6.4s, v8.4s, %13.s[0] \n"
                        "fmla v7.4s, v8.4s, %16.s[0] \n"
                        "ext v14.16b, v8.16b, v10.16b, #4\n"
                        "fmla v12.4s, v9.4s, %13.s[1] \n"
                        "fmla v13.4s, v9.4s, %16.s[1] \n"
                        "prfm pldl1keep, [%5, #256] \n"
                        "ld2 {v8.4s, v9.4s}, [%5], #32 \n"// r2
                        "fmla v6.4s, v14.4s, %13.s[2] \n"
                        "fmla v7.4s, v14.4s, %16.s[2] \n"
                        "prfm pldl1keep, [%5, #128] \n"
                        "ld2 {v10.4s, v11.4s}, [%5] \n"
                        "fmla v12.4s, v8.4s, %14.s[0] \n"
                        "fmla v13.4s, v8.4s, %17.s[0] \n"
                        "ext v14.16b, v8.16b, v10.16b, #4\n"
                        "fmla v6.4s, v9.4s, %14.s[1] \n"
                        "fmla v7.4s, v9.4s, %17.s[1] \n"
                        "fmla v12.4s, v14.4s, %14.s[2] \n"
                        "fmla v13.4s, v14.4s, %17.s[2] \n"
                        "prfm pldl1keep, [%3, #256] \n"
                        "ld2 {v8.4s, v9.4s}, [%3], #32 \n"// v8 v9 = r0
                        "fadd v6.4s, v6.4s, v12.4s \n"
                        "fadd v7.4s, v7.4s, v13.4s \n"
                        "subs %w0, %w0, #1 \n"
                        "st1 {v6.4s}, [%1], #16 \n"
                        "st1 {v7.4s}, [%2], #16 \n"
                        "bne 0b \n"
                        "sub %3, %3, #32 \n"// undo the last (pipelined) r0 pre-load
                        : "=r"(nn),      // %0
                          "=r"(outptr0), // %1
                          "=r"(outptr1), // %2
                          "=r"(r0),      // %3
                          "=r"(r1),      // %4
                          "=r"(r2)       // %5
                        : "0"(nn),
                          "1"(outptr0),
                          "2"(outptr1),
                          "3"(r0),
                          "4"(r1),
                          "5"(r2),
                          "w"(_k00), // %12
                          "w"(_k03), // %13
                          "w"(_k06), // %14
                          "w"(_k10), // %15
                          "w"(_k13), // %16
                          "w"(_k16)  // %17
                        : "cc", "memory", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
                    );
                }
#else
                if (nn > 0)
                {
                    // ARMv7 twin of the aarch64 loop above: vld2 deinterleaves
                    // even/odd columns, vext forms the third kernel-tap column.
                    asm volatile(
                        "pld [%3, #256] \n"
                        "vld2.f32 {d16-d19}, [%3]! \n"// q8 q9 = r0
                        "0: \n"
                        "pld [%1, #128] \n"
                        "vld1.f32 {d12-d13}, [%1] \n"// q6 = _sum0
                        "vmul.f32 q12, q8, %e12[0] \n"
                        "pld [%2, #128] \n"
                        "vld1.f32 {d14-d15}, [%2] \n"// q7 = _sum1
                        "vmul.f32 q13, q8, %e15[0] \n"
                        "pld [%3, #128] \n"
                        "vld2.f32 {d20-d21}, [%3] \n"// q10
                        "vmla.f32 q6, q9, %e12[1] \n"
                        "vext.32 q11, q8, q10, #1 \n"
                        "vmla.f32 q7, q9, %e15[1] \n"
                        "pld [%4, #256] \n"
                        "vld2.f32 {d16-d19}, [%4]! \n"// r1
                        "vmla.f32 q12, q11, %f12[0] \n"
                        "vmla.f32 q13, q11, %f15[0] \n"
                        "pld [%4, #128] \n"
                        "vld2.f32 {d20-d21}, [%4] \n"
                        "vmla.f32 q6, q8, %e13[0] \n"
                        "vmla.f32 q7, q8, %e16[0] \n"
                        "vext.32 q11, q8, q10, #1 \n"
                        "vmla.f32 q12, q9, %e13[1] \n"
                        "vmla.f32 q13, q9, %e16[1] \n"
                        "pld [%5, #256] \n"
                        "vld2.f32 {d16-d19}, [%5]! \n"// r2
                        "vmla.f32 q6, q11, %f13[0] \n"
                        "vmla.f32 q7, q11, %f16[0] \n"
                        "pld [%5, #128] \n"
                        "vld2.f32 {d20-d21}, [%5] \n"
                        "vmla.f32 q12, q8, %e14[0] \n"
                        "vmla.f32 q13, q8, %e17[0] \n"
                        "vext.32 q11, q8, q10, #1 \n"
                        "vmla.f32 q6, q9, %e14[1] \n"
                        "vmla.f32 q7, q9, %e17[1] \n"
                        "vmla.f32 q12, q11, %f14[0] \n"
                        "vmla.f32 q13, q11, %f17[0] \n"
                        "pld [%3, #256] \n"
                        "vld2.f32 {d16-d19}, [%3]! \n"// q8 q9 = r0
                        "vadd.f32 q6, q6, q12 \n"
                        "vadd.f32 q7, q7, q13 \n"
                        "subs %0, #1 \n"
                        "vst1.f32 {d12-d13}, [%1]! \n"
                        "vst1.f32 {d14-d15}, [%2]! \n"
                        "bne 0b \n"
                        "sub %3, #32 \n"// undo the last (pipelined) r0 pre-load
                        : "=r"(nn),      // %0
                          "=r"(outptr0), // %1
                          "=r"(outptr1), // %2
                          "=r"(r0),      // %3
                          "=r"(r1),      // %4
                          "=r"(r2)       // %5
                        : "0"(nn),
                          "1"(outptr0),
                          "2"(outptr1),
                          "3"(r0),
                          "4"(r1),
                          "5"(r2),
                          "w"(_k00), // %12
                          "w"(_k03), // %13
                          "w"(_k06), // %14
                          "w"(_k10), // %15
                          "w"(_k13), // %16
                          "w"(_k16)  // %17
                        : "cc", "memory", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // Remainder columns: one output at a time.
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    // Dot each 4-float load with a kernel row, then overwrite
                    // the unused lane 3 with the running output so the final
                    // horizontal add folds the accumulator in for free.
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r20 = vld1q_f32(r2);

                    float32x4_t _sum0 = vmulq_f32(_r00, _k00);
                    float32x4_t _sum1 = vmulq_f32(_r00, _k10);
                    _sum0 = vmlaq_f32(_sum0, _r10, _k03);
                    _sum1 = vmlaq_f32(_sum1, _r10, _k13);
                    _sum0 = vmlaq_f32(_sum0, _r20, _k06);
                    _sum1 = vmlaq_f32(_sum1, _r20, _k16);

                    _sum0 = vsetq_lane_f32(*outptr0, _sum0, 3);
                    _sum1 = vsetq_lane_f32(*outptr1, _sum1, 3);
#if __aarch64__
                    *outptr0 = vaddvq_f32(_sum0);
                    *outptr1 = vaddvq_f32(_sum1);
#else
                    float32x2_t _ss0 = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
                    float32x2_t _ss1 = vadd_f32(vget_low_f32(_sum1), vget_high_f32(_sum1));
                    float32x2_t _ss01 = vpadd_f32(_ss0, _ss1);

                    *outptr0 = vget_lane_f32(_ss01, 0);
                    *outptr1 = vget_lane_f32(_ss01, 1);
#endif // __aarch64__
#else
                    float sum0 = 0.f;
                    float sum1 = 0.f;

                    sum0 += r0[0] * k0[0];
                    sum0 += r0[1] * k0[1];
                    sum0 += r0[2] * k0[2];
                    sum0 += r1[0] * k0[3];
                    sum0 += r1[1] * k0[4];
                    sum0 += r1[2] * k0[5];
                    sum0 += r2[0] * k0[6];
                    sum0 += r2[1] * k0[7];
                    sum0 += r2[2] * k0[8];

                    sum1 += r0[0] * k1[0];
                    sum1 += r0[1] * k1[1];
                    sum1 += r0[2] * k1[2];
                    sum1 += r1[0] * k1[3];
                    sum1 += r1[1] * k1[4];
                    sum1 += r1[2] * k1[5];
                    sum1 += r2[0] * k1[6];
                    sum1 += r2[1] * k1[7];
                    sum1 += r2[2] * k1[8];

                    *outptr0 += sum0;
                    *outptr1 += sum1;
#endif // __ARM_NEON

                    r0 += 2;   // horizontal stride 2
                    r1 += 2;
                    r2 += 2;
                    outptr0++;
                    outptr1++;
                }

                r0 += tailstep;   // advance to the next pair of input rows
                r1 += tailstep;
                r2 += tailstep;
            }

            k0 += 9;   // next input channel's kernel
            k1 += 9;
        }
    }

    // Tail: remaining single output channel (when outch is odd).
    #pragma omp parallel for
    for (int p=remain_outch_start; p<outch; p++)
    {
        Mat out = top_blob.channel(p);

        const float bias0 = bias ? bias[p] : 0.f;

        out.fill(bias0);

        const float* kernel0 = kernel + p*inch*9;

        for (int q=0; q<inch; q++)
        {
            float* outptr = out;

            const float* img0 = bottom_blob.channel(q);

            const float* r0 = img0;
            const float* r1 = img0 + w;
            const float* r2 = img0 + w*2;

            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

#if __ARM_NEON
            // Kernel rows as vectors; lane 3 of each is unused.
            float32x4_t _k0123 = vld1q_f32(k0);
            float32x4_t _k3456 = vld1q_f32(k1);
            float32x4_t _k6789 = vld1q_f32(k2);
#endif // __ARM_NEON

            int i = 0;

            for (; i < outh; i++)
            {
#if __ARM_NEON
                int nn = outw >> 2;
                int remain = outw & 3;
#else
                int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
                if (nn > 0)
                {
                    // Single-channel variant of the paired loop above.
                    asm volatile(
                        "prfm pldl1keep, [%2, #256] \n"
                        "ld2 {v2.4s, v3.4s}, [%2], #32 \n"
                        "0: \n"
                        "prfm pldl1keep, [%1, #128] \n"
                        "ld1 {v0.4s}, [%1] \n"
                        "fmla v0.4s, v2.4s, %10.s[0] \n"
                        "fmul v10.4s, v3.4s, %10.s[1] \n"
                        "prfm pldl1keep, [%2, #256] \n"
                        "ld2 {v8.4s, v9.4s}, [%2] \n"
                        "ext v1.16b, v2.16b, v8.16b, #4 \n"
                        "fmul v11.4s, v1.4s, %10.s[2] \n"
                        "prfm pldl1keep, [%3, #256] \n"
                        "ld2 {v2.4s, v3.4s}, [%3], #32 \n"
                        "fmla v0.4s, v2.4s, %11.s[0] \n"
                        "fmla v10.4s, v3.4s, %11.s[1] \n"
                        "prfm pldl1keep, [%3, #256] \n"
                        "ld2 {v8.4s, v9.4s}, [%3] \n"
                        "ext v1.16b, v2.16b, v8.16b, #4 \n"
                        "fmla v11.4s, v1.4s, %11.s[2] \n"
                        "prfm pldl1keep, [%4, #256] \n"
                        "ld2 {v2.4s, v3.4s}, [%4], #32 \n"
                        "fmla v0.4s, v2.4s, %12.s[0] \n"
                        "fmla v10.4s, v3.4s, %12.s[1] \n"
                        "prfm pldl1keep, [%4, #256] \n"
                        "ld2 {v8.4s, v9.4s}, [%4] \n"
                        "ext v1.16b, v2.16b, v8.16b, #4 \n"
                        "fmla v11.4s, v1.4s, %12.s[2] \n"
                        "prfm pldl1keep, [%2, #256] \n"
                        "ld2 {v2.4s, v3.4s}, [%2], #32 \n"
                        "fadd v0.4s, v0.4s, v10.4s \n"
                        "fadd v0.4s, v0.4s, v11.4s \n"
                        "subs %w0, %w0, #1 \n"
                        "st1 {v0.4s}, [%1], #16 \n"
                        "bne 0b \n"
                        "sub %2, %2, #32 \n"// undo the last (pipelined) r0 pre-load
                        : "=r"(nn),     // %0
                          "=r"(outptr), // %1
                          "=r"(r0),     // %2
                          "=r"(r1),     // %3
                          "=r"(r2)      // %4
                        : "0"(nn),
                          "1"(outptr),
                          "2"(r0),
                          "3"(r1),
                          "4"(r2),
                          "w"(_k0123), // %10
                          "w"(_k3456), // %11
                          "w"(_k6789)  // %12
                        : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15"
                    );
                }
#else
                if (nn > 0)
                {
                    // ARMv7 twin of the single-channel aarch64 loop above.
                    asm volatile(
                        "pld [%2, #256] \n"
                        "vld2.f32 {d4-d7}, [%2]! \n"
                        "0: \n"
                        "pld [%1, #128] \n"
                        "vld1.f32 {d0-d1}, [%1] \n"
                        "vmla.f32 q0, q2, %e10[0] \n"
                        "vmul.f32 q10, q3, %e10[1] \n"
                        "pld [%2, #128] \n"
                        "vld2.f32 {d16-d17}, [%2] \n"
                        "vext.32 q1, q2, q8, #1 \n"
                        "vmul.f32 q11, q1, %f10[0] \n"
                        "pld [%3, #256] \n"
                        "vld2.f32 {d4-d7}, [%3]! \n"
                        "vmla.f32 q0, q2, %e11[0] \n"
                        "vmla.f32 q10, q3, %e11[1] \n"
                        "pld [%3, #128] \n"
                        "vld2.f32 {d16-d17}, [%3] \n"
                        "vext.32 q1, q2, q8, #1 \n"
                        "vmla.f32 q11, q1, %f11[0] \n"
                        "pld [%4, #256] \n"
                        "vld2.f32 {d4-d7}, [%4]! \n"
                        "vmla.f32 q0, q2, %e12[0] \n"
                        "vmla.f32 q10, q3, %e12[1] \n"
                        "pld [%4, #128] \n"
                        "vld2.f32 {d16-d17}, [%4] \n"
                        "vext.32 q1, q2, q8, #1 \n"
                        "vmla.f32 q11, q1, %f12[0] \n"
                        "pld [%2, #256] \n"
                        "vld2.f32 {d4-d7}, [%2]! \n"
                        "vadd.f32 q0, q0, q10 \n"
                        "vadd.f32 q0, q0, q11 \n"
                        "subs %0, #1 \n"
                        "vst1.f32 {d0-d1}, [%1]! \n"
                        "bne 0b \n"
                        "sub %2, #32 \n"// undo the last (pipelined) r0 pre-load
                        : "=r"(nn),     // %0
                          "=r"(outptr), // %1
                          "=r"(r0),     // %2
                          "=r"(r1),     // %3
                          "=r"(r2)      // %4
                        : "0"(nn),
                          "1"(outptr),
                          "2"(r0),
                          "3"(r1),
                          "4"(r2),
                          "w"(_k0123), // %10
                          "w"(_k3456), // %11
                          "w"(_k6789)  // %12
                        : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
                    );
                }
#endif // __aarch64__
#endif // __ARM_NEON
                // Remainder columns: one output at a time.
                for (; remain>0; remain--)
                {
#if __ARM_NEON
                    // Lane 3 carries the running output so the horizontal add
                    // folds the accumulator in with the three kernel products.
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r10 = vld1q_f32(r1);
                    float32x4_t _r20 = vld1q_f32(r2);

                    float32x4_t _sum = vmulq_f32(_r00, _k0123);
                    _sum = vmlaq_f32(_sum, _r10, _k3456);
                    _sum = vmlaq_f32(_sum, _r20, _k6789);

                    _sum = vsetq_lane_f32(*outptr, _sum, 3);
#if __aarch64__
                    *outptr = vaddvq_f32(_sum);
#else
                    float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
                    _ss = vpadd_f32(_ss, _ss);

                    *outptr = vget_lane_f32(_ss, 0);
#endif // __aarch64__
#else
                    float sum = 0;

                    sum += r0[0] * k0[0];
                    sum += r0[1] * k0[1];
                    sum += r0[2] * k0[2];
                    sum += r1[0] * k1[0];
                    sum += r1[1] * k1[1];
                    sum += r1[2] * k1[2];
                    sum += r2[0] * k2[0];
                    sum += r2[1] * k2[1];
                    sum += r2[2] * k2[2];

                    *outptr += sum;
#endif // __ARM_NEON

                    r0 += 2;   // horizontal stride 2
                    r1 += 2;
                    r2 += 2;
                    outptr++;
                }

                r0 += tailstep;   // advance to the next pair of input rows
                r1 += tailstep;
                r2 += tailstep;
            }

            kernel0 += 9;   // next input channel's kernel
        }
    }
}
|
ChConstraintRigidRigid.h | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2016 projectchrono.org
// All right reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: Hammad Mazhar
// =============================================================================
//
// Description: This class handles rigid contact and computes corrections
// and jacobians
// =============================================================================
#pragma once
#include "chrono_parallel/ChDataManager.h"
#include "chrono_parallel/math/ChParallelMath.h"
namespace chrono {
// Handles rigid-rigid contact: computes corrections and jacobians.
class CH_PARALLEL_API ChConstraintRigidRigid {
  public:
    // Start detached from any data manager; time-step scalars are zeroed.
    ChConstraintRigidRigid() {
        data_manager = 0;
        offset = 3;
        inv_h = inv_hpa = inv_hhpa = 0;
    }
    ~ChConstraintRigidRigid() {}

    // Bind to the given data manager and precompute per-contact data:
    // activity flags, combined cohesion/friction, and the contact points
    // rotated into each body's local frame.
    void Setup(ChParallelDataManager* data_container_) {
        data_manager = data_container_;
        uint num_contacts = data_manager->num_rigid_contacts;
        inv_h = 1 / data_manager->settings.step_size;
        inv_hpa = 1 / (data_manager->settings.step_size + data_manager->settings.solver.alpha);
        inv_hhpa = inv_h * inv_hpa;

        if (num_contacts == 0) {
            return;
        }

        contact_active_pairs.resize(int(num_contacts));
        data_manager->host_data.coh_rigid_rigid.resize(num_contacts);
        data_manager->host_data.fric_rigid_rigid.resize(num_contacts);
        rotated_point_a.resize(num_contacts);
        rotated_point_b.resize(num_contacts);
        quat_a.resize(num_contacts);
        quat_b.resize(num_contacts);

#pragma omp parallel for
        for (int index = 0; index < (signed)num_contacts; index++) {
            vec2 pair = data_manager->host_data.bids_rigid_rigid[index];
            uint id_a = pair.x;
            uint id_b = pair.y;

            // Remember whether each body of the pair is active.
            contact_active_pairs[index] =
                bool2(data_manager->host_data.active_rigid[id_a] != 0, data_manager->host_data.active_rigid[id_b] != 0);

            // Combined cohesion: the smaller of the two body values.
            data_manager->host_data.coh_rigid_rigid[index] =
                Min(data_manager->host_data.cohesion_data[id_a], data_manager->host_data.cohesion_data[id_b]);

            // Combined friction, per component: average of the two bodies,
            // but zero whenever either side is frictionless.
            real3 fric_a = data_manager->host_data.fric_data[id_a];
            real3 fric_b = data_manager->host_data.fric_data[id_b];
            real3 combined;
            combined.x = (fric_a.x == 0 || fric_b.x == 0) ? 0 : (fric_a.x + fric_b.x) * .5;
            combined.y = (fric_a.y == 0 || fric_b.y == 0) ? 0 : (fric_a.y + fric_b.y) * .5;
            combined.z = (fric_a.z == 0 || fric_b.z == 0) ? 0 : (fric_a.z + fric_b.z) * .5;
            data_manager->host_data.fric_rigid_rigid[index] = combined;

            // Contact point A expressed in body A's local frame.
            quaternion inv_rot_a = ~data_manager->host_data.rot_rigid[id_a];
            rotated_point_a[index] = real3_int(
                Rotate(data_manager->host_data.cpta_rigid_rigid[index] - data_manager->host_data.pos_rigid[id_a],
                       inv_rot_a),
                id_a);
            quat_a[index] = inv_rot_a;

            // Contact point B expressed in body B's local frame.
            quaternion inv_rot_b = ~data_manager->host_data.rot_rigid[id_b];
            rotated_point_b[index] = real3_int(
                Rotate(data_manager->host_data.cptb_rigid_rigid[index] - data_manager->host_data.pos_rigid[id_b],
                       inv_rot_b),
                id_b);
            quat_b[index] = inv_rot_b;
        }
    }

    void Project(real* gamma);
    void Project_Single(int index, real* gamma);
    void host_Project_single(int index, vec2* ids, real3* friction, real* cohesion, real* gamma);
    void func_Project_normal(int index, const vec2* ids, const real* cohesion, real* gam);
    void func_Project_sliding(int index, const vec2* ids, const real3* fric, const real* cohesion, real* gam);
    void func_Project_spinning(int index, const vec2* ids, const real3* fric, real* gam);
    void Dx(const DynamicVector<real>& x, DynamicVector<real>& output);
    void D_Tx(const DynamicVector<real>& x, DynamicVector<real>& output);
    // Compute the vector of corrections
    void Build_b();
    // Compute the diagonal compliance matrix
    void Build_E();
    // Compute the jacobian matrix; no allocation is performed here,
    // GenerateSparsity should take care of that
    void Build_D();
    void Build_s();
    // Fill-in the non zero entries in the bilateral jacobian with ones.
    // This operation is sequential.
    void GenerateSparsity();
    int offset;

  protected:
    custom_vector<bool2> contact_active_pairs;

    real inv_h;     // 1 / h
    real inv_hpa;   // 1 / (h + alpha)
    real inv_hhpa;  // 1 / (h * (h + alpha))
    custom_vector<real3_int> rotated_point_a, rotated_point_b;
    custom_vector<quaternion> quat_a, quat_b;

    // Pointer to the system's data manager
    ChParallelDataManager* data_manager;
};
}
|
GB_binop__iseq_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_uint8)
// A*D function (colscale): GB (_AxD__iseq_uint8)
// D*A function (rowscale): GB (_DxB__iseq_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_uint8)
// C=scalar+B GB (_bind1st__iseq_uint8)
// C=scalar+B' GB (_bind1st_tran__iseq_uint8)
// C=A+scalar GB (_bind2nd__iseq_uint8)
// C=A'+scalar GB (_bind2nd_tran__iseq_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_UINT8 || GxB_NO_ISEQ_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no accumulator. The loop body
// comes from the included template, specialized by the iseq/uint8 macros
// defined at the top of this generated file.
GrB_Info GB (_Cdense_ewise3_noaccum__iseq_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out; the caller falls back
    // to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse: each entry of B is folded into
// C with the ISEQ operator. B_ek_slicing partitions B's entries into
// B_ntasks tasks run on B_nthreads threads by the included template.
GrB_Info GB (_Cdense_accumB__iseq_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (passed type-erased in p_bwork) into
// every entry of the dense matrix C, using the ISEQ operator.
// Fix: the original had a second `return (GrB_SUCCESS) ;` after the inner
// block, which was unreachable because the block already returns.
GrB_Info GB (_Cdense_accumb__iseq_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column j of A by the jth diagonal entry of D
// (column scale), writing uint8_t results into C->x.
GrB_Info GB (_AxD__iseq_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row i of B by the ith diagonal entry of D
// (row scale), writing uint8_t results into C->x.
GrB_Info GB (_DxB__iseq_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, with the ISEQ operator
// applied where A and B overlap. The C_to_* maps relate C's vectors to
// those of M, A, and B; TaskList describes the parallel schedule.
GrB_Info GB (_AaddB__iseq_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace for slicing M, A, and B; GB_FREE_WORK releases all of it
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B, C<M> = A.*B, or C<!M> = A.*B, where the
// result C is sparse or hypersparse; work is described by TaskList.
GrB_Info GB (_AemultB_08__iseq_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. For this file GB_BINOP_FLIP is 0 (ISEQ is commutative),
// so only the non-flipped branch below is compiled; flipxy is unused.
GrB_Info GB (_AemultB_02__iseq_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B where M is sparse/hyper and both A
// and B are bitmap/full; M_ek_slicing partitions M across tasks.
GrB_Info GB (_AemultB_04__iseq_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__iseq_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply z = (x == bij) with the scalar x bound as the
// first argument, for every entry present in B's bitmap Bb.
GrB_Info GB (_bind1st__iseq_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the type-erased inputs
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// only positions present in the bitmap are computed
if (GBB (Bb, p))
{
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = (x == bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply z = (aij == y) with the scalar y bound as the
// second argument, for every entry present in A's bitmap Ab.
GrB_Info GB (_bind2nd__iseq_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the type-erased inputs
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// only positions present in the bitmap are computed
if (GBB (Ab, p))
{
Cx [p] = (GBX (Ax, p, false) == y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose A and apply z = (x == aij), with the scalar x
// bound as the first argument. Uses the GB_CAST_OP macro defined just
// above this function together with GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__iseq_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code after this function (same type here,
// since both operands of ISEQ_UINT8 are uint8_t)
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose A and apply z = (aij == y), with the scalar y
// bound as the second argument, via the GB_CAST_OP macro defined above.
GrB_Info GB (_bind2nd_tran__iseq_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_int8_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int8_fp32
// op(A') function: GB_unop_tran__identity_int8_fp32
// C type: int8_t
// A type: float
// cast: int8_t cij = GB_cast_to_int8_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int8_t z = GB_cast_to_int8_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = GB_cast_to_int8_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = (int8_t) Ax: apply the IDENTITY operator elementwise, casting each
// float through double to int8_t (saturating cast via GB_cast_to_int8_t).
GrB_Info GB_unop_apply__identity_int8_fp32
(
int8_t *Cx, // Cx and Ax may be aliased
const float *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// same cast chain as the scalar GB_CAST macro: float -> double -> int8
Cx [p] = GB_cast_to_int8_t ((double) (Ax [p])) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (int8_t) A': transpose A, typecast float entries to int8_t, and
// apply the IDENTITY operator; the heavy lifting is in the template.
GrB_Info GB_unop_tran__identity_int8_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two timevals.
 * Returns 1 if the difference is negative, 0 otherwise.
 * NOTE: *y is normalized in place (standard glibc-manual idiom), so the
 * caller must not rely on y being unchanged. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow seconds into y until x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry excess microseconds in the other direction. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization tv_usec is certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 32;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,16);t1++) {
lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(t1-1,2)),ceild(32*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(16*t1+Ny+29,32)),floord(32*t2+Ny+28,32)),floord(32*t1-32*t2+Nz+Ny+27,32));t3++) {
for (t4=max(max(max(0,ceild(t1-127,128)),ceild(32*t2-Nz-2044,2048)),ceild(32*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(16*t1+Nx+29,2048)),floord(32*t2+Nx+28,2048)),floord(32*t3+Nx+28,2048)),floord(32*t1-32*t2+Nz+Nx+27,2048));t4++) {
for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),32*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),32*t3+30),2048*t4+2046),32*t1-32*t2+Nz+29);t5++) {
for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
lbv=max(2048*t4,t5+1);
ubv=min(2048*t4+2047,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
matmul.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 3000 /* number of rows in matrix A */
/* N x N matrix multiply benchmark.
 * Fix: the original declared a, b, c as automatic arrays — three
 * 3000x3000 double matrices is ~216 MB of stack, far beyond typical
 * 8 MB stack limits (undefined behavior / crash on entry). The matrices
 * are now heap-allocated as pointers-to-row so the [i][j] indexing and
 * the OpenMP loops are unchanged. */
int main (int argc, char *argv[])
{
int i, j, k, chunk;
double (*a)[N] = malloc(N * sizeof *a);   /* matrix A to be multiplied */
double (*b)[N] = malloc(N * sizeof *b);   /* matrix B to be multiplied */
double (*c)[N] = malloc(N * sizeof *c);   /* result matrix C */
if (a == NULL || b == NULL || c == NULL)
  {
  fprintf(stderr, "matmul: out of memory\n");
  free(a); free(b); free(c);
  return 1;
  }
chunk=10;
#pragma omp parallel for private(i,j,k) schedule(static,chunk)
for (i=0; i<N; i++)
  for (j=0; j<N; j++){
  a[i][j]= i+j;
  b[i][j]= i*j;
  c[i][j]= 0;
  }
/*** Do matrix multiply sharing iterations on outer loop ***/
#pragma omp parallel for private(i,j,k) schedule(static,chunk)
for (i=0; i<N; i++)
  for(k=0; k<N; k++)
    for (j=0; j<N; j++)
      c[i][j] += a[i][k] * b[k][j];
/****Print the last element of the loop****/
printf("%lf\n",c[N-1][N-1]);
free(a);
free(b);
free(c);
return 0;
}
|
3DConvolution.c | /**
* 3DConvolution.c: This file was adapted from PolyBench/GPU 1.0 test suite
* to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
*
* http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*
* Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
* Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
* Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
*/
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "BenchmarksUtil.h"
// define the error threshold for the results "not matching"
#define ERROR_THRESHOLD 0.5
/* Problem size. */
#ifdef RUN_TEST
#define SIZE 1100
#elif RUN_BENCHMARK
#define SIZE 9600
#else
#define SIZE 1000
#endif
#define NI SIZE
#define NJ SIZE
#define NK SIZE
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
// Reference (CPU) 3-D convolution: writes each interior point of B from a
// weighted combination of neighbors of A, flattened as [i*(NK*NJ)+j*NK+k].
// NOTE(review): several taps (c21, c23, c31, c33 in the first group) reuse
// the (j-1, k-1) offsets instead of distinct neighborhoods. This mirrors
// the GPU kernel below and the PolyBench/GPU original, so the CPU/GPU
// comparison still passes — confirm against the upstream benchmark before
// "fixing" the stencil.
// NOTE(review): the flattened index is computed in int; for the benchmark
// size (SIZE 9600) i*(NK*NJ) exceeds INT_MAX — verify sizes before use.
void conv3D(DATA_TYPE *A, DATA_TYPE *B) {
int i, j, k;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2;
c21 = +5;
c31 = -8;
c12 = -3;
c22 = +6;
c32 = -9;
c13 = +4;
c23 = +7;
c33 = +10;
// interior points only; the one-cell boundary of B is never written
for (j = 1; j < NJ - 1; ++j) {
for (i = 1; i < NI - 1; ++i) {
for (k = 1; k < NK - 1; ++k) {
B[i * (NK * NJ) + j * NK + k] =
c11 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
c13 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
c21 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
c23 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
c31 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
c33 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
c12 * A[(i + 0) * (NK * NJ) + (j - 1) * NK + (k + 0)] +
c22 * A[(i + 0) * (NK * NJ) + (j + 0) * NK + (k + 0)] +
c32 * A[(i + 0) * (NK * NJ) + (j + 1) * NK + (k + 0)] +
c11 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k + 1)] +
c13 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k + 1)] +
c21 * A[(i - 1) * (NK * NJ) + (j + 0) * NK + (k + 1)] +
c23 * A[(i + 1) * (NK * NJ) + (j + 0) * NK + (k + 1)] +
c31 * A[(i - 1) * (NK * NJ) + (j + 1) * NK + (k + 1)] +
c33 * A[(i + 1) * (NK * NJ) + (j + 1) * NK + (k + 1)];
}
}
}
}
// Offloaded 3-D convolution: identical arithmetic to conv3D, executed on
// the device via OpenMP 4.0 target offload. A is mapped to the device and
// B mapped back; i and k are privatized per team/thread.
// NOTE(review): the tap offsets intentionally match conv3D above (same
// repeated (j-1, k-1) pattern) so compareResults can validate GPU vs CPU.
void conv3D_OMP(DATA_TYPE *A, DATA_TYPE *B) {
int i, j, k;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +2;
c21 = +5;
c31 = -8;
c12 = -3;
c22 = +6;
c32 = -9;
c13 = +4;
c23 = +7;
c33 = +10;
#pragma omp target teams distribute parallel for map(to:A[:NI*NJ*NK]) map(from:B[:NI*NJ*NK]) device(DEVICE_ID) private(i,k)
for (j = 1; j < NJ - 1; ++j) {
for (i = 1; i < NI - 1; ++i) {
for (k = 1; k < NK - 1; ++k) {
B[i * (NK * NJ) + j * NK + k] =
c11 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
c13 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
c21 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
c23 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
c31 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
c33 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
c12 * A[(i + 0) * (NK * NJ) + (j - 1) * NK + (k + 0)] +
c22 * A[(i + 0) * (NK * NJ) + (j + 0) * NK + (k + 0)] +
c32 * A[(i + 0) * (NK * NJ) + (j + 1) * NK + (k + 0)] +
c11 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k + 1)] +
c13 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k + 1)] +
c21 * A[(i - 1) * (NK * NJ) + (j + 0) * NK + (k + 1)] +
c23 * A[(i + 1) * (NK * NJ) + (j + 0) * NK + (k + 1)] +
c31 * A[(i - 1) * (NK * NJ) + (j + 1) * NK + (k + 1)] +
c33 * A[(i + 1) * (NK * NJ) + (j + 1) * NK + (k + 1)];
}
}
}
}
// Fill the NI x NJ x NK volume A (flattened row-major) with a cheap
// deterministic pattern so CPU and GPU runs start from identical data.
void init(DATA_TYPE *A) {
  for (int x = 0; x < NI; ++x) {
    for (int y = 0; y < NJ; ++y) {
      for (int z = 0; z < NK; ++z) {
        A[x * (NK * NJ) + y * NK + z] = x % 12 + 2 * (y % 7) + 3 * (z % 13);
      }
    }
  }
}
// Count interior points where the CPU result B and the device result
// B_GPU differ by more than ERROR_THRESHOLD percent; prints and returns
// the mismatch count (0 == outputs agree).
int compareResults(DATA_TYPE *B, DATA_TYPE *B_GPU) {
  int mismatches = 0;
  for (int i = 1; i < NI - 1; ++i) {
    for (int j = 1; j < NJ - 1; ++j) {
      for (int k = 1; k < NK - 1; ++k) {
        int idx = i * (NK * NJ) + j * NK + k;
        if (percentDiff(B[idx], B_GPU[idx]) > ERROR_THRESHOLD) {
          mismatches++;
        }
      }
    }
  }
  // Print results
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         ERROR_THRESHOLD, mismatches);
  return mismatches;
}
int main(int argc, char *argv[]) {
double t_start, t_end;
int fail = 0;
DATA_TYPE *A;
DATA_TYPE *B;
DATA_TYPE *B_GPU;
A = (DATA_TYPE *)malloc(NI * NJ * NK * sizeof(DATA_TYPE));
B = (DATA_TYPE *)malloc(NI * NJ * NK * sizeof(DATA_TYPE));
B_GPU = (DATA_TYPE *)malloc(NI * NJ * NK * sizeof(DATA_TYPE));
fprintf(stdout, ">> Three dimensional (3D) convolution <<\n");
init(A);
t_start = rtclock();
conv3D_OMP(A, B_GPU);
t_end = rtclock();
fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
#ifdef RUN_TEST
t_start = rtclock();
conv3D(A, B);
t_end = rtclock();
fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
fail = compareResults(B, B_GPU);
#endif
free(A);
free(B);
free(B_GPU);
return fail;
}
|
DenseSegment.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_DENSESEGMENT_H_
#define SRC_DENSESEGMENT_H_
#include <string>
#include "src/edgelist.h"
#include "src/bitvector.h"
// Per-buffer state for one dense segment: a dense value array plus a
// presence bitvector (allocated in a single aligned slab), and a second
// slab holding the compressed (value, index) representation.
template <typename T>
class segment_props
{
public:
T * value;                // dense values; capacity entries
int * bit_vector;         // presence bitvector, lives just past value[]
bool allocated;           // true once alloc() has run
bool uninitialized;       // true until the buffer holds valid data
int nnz;                  // number of present entries
T * compressed_data;      // compressed values (indices stored after them)
int * compressed_indices; // set when (de)compressing
// Default constructor: no buffers owned yet.
segment_props()
{
value = NULL;
bit_vector = NULL;
allocated = false;
uninitialized = true;
nnz = 0;
// Fix: these two were previously left uninitialized, so any code that
// inspected or freed them before alloc() read indeterminate pointers.
compressed_data = NULL;
compressed_indices = NULL;
}
// Allocate the dense slab (values + bitvector) and the compressed slab,
// both 64-byte aligned via _mm_malloc. Marks the buffer allocated but
// still uninitialized (the bitvector has not been cleared yet).
void alloc(int capacity, int num_ints)
{
value = reinterpret_cast<T*>(_mm_malloc(capacity * sizeof(T) + num_ints*sizeof(int), 64));
bit_vector = reinterpret_cast<int*>( value + capacity);
compressed_data = reinterpret_cast<T*>(_mm_malloc(capacity * sizeof(T) + capacity*sizeof(int), 64));
allocated = true;
uninitialized = true;
}
};
template <typename T>
class DenseSegment {
public:
std::string name;
int capacity;
int num_ints;
segment_props<T> properties;
std::vector<segment_props<T> > to_be_received_properties;
std::vector<segment_props<T> > received_properties;
std::vector<segment_props<T> > uninitialized_properties;
DenseSegment()
{
capacity=0;
num_ints=0;
}
DenseSegment(int n) {
capacity = n;
num_ints = (n + sizeof(int) * 8 - 1) / (sizeof(int) * 8);
//std::cout << "Capacity, num_ints: " << capacity << "\t" << num_ints << std::endl;
}
// Build a segment from an edge list: _m is the segment capacity, _nnz the
// number of edges, and row_start the first (1-based) row owned by this
// segment; each edge's src is translated to a local 0-based index.
DenseSegment(edge_t<T>* edges, int _m, int _nnz, int row_start)
{
capacity = _m;
num_ints = (capacity + sizeof(int) * 8 - 1) / (sizeof(int) * 8);
alloc();
initialize();
for (uint64_t i = 0; i < (uint64_t)_nnz; i++) {
int src = edges[i].src - row_start - 1;
set_bitvector(src, properties.bit_vector);
properties.value[src] = edges[i].val;
}
// NOTE(review): assumes src indices are unique; duplicates would
// overwrite values while still being counted in nnz below — confirm.
properties.nnz = _nnz;
properties.uninitialized = false;
}
~DenseSegment() {}  // trivial: buffers are released explicitly via dealloc()
// Count the set bits in the whole presence bitvector (parallel popcount);
// returns 0 for an uninitialized segment.
int compute_nnz()
{
    if (properties.uninitialized) return 0;
    int total = 0;
    #pragma omp parallel for reduction(+:total)
    for (int w = 0 ; w < num_ints ; w++)
    {
        total += _popcnt32(properties.bit_vector[w]);
    }
    return total;
}
// Count the set bits in bitvector words [start, finish); word indices,
// not entry indices. Returns 0 for an uninitialized segment.
int compute_nnz(int start, int finish)
{
    if (properties.uninitialized) return 0;
    int total = 0;
    #pragma omp parallel for reduction(+:total)
    for (int w = start ; w < finish ; w++)
    {
        total += _popcnt32(properties.bit_vector[w]);
    }
    return total;
}
// A segment is worth compressing only when it is sparse enough:
// nnz at or below COMPRESSION_THRESHOLD * capacity.
bool should_compress(int test_nnz)
{
    return test_nnz <= COMPRESSION_THRESHOLD * capacity;
}
// Pack the present entries into (compressed_data, compressed_indices).
// Two parallel passes over bitvector word ranges: pass 1 counts nnz per
// partition, a serial prefix sum fixes each partition's output offset,
// then pass 2 writes values and indices at those offsets. Also refreshes
// properties.nnz from the bitvector. No-op if the segment is too dense.
void compress()
{
if(!should_compress(properties.nnz))
{
return;
}
assert(properties.allocated);
assert(!properties.uninitialized);
// indices are stored immediately after the nnz compressed values
properties.compressed_indices = reinterpret_cast<int*>(properties.compressed_data + properties.nnz);
// oversubscribe partitions (16x threads) for load balance
int npartitions = omp_get_max_threads() * 16;
int * partition_nnz = new int[npartitions];
int * partition_nnz_scan = new int[npartitions+1];
// pass 1: per-partition popcounts
#pragma omp parallel for
for(int p = 0 ; p < npartitions ; p++)
{
int i_per_partition = (num_ints + npartitions - 1) / npartitions;
int start_i = i_per_partition * p;
int end_i = i_per_partition * (p+1);
if(end_i > num_ints) end_i = num_ints;
partition_nnz[p] = compute_nnz(start_i, end_i);
}
// serial exclusive prefix sum of the partition counts
partition_nnz_scan[0] = 0;
properties.nnz = 0;
for(int p = 0 ; p < npartitions ; p++)
{
partition_nnz_scan[p+1] = partition_nnz_scan[p] + partition_nnz[p];
properties.nnz += partition_nnz[p];
}
// pass 2: each partition writes its entries starting at its scan offset
#pragma omp parallel for
for(int p = 0 ; p < npartitions ; p++)
{
int i_per_partition = (num_ints + npartitions - 1) / npartitions;
int start_i = i_per_partition * p;
int end_i = i_per_partition * (p+1);
if(end_i > num_ints) end_i = num_ints;
int nzcnt = partition_nnz_scan[p];
for(int ii = start_i ; ii < end_i ; ii++)
{
// skip whole 32-entry words with no bits set
if(_popcnt32(properties.bit_vector[ii]) == 0) continue;
for(int i = ii*32 ; i < (ii+1)*32 ; i++)
{
if(get_bitvector(i, properties.bit_vector))
{
properties.compressed_data[nzcnt] = properties.value[i];
properties.compressed_indices[nzcnt] = i;
nzcnt++;
}
}
}
}
delete [] partition_nnz;
delete [] partition_nnz_scan;
}
// Rebuild the dense representation from (compressed_data,
// compressed_indices): clear the bitvector, then scatter values back in
// parallel. Partition boundaries are rounded to multiples of 32 so no two
// threads write bits in the same bitvector word. No-op when the segment
// would not have been compressed.
void decompress()
{
if(!should_compress(properties.nnz))
{
return;
}
assert(properties.allocated);
memset(properties.bit_vector, 0, num_ints* sizeof(int));
properties.compressed_indices = reinterpret_cast<int*>(properties.compressed_data + properties.nnz);
int npartitions = omp_get_max_threads();
int * start_nnzs = new int[npartitions];
int * end_nnzs = new int[npartitions];
int mystart = 0;
int my_nz_per = (properties.nnz + npartitions - 1) / npartitions;
// round the per-thread span up to a multiple of 32 entries so bitvector
// words are not shared between threads (avoids write races below)
my_nz_per = ((my_nz_per + 31) / 32) * 32;
for(int p = 0 ; p < npartitions ; p++)
{
start_nnzs[p] = mystart;
mystart += my_nz_per;
if(mystart > properties.nnz) mystart = properties.nnz;
end_nnzs[p] = mystart;
}
#pragma omp parallel for
for(int p = 0 ; p < npartitions ; p++)
{
int start_nnz = start_nnzs[p];
int end_nnz = end_nnzs[p];
for(int i = start_nnz ; i < end_nnz ; i++)
{
int idx = properties.compressed_indices[i];
set_bitvector(idx, properties.bit_vector);
properties.value[idx] = properties.compressed_data[i];
}
}
delete [] start_nnzs;
delete [] end_nnzs;
}
// Mark every received buffer as reusable and move it onto the
// uninitialized (free) list, then forget the received list.
void set_uninitialized_received()
{
    for (auto & props : received_properties)
    {
        props.uninitialized = true;
        uninitialized_properties.push_back(props);
    }
    received_properties.clear();
}
// Invalidate this segment's contents (and recycle any received buffers)
// without freeing memory.
void set_uninitialized() {
    set_uninitialized_received();
    properties.nnz = 0;
    properties.uninitialized = true;
}
// Free the backing buffer of every received segment that owns one, then
// drop all received-segment records.
void dealloc_received()
{
    for (auto &seg : received_properties) {
        if (seg.allocated) {
            _mm_free(seg.value);
        }
    }
    received_properties.clear();
}
// Free this segment's own buffer (and any received buffers), returning the
// segment to the unallocated, uninitialized, empty state.
void dealloc() {
    dealloc_received();
    if (!properties.allocated) return;
    _mm_free(properties.value);
    properties.allocated = false;
    properties.uninitialized = true;
    properties.nnz = 0;
}
// Lazily allocate the segment's value buffer and bit vector; safe to call
// repeatedly.
void alloc() {
    if (properties.allocated) return;
    properties.alloc(capacity, num_ints);
}
// Zero the bit vector the first time the segment is touched; afterwards the
// segment is considered initialized.
void initialize()
{
    if (properties.uninitialized) {
        memset(properties.bit_vector, 0, num_ints * sizeof(int));
        properties.uninitialized = false;
    } else {
        properties.uninitialized = false;
    }
}
// Number of nonzero (set) entries currently tracked in this segment.
int getNNZ()
{
return properties.nnz;
}
// Store val at 1-based index idx, allocating/initializing on first use.
// nnz is bumped only when the slot was previously empty.
void set(int idx, T val) {
    alloc();
    initialize();
    const int slot = idx - 1;   // external indices are 1-based
    if (!get_bitvector(slot, properties.bit_vector)) {
        properties.nnz++;
    }
    properties.value[slot] = val;
    set_bitvector(slot, properties.bit_vector);
    properties.uninitialized = false;
}
// Set every one of the `capacity` entries to val and mark all bits present.
// NOTE(review): properties.nnz is not updated here -- confirm callers account
// for the segment becoming fully dense.
void setAll(T val) {
alloc();
//initialize();
properties.uninitialized=false;
if(num_ints == 0) return;
// Clear the final (possibly partial) bit-vector word, fill all full words,
// then set only the valid bits of the tail individually.
properties.bit_vector[num_ints-1] = 0;
#pragma omp parallel for
for(int i = 0 ; i < num_ints-1 ; i++)
{
properties.bit_vector[i] = 0xFFFFFFFF;
}
// assumes capacity-32 lands at or before the start of the last word --
// the earlier full words being 0xFFFFFFFF makes re-setting those bits a no-op
for(int idx = std::max(0, capacity-32) ; idx < capacity ; idx++)
{
set_bitvector(idx, properties.bit_vector);
}
#pragma omp parallel for
for(int i = 0 ; i < capacity ; i++)
{
properties.value[i] = val;
}
}
// Read the value at 1-based index idx. The segment must already be
// allocated and initialized; presence of the entry is not checked.
T get(const int idx) const {
    assert(properties.allocated);
    assert(!properties.uninitialized);
    const int slot = idx - 1;   // external indices are 1-based
    return properties.value[slot];
}
// Blocking send of this segment's nonzero count to dst_rank; pairs with
// recv_tile_metadata. myrank/output_rank/requests are unused here.
void send_tile_metadata(int myrank, int dst_rank, int output_rank, std::vector<MPI_Request>* requests) {
MPI_Send(&(properties.nnz), 1, MPI_INT, dst_rank, 0, MPI_COMM_WORLD);
}
// Blocking receive of a segment's nonzero count from src_rank.  If this
// segment is still uninitialized the count lands in-place; otherwise a new
// record is queued for recv_tile to fill in later.
void recv_tile_metadata(int myrank, int src_rank, int output_rank,
std::vector<MPI_Request>* requests) {
// if uninitialized
if(properties.uninitialized)
{
MPI_Recv(&(properties.nnz), 1, MPI_INT, src_rank, 0, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
}
else
{
// Create new buffer; reuse a pooled record when one is available,
// otherwise fall back to a default-constructed (unallocated) one.
segment_props<T> new_properties;
if(uninitialized_properties.size() > 0)
{
new_properties = uninitialized_properties.back();
uninitialized_properties.pop_back();
}
MPI_Recv(&(new_properties.nnz), 1, MPI_INT, src_rank, 0, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
to_be_received_properties.push_back(new_properties);
}
}
// Non-blocking send of the whole uncompressed tile.  The single message
// covers the value array AND the bit vector -- assumes bit_vector is laid
// out contiguously right after the capacity values (TODO confirm in alloc).
void send_tile(int myrank, int dst_rank, int output_rank, std::vector<MPI_Request>* requests) {
MPI_Request r1;
MPI_Isend(properties.value, capacity * sizeof(T) + num_ints * sizeof(int), MPI_BYTE, dst_rank, 0, MPI_COMM_WORLD,
&r1);
requests->push_back(r1);
}
// Non-blocking receive of a whole uncompressed tile (values + bit vector in
// one message, mirroring send_tile).  An uninitialized segment receives
// in-place; otherwise the tile goes into a freshly allocated record queued
// by recv_tile_metadata.
void recv_tile(int myrank, int src_rank, int output_rank,
std::vector<MPI_Request>* requests) {
alloc();
if(properties.uninitialized)
{
MPI_Request r1;
MPI_Irecv(properties.value, capacity * sizeof(T) + num_ints * sizeof(int), MPI_BYTE, src_rank, 0, MPI_COMM_WORLD,
&r1);
requests->push_back(r1);
properties.uninitialized = false;
}
else
{
// NOTE(review): assumes recv_tile_metadata queued a record first;
// to_be_received_properties[0] is undefined behavior when empty.
segment_props<T> new_properties = to_be_received_properties[0];
to_be_received_properties.erase(to_be_received_properties.begin());
new_properties.alloc(capacity, num_ints);
MPI_Request r1;
MPI_Irecv(new_properties.value, capacity * sizeof(T) + num_ints* sizeof(int), MPI_BYTE, src_rank, 0, MPI_COMM_WORLD,
&r1);
requests->push_back(r1);
new_properties.uninitialized = false;
received_properties.push_back(new_properties);
}
}
// Non-blocking send of the compressed form (nnz values followed by nnz
// indices in one buffer).  Falls back to the dense path for tiles that were
// not compressed.
void send_tile_compressed(int myrank, int dst_rank, int output_rank, std::vector<MPI_Request>* requests) {
if(!should_compress(properties.nnz))
{
send_tile(myrank, dst_rank, output_rank, requests);
return;
}
MPI_Request r1;
MPI_Isend(properties.compressed_data, properties.nnz * sizeof(T) + properties.nnz * sizeof(int), MPI_BYTE, dst_rank, 0, MPI_COMM_WORLD,
&r1);
requests->push_back(r1);
}
// Non-blocking receive of a compressed tile.  The nnz received earlier by
// recv_tile_metadata decides whether the sender compressed; both sides must
// agree on should_compress for the message sizes to match.
void recv_tile_compressed(int myrank, int src_rank, int output_rank,
std::vector<MPI_Request>* requests) {
alloc();
if(properties.uninitialized)
{
if(!should_compress(properties.nnz))
{
recv_tile(myrank, src_rank, output_rank, requests);
return;
}
MPI_Request r1;
MPI_Irecv(properties.compressed_data, properties.nnz * sizeof(T) + properties.nnz * sizeof(int), MPI_BYTE, src_rank, 0, MPI_COMM_WORLD,
&r1);
requests->push_back(r1);
properties.uninitialized = false;
}
else
{
// Peek at the queued record's nnz to decide the path; the record is only
// dequeued on the compressed path (recv_tile dequeues it itself otherwise).
// NOTE(review): indexing [0] on an empty vector is UB -- assumes
// recv_tile_metadata always ran first.
segment_props<T> new_properties = to_be_received_properties[0];
if(!should_compress(new_properties.nnz))
{
recv_tile(myrank, src_rank, output_rank, requests);
return;
}
to_be_received_properties.erase(to_be_received_properties.begin());
new_properties.alloc(capacity, num_ints);
MPI_Request r1;
MPI_Irecv(new_properties.compressed_data, new_properties.nnz * sizeof(T) + new_properties.nnz * sizeof(int), MPI_BYTE, src_rank, 0, MPI_COMM_WORLD,
&r1);
requests->push_back(r1);
new_properties.uninitialized = false;
received_properties.push_back(new_properties);
}
}
};
#endif // SRC_DENSESEGMENT_H_
|
GB_extractTuples.c | //------------------------------------------------------------------------------
// GB_extractTuples: extract all the tuples from a matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Extracts all tuples from a matrix, like [I,J,X] = find (A). If any
// parameter I, J and/or X is NULL, then that component is not extracted. The
// size of the I, J, and X arrays (those that are not NULL) is given by nvals,
// which must be at least as large as GrB_nvals (&nvals, A). The values in the
// matrix are typecasted to the type of X, as needed.
// This function does the work for the user-callable GrB_*_extractTuples
// functions, and helps build the tuples for GB_concat_hyper.
// Tf A is iso and X is not NULL, the iso scalar Ax [0] is expanded into X.
#include "GB.h"
#define GB_FREE_ALL \
{ \
GB_FREE_WORK (&Ap, Ap_size) ; \
GB_FREE_WORK (&X_bitmap, X_bitmap_size) ; \
}
GrB_Info GB_extractTuples // extract all tuples from a matrix
(
GrB_Index *I_out, // array for returning row indices of tuples
GrB_Index *J_out, // array for returning col indices of tuples
void *X, // array for returning values of tuples
GrB_Index *p_nvals, // I,J,X size on input; # tuples on output
const GB_Type_code xcode, // type of array X
const GrB_Matrix A, // matrix to extract tuples from
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
GB_void *restrict X_bitmap = NULL ; size_t X_bitmap_size = 0 ;
int64_t *restrict Ap = NULL ; size_t Ap_size = 0 ;
ASSERT_MATRIX_OK (A, "A to extract", GB0) ;
ASSERT (p_nvals != NULL) ;
// delete any lingering zombies and assemble any pending tuples;
// allow A to remain jumbled
GB_MATRIX_WAIT_IF_PENDING_OR_ZOMBIES (A) ;
GB_BURBLE_DENSE (A, "(A %s) ") ;
ASSERT (xcode <= GB_UDT_code) ;
const GB_Type_code acode = A->type->code ;
const size_t asize = A->type->size ;
// xcode and A must be compatible
if (!GB_code_compatible (xcode, acode))
{
return (GrB_DOMAIN_MISMATCH) ;
}
const int64_t anz = GB_nnz (A) ;
if (anz == 0)
{
// no work to do
(*p_nvals) = 0 ;
return (GrB_SUCCESS) ;
}
int64_t nvals = *p_nvals ; // size of I,J,X on input
if (nvals < anz && (I_out != NULL || J_out != NULL || X != NULL))
{
// output arrays are not big enough
return (GrB_INSUFFICIENT_SPACE) ;
}
//-------------------------------------------------------------------------
// determine the number of threads to use
//-------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (anz + A->nvec, chunk, nthreads_max) ;
//-------------------------------------------------------------------------
// handle the CSR/CSC format
//--------------------------------------------------------------------------
// For a CSR matrix the internal "row" indices are the caller's column
// indices, so the two output arrays are simply swapped here.
GrB_Index *I, *J ;
if (A->is_csc)
{
I = I_out ;
J = J_out ;
}
else
{
I = J_out ;
J = I_out ;
}
//--------------------------------------------------------------------------
// bitmap case
//--------------------------------------------------------------------------
if (GB_IS_BITMAP (A))
{
//----------------------------------------------------------------------
// allocate workspace
//----------------------------------------------------------------------
bool need_typecast = (X != NULL) && (xcode != acode) ;
if (need_typecast)
{
// X must be typecasted; extract into X_bitmap first, cast afterwards
int64_t anzmax = GB_IMAX (anz, 1) ;
X_bitmap = GB_MALLOC_WORK (anzmax*asize, GB_void, &X_bitmap_size) ;
}
// Ap is a throwaway vector-pointer array required by the worker
Ap = GB_MALLOC_WORK (A->vdim+1, int64_t, &Ap_size) ;
if (Ap == NULL || (need_typecast && X_bitmap == NULL))
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// extract the tuples
//----------------------------------------------------------------------
// TODO: pass xcode to GB_convert_bitmap_worker and let it do the
// typecasting. This works for now, however.
// if A is iso, GB_convert_bitmap_worker expands the iso scalar
// into its result, X or X_bitmap
GB_OK (GB_convert_bitmap_worker (Ap, (int64_t *) I, (int64_t *) J,
(GB_void *) (need_typecast ? X_bitmap : X), NULL, A, Context)) ;
//----------------------------------------------------------------------
// typecast X if needed
//----------------------------------------------------------------------
if (need_typecast)
{
// typecast the values from X_bitmap into X
ASSERT (X != NULL) ;
ASSERT (xcode != acode) ;
GB_cast_array ((GB_void *) X, xcode, X_bitmap, acode, NULL, anz,
nthreads) ;
}
}
else
{
//----------------------------------------------------------------------
// sparse, hypersparse, or full case
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// extract the row indices
//----------------------------------------------------------------------
if (I != NULL)
{
if (A->i == NULL)
{
// A is full; construct the row indices
int64_t avlen = A->vlen ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// entries of a full matrix are stored column-by-column
I [p] = (p % avlen) ;
}
}
else
{
GB_memcpy (I, A->i, anz * sizeof (int64_t), nthreads) ;
}
}
//----------------------------------------------------------------------
// extract the column indices
//----------------------------------------------------------------------
if (J != NULL)
{
GB_OK (GB_extract_vector_list ((int64_t *) J, A, Context)) ;
}
//----------------------------------------------------------------------
// extract the values
//----------------------------------------------------------------------
if (X != NULL)
{
if (A->iso)
{
// typecast the scalar and expand it into X
size_t xsize = GB_code_size (xcode, asize) ;
GB_void scalar [GB_VLA(xsize)] ;
GB_cast_scalar (scalar, xcode, A->x, acode, asize) ;
GB_iso_expand (X, anz, scalar, xsize, Context) ;
}
else if (xcode == acode)
{
// copy the values from A into X, no typecast
GB_memcpy (X, A->x, anz * asize, nthreads) ;
}
else
{
// typecast the values from A into X
ASSERT (X != NULL) ;
GB_cast_array ((GB_void *) X, xcode, (GB_void *) A->x, acode,
NULL, anz, nthreads) ;
}
}
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
*p_nvals = anz ; // number of tuples extracted
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
}
|
call_hpgmg_setup.c | #ifdef USE_MPI
#include <mpi.h>
#endif
void hpgmg_setup (const int log2_box_dim,
const int target_boxes_per_rank,
const int OMP_Threads,
const int OMP_Nested,
const int requested_threading_model,
const int actual_threading_model);
/*
 * Driver: query the OpenMP configuration, initialize MPI (when built with
 * USE_MPI), and hand everything to hpgmg_setup.
 *
 * Fix: requested_threading_model / actual_threading_model were declared
 * inside #ifdef USE_MPI but passed to hpgmg_setup unconditionally, so a
 * non-MPI build did not compile.  They are now declared unconditionally
 * (defaulting to -1, the original "no model" sentinel).  MPI_Finalize is
 * also called before exit, as MPI requires.
 */
int
main (int argc, char *argv[])
{
  const int log2_box_dim = 6;
  const int target_boxes_per_rank = 1;
  int OMP_Threads = 1;
  int OMP_Nested = 0;
  int requested_threading_model = -1;   /* -1 = MPI not in use */
  int actual_threading_model = -1;
#ifdef _OPENMP
#pragma omp parallel
  {
#pragma omp master
    {
      OMP_Threads = omp_get_num_threads ();
      OMP_Nested = omp_get_nested ();
    }
  }
#endif
#ifdef USE_MPI
  requested_threading_model = MPI_THREAD_SINGLE;
#ifdef _OPENMP
  /* with OpenMP enabled, only the master thread makes MPI calls */
  requested_threading_model = MPI_THREAD_FUNNELED;
#endif
  MPI_Init_thread (&argc, &argv, requested_threading_model,
                   &actual_threading_model);
#ifdef USE_HPM // IBM HPM counters for BGQ...
  HPM_Init ();
#endif
#endif
  hpgmg_setup (log2_box_dim,
               target_boxes_per_rank,
               OMP_Threads,
               OMP_Nested, requested_threading_model, actual_threading_model);
#ifdef USE_MPI
  MPI_Finalize ();
#endif
  return 0;
}
|
yolov2.h | #ifndef YOLOV2_H
#define YOLOV2_H
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <fcntl.h>
#include <string.h>
#include <assert.h>
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
//#include "yolo_hls.h"
#ifndef FLT_MAX
#define FLT_MAX 3.402823466e+38F
#endif
typedef enum{
LOGISTIC, RELU, RELIE, LINEAR, RAMP, TANH, PLSE, LEAKY, ELU, LOGGY, STAIR, HARDTAN, LHTAN
} ACTIVATION;
typedef enum {
CONVOLUTIONAL,
DECONVOLUTIONAL,
CONNECTED,
MAXPOOL,
SOFTMAX,
DETECTION,
DROPOUT,
CROP,
ROUTE,
COST,
NORMALIZATION,
AVGPOOL,
LOCAL,
SHORTCUT,
ACTIVE,
RNN,
GRU,
LSTM,
CRNN,
BATCHNORM,
NETWORK,
XNOR,
REGION,
YOLO,
REORG,
UPSAMPLE,
LOGXENT,
L2NORM,
BLANK
} LAYER_TYPE;
struct network;
typedef struct network network;
struct layer;
typedef struct layer layer;
struct layer{
LAYER_TYPE type;
ACTIVATION activation;
void (*forward) (struct layer, struct network);
int batch_normalize;
int shortcut;
int batch;
int forced;
int flipped;
int inputs;
int outputs;
int nweights;
int nbiases;
int extra;
int truths;
int h,w,c;
int out_h, out_w, out_c;
int n;
int max_boxes;
int groups;
int size;
int side;
int stride;
int reverse;
int flatten;
int spatial;
int pad;
int sqrt;
int flip;
int index;
int binary;
int xnor;
int steps;
int hidden;
int truth;
float smooth;
float dot;
float angle;
float jitter;
float saturation;
float exposure;
float shift;
float ratio;
float learning_rate_scale;
float clip;
int softmax;
int classes;
int coords;
int background;
int rescore;
int objectness;
int joint;
int noadjust;
int reorg;
int log;
int tanh;
int *mask;
int total;
float alpha;
float beta;
float kappa;
float coord_scale;
float object_scale;
float noobject_scale;
float mask_scale;
float class_scale;
int bias_match;
int random;
float ignore_thresh;
float truth_thresh;
float thresh;
float focus;
int classfix;
int absolute;
int onlyforward;
int stopbackward;
// int dontload;
int dontsave;
// int dontloadscales;
float temperature;
float probability;
float scale;
char * cweights;
int * indexes;
int * input_layers;
int * input_sizes;
int * map;
float * rand;
float * cost;
float * state;
float * prev_state;
float * forgot_state;
float * forgot_delta;
float * state_delta;
float * combine_cpu;
float * combine_delta_cpu;
float * concat;
float * concat_delta;
float * binary_weights;
float * biases;
float * bias_updates;
float * scales;
float * scale_updates;
float * weights;
float * weight_updates;
float * delta;
float * output;
float * loss;
float * squared;
float * norms;
float * spatial_mean;
float * mean;
float * variance;
float * mean_delta;
float * variance_delta;
float * rolling_mean;
float * rolling_variance;
float * x;
float * x_norm;
float * m;
float * v;
float * bias_m;
float * bias_v;
float * scale_m;
float * scale_v;
float *z_cpu;
float *r_cpu;
float *h_cpu;
float * prev_state_cpu;
float *temp_cpu;
float *temp2_cpu;
float *temp3_cpu;
float *dh_cpu;
float *hh_cpu;
float *prev_cell_cpu;
float *cell_cpu;
float *f_cpu;
float *i_cpu;
float *g_cpu;
float *o_cpu;
float *c_cpu;
float *dc_cpu;
float * binary_input;
struct layer *input_layer;
struct layer *self_layer;
struct layer *output_layer;
struct layer *reset_layer;
struct layer *update_layer;
struct layer *state_layer;
struct layer *input_gate_layer;
struct layer *state_gate_layer;
struct layer *input_save_layer;
struct layer *state_save_layer;
struct layer *input_state_layer;
struct layer *state_state_layer;
struct layer *input_z_layer;
struct layer *state_z_layer;
struct layer *input_r_layer;
struct layer *state_r_layer;
struct layer *input_h_layer;
struct layer *state_h_layer;
struct layer *wz;
struct layer *uz;
struct layer *wr;
struct layer *ur;
struct layer *wh;
struct layer *uh;
struct layer *uo;
struct layer *wo;
struct layer *uf;
struct layer *wf;
struct layer *ui;
struct layer *wi;
struct layer *ug;
struct layer *wg;
//tree *softmax_tree;
size_t workspace_size;
};
/*
 * Release every heap buffer this function has historically owned for a
 * layer.  free(NULL) is a no-op, so the per-field null guards of the
 * original are dropped; the set and order of freed fields is unchanged.
 * NOTE(review): several struct layer buffers (loss, bias_m/v, scale_m/v,
 * the LSTM/GRU temporaries) are deliberately NOT freed here, matching the
 * original -- confirm their ownership before adding them.
 */
void free_layer(layer l)
{
    free(l.cweights);
    free(l.indexes);
    free(l.input_layers);
    free(l.input_sizes);
    free(l.map);
    free(l.rand);
    free(l.cost);
    free(l.state);
    free(l.prev_state);
    free(l.forgot_state);
    free(l.forgot_delta);
    free(l.state_delta);
    free(l.concat);
    free(l.concat_delta);
    free(l.binary_weights);
    free(l.biases);
    free(l.bias_updates);
    free(l.scales);
    free(l.scale_updates);
    free(l.weights);
    free(l.weight_updates);
    free(l.delta);
    free(l.output);
    free(l.squared);
    free(l.norms);
    free(l.spatial_mean);
    free(l.mean);
    free(l.variance);
    free(l.mean_delta);
    free(l.variance_delta);
    free(l.rolling_mean);
    free(l.rolling_variance);
    free(l.x);
    free(l.x_norm);
    free(l.m);
    free(l.v);
    free(l.z_cpu);
    free(l.r_cpu);
    free(l.h_cpu);
    free(l.binary_input);
}
//void free_layer(layer);
typedef enum {
CONSTANT, STEP, EXP, POLY, STEPS, SIG, RANDOM
} learning_rate_policy;
typedef struct network{
int n;
int batch;
size_t *seen;
int *t;
float epoch;
int subdivisions;
layer *layers;
float *output;
learning_rate_policy policy;
float learning_rate;
float momentum;
float decay;
float gamma;
float scale;
float power;
int time_steps;
int step;
int max_batches;
float *scales;
int *steps;
int num_steps;
int burn_in;
int adam;
float B1;
float B2;
float eps;
int inputs;
int outputs;
int truths;
int notruth;
int h, w, c;
int max_crop;
int min_crop;
float max_ratio;
float min_ratio;
int center;
float angle;
float aspect;
float exposure;
float saturation;
float hue;
int random;
int gpu_index;
// tree *hierarchy;
float *input;
float *truth;
float *delta;
float *workspace;
int train;
int index;
float *cost;
float clip;
} network;
network *make_network(int n);
layer get_network_output_layer(network *net);
typedef struct {
int w;
int h;
float scale;
float rad;
float dx;
float dy;
float aspect;
} augment_args;
typedef struct {
int w;
int h;
int c;
float *data;
} image;
typedef struct{
float x, y, w, h;
} box;
typedef struct detection{
box bbox;
int classes;
float *prob;
float *mask;
float objectness;
int sort_class;
} detection;
typedef struct matrix{
int rows, cols;
float **vals;
} matrix;
typedef struct{
int w, h;
matrix X;
matrix y;
int shallow;
int *num_boxes;
box **boxes;
} data;
typedef enum {
CLASSIFICATION_DATA, DETECTION_DATA, CAPTCHA_DATA, REGION_DATA, IMAGE_DATA, COMPARE_DATA, WRITING_DATA, SWAG_DATA, TAG_DATA, OLD_CLASSIFICATION_DATA, STUDY_DATA, DET_DATA, SUPER_DATA, LETTERBOX_DATA, REGRESSION_DATA, SEGMENTATION_DATA, INSTANCE_DATA
} data_type;
typedef struct load_args{
int threads;
char **paths;
char *path;
int n;
int m;
char **labels;
int h;
int w;
int out_w;
int out_h;
int nh;
int nw;
int num_boxes;
int min, max, size;
int classes;
int background;
int scale;
int center;
int coords;
float jitter;
float angle;
float aspect;
float saturation;
float exposure;
float hue;
data *d;
image *im;
image *resized;
data_type type;
// tree *hierarchy;
} load_args;
typedef struct{
int id;
float x,y,w,h;
float left, right, top, bottom;
} box_label;
//network *load_network(char *cfg, char *weights, int clear);
//load_args get_base_args(network *net);
//void free_data(data d);
typedef struct{
char *key;
char *val;
int used;
} kvp;
typedef struct node{
void *val;
struct node *next;
struct node *prev;
} node;
typedef struct list{
int size;
node *front;
node *back;
} list;
/* Report a fatal error via perror and abort: assert(0) traps in debug
 * builds, exit(-1) covers NDEBUG builds.  Never returns. */
void error(const char *s)
{
perror(s);
assert(0);
exit(-1);
}
/* Fatal-exit helper for allocation failures.  Never returns. */
void malloc_error()
{
fprintf(stderr, "Malloc error\n");
exit(-1);
}
/* Fatal-exit helper for files that could not be opened.  Never returns. */
void file_error(char *s)
{
fprintf(stderr, "Couldn't open file: %s\n", s);
exit(0);
}
/////////////////list begin
/* Allocate an empty doubly linked list (size 0, no nodes). Caller frees
 * with free_list(). */
list *make_list()
{
    list *fresh = (list *)malloc(sizeof(list));
    fresh->size = 0;
    fresh->front = NULL;
    fresh->back = NULL;
    return fresh;
}
/* Remove the back node and return its payload, or NULL on an empty list. */
void *list_pop(list *l){
    node *tail = l->back;
    if(tail == NULL) return NULL;
    void *payload = tail->val;
    l->back = tail->prev;
    if(l->back != NULL) l->back->next = NULL;
    free(tail);
    l->size -= 1;
    return payload;
}
/* Append val at the back of the list.  The list does not copy val; it only
 * stores the pointer. */
void list_insert(list *l, void *val)
{
    node *fresh = (node *)malloc(sizeof(node));
    fresh->val = val;
    fresh->next = NULL;
    fresh->prev = l->back;      /* NULL when the list is empty */
    if(l->back == NULL){
        l->front = fresh;       /* first node becomes the front too */
    }else{
        l->back->next = fresh;
    }
    l->back = fresh;
    l->size += 1;
}
/* Free a chain of nodes starting at n (payloads are not freed). */
void free_node(node *n)
{
    while(n != NULL){
        node *follower = n->next;
        free(n);
        n = follower;
    }
}
/* Free the list skeleton (all nodes plus the header).  Node payloads are
 * NOT freed -- call free_list_contents() first if the list owns them. */
void free_list(list *l)
{
free_node(l->front);
free(l);
}
/* Free every payload stored in the list; the nodes themselves survive. */
void free_list_contents(list *l)
{
    for(node *cur = l->front; cur != NULL; cur = cur->next){
        free(cur->val);
    }
}
/* Copy the list's payload pointers, front to back, into a freshly allocated
 * array of l->size entries.  Caller frees the array (not the payloads). */
void **list_to_array(list *l)
{
    void **arr = (void **)calloc(l->size, sizeof(void*));
    int pos = 0;
    for(node *cur = l->front; cur != NULL; cur = cur->next){
        arr[pos++] = cur->val;
    }
    return arr;
}
/////////////////list end
/////////////////////utils begin
/* Remove argv[index] by shifting the remaining arguments left and
 * NULL-terminating the vector. */
void del_arg(int argc, char **argv, int index)
{
    int j;
    for(j = index; j + 1 < argc; ++j){
        argv[j] = argv[j + 1];
    }
    argv[j] = 0;
}
/* Return 1 and remove the flag from argv if `arg` is present, else 0. */
int find_arg(int argc, char* argv[], char *arg)
{
    for(int i = 0; i < argc; ++i) {
        if(argv[i] == 0) continue;        /* slot already deleted */
        if(strcmp(argv[i], arg) == 0) {
            del_arg(argc, argv, i);
            return 1;
        }
    }
    return 0;
}
/* Return the integer following flag `arg` (removing both from argv), or
 * `def` when the flag is absent. */
int find_int_arg(int argc, char **argv, char *arg, int def)
{
    for(int i = 0; i + 1 < argc; ++i){
        if(argv[i] == 0) continue;
        if(strcmp(argv[i], arg) == 0){
            def = atoi(argv[i+1]);
            del_arg(argc, argv, i);   /* drop the flag ...      */
            del_arg(argc, argv, i);   /* ... and then its value */
            break;
        }
    }
    return def;
}
/* Return the float following flag `arg` (removing both from argv), or
 * `def` when the flag is absent. */
float find_float_arg(int argc, char **argv, char *arg, float def)
{
    for(int i = 0; i + 1 < argc; ++i){
        if(argv[i] == 0) continue;
        if(strcmp(argv[i], arg) == 0){
            def = atof(argv[i+1]);
            del_arg(argc, argv, i);   /* drop the flag ...      */
            del_arg(argc, argv, i);   /* ... and then its value */
            break;
        }
    }
    return def;
}
/* Return the string following flag `arg` (removing both from argv), or
 * `def` when the flag is absent.  The returned pointer aliases argv. */
char *find_char_arg(int argc, char **argv, char *arg, char *def)
{
    for(int i = 0; i + 1 < argc; ++i){
        if(argv[i] == 0) continue;
        if(strcmp(argv[i], arg) == 0){
            def = argv[i+1];
            del_arg(argc, argv, i);   /* drop the flag ...      */
            del_arg(argc, argv, i);   /* ... and then its value */
            break;
        }
    }
    return def;
}
/*
 * Read an entire file into a freshly allocated, NUL-terminated byte buffer.
 * Caller frees the result.
 *
 * Fixes: the original dereferenced a NULL FILE* when the file did not exist
 * (fopen unchecked) and ignored the fread result.  A missing file now exits
 * with the same message file_error() would print; a short read is reported
 * but the partial buffer is still returned (best effort).
 */
unsigned char *read_file(char *filename)
{
    FILE *fp = fopen(filename, "rb");
    if(!fp){
        fprintf(stderr, "Couldn't open file: %s\n", filename);
        exit(0);
    }
    fseek(fp, 0, SEEK_END);
    long size = ftell(fp);
    if(size < 0) size = 0;   /* ftell failure: treat as empty */
    fseek(fp, 0, SEEK_SET);
    unsigned char *text = (unsigned char *)calloc((size_t)size + 1, sizeof(unsigned char));
    size_t got = fread(text, 1, (size_t)size, fp);
    if(got != (size_t)size){
        fprintf(stderr, "Short read on file: %s\n", filename);
    }
    fclose(fp);
    return text;
}
/* Split s in place at every occurrence of delim (each delim becomes '\0')
 * and return a list of pointers into s, starting with s itself. */
list *split_str(char *s, char delim)
{
    size_t len = strlen(s);
    list *parts = make_list();
    list_insert(parts, s);
    for(size_t i = 0; i < len; ++i){
        if(s[i] == delim){
            s[i] = '\0';
            list_insert(parts, s + i + 1);
        }
    }
    return parts;
}
/* Remove all spaces, tabs, and newlines from s in place. */
void strip(char *s)
{
    size_t len = strlen(s);
    size_t dst = 0;
    for(size_t src = 0; src < len; ++src){
        char c = s[src];
        if(c != ' ' && c != '\t' && c != '\n'){
            s[dst++] = c;
        }
    }
    s[dst] = '\0';
}
/* Remove every occurrence of the character `bad` from s in place. */
void strip_char(char *s, char bad)
{
    size_t len = strlen(s);
    size_t dst = 0;
    for(size_t src = 0; src < len; ++src){
        char c = s[src];
        if(c != bad){
            s[dst++] = c;
        }
    }
    s[dst] = '\0';
}
/* Free n pointers stored in ptrs, then the array itself. */
void free_ptrs(void **ptrs, int n)
{
    for(int i = 0; i < n; ++i){
        free(ptrs[i]);
    }
    free(ptrs);
}
/* Read one line of arbitrary length from fp into a malloc'd buffer, growing
 * it as needed; the trailing newline is stripped.  Returns NULL at EOF.
 * Caller frees the result. */
char *fgetl(FILE *fp)
{
if(feof(fp)) return 0;
size_t size = 512;
char *line = (char *)malloc(size*sizeof(char));
if(!fgets(line, size, fp)){
free(line);
return 0;
}
size_t curr = strlen(line);
// keep reading until the newline (or EOF) is reached, doubling the buffer
// whenever it fills up
while((line[curr-1] != '\n') && !feof(fp)){
if(curr == size-1){
size *= 2;
line = (char *)realloc(line, size*sizeof(char));
if(!line) {
printf("%ld\n", size);
malloc_error();
}
}
// clamp because fgets takes an int count
// NOTE(review): INT_MAX needs <limits.h>, which the visible includes do not
// pull in directly -- confirm it arrives via another header
size_t readsize = size-curr;
if(readsize > INT_MAX) readsize = INT_MAX-1;
fgets(&line[curr], readsize, fp);
curr = strlen(line);
}
if(line[curr-1] == '\n') line[curr-1] = '\0';
return line;
}
/////////////////////utils end
////////////////////option_list begin
/* Append a key/value pair to the options list, initially unused.  Key and
 * value strings are referenced, not copied. */
void option_insert(list *l, char *key, char *val)
{
    kvp *pair = (kvp *)malloc(sizeof(kvp));
    pair->key = key;
    pair->val = val;
    pair->used = 0;
    list_insert(l, pair);
}
/*
 * Parse one "key=value" line in place (the '=' becomes '\0') and append the
 * pair to `options`.  Returns 1 on success, 0 on a malformed line.
 *
 * Fix: a line containing no '=' at all used to be inserted with a NULL
 * value, which later crashes consumers that print p->val with %s
 * (option_unused).  Such lines are now rejected.  A '=' in the final
 * position is still rejected, exactly as before.
 */
int read_option(char *s, list *options)
{
    size_t i;
    size_t len = strlen(s);
    char *val = 0;
    for(i = 0; i < len; ++i){
        if(s[i] == '='){
            s[i] = '\0';
            val = s+i+1;
            break;
        }
    }
    if(val == 0 || i == len-1) return 0;
    char *key = s;
    option_insert(options, key, val);
    return 1;
}
/* Warn on stderr about every option that was parsed but never looked up. */
void option_unused(list *l)
{
    for(node *cur = l->front; cur; cur = cur->next){
        kvp *pair = (kvp *)cur->val;
        if(pair->used == 0){
            fprintf(stderr, "Unused field: '%s = %s'\n", pair->key, pair->val);
        }
    }
}
/* Look up `key` in the options list; mark it used and return its value,
 * or NULL when absent. */
char *option_find(list *l, char *key)
{
    for(node *cur = l->front; cur; cur = cur->next){
        kvp *pair = (kvp *)cur->val;
        if(strcmp(pair->key, key) == 0){
            pair->used = 1;
            return pair->val;
        }
    }
    return 0;
}
/* String option lookup; falls back to `def` (announced on stderr when
 * non-NULL). */
char *option_find_str(list *l, char *key, char *def)
{
    char *found = option_find(l, key);
    if(found) return found;
    if(def) fprintf(stderr, "%s: Using default '%s'\n", key, def);
    return def;
}
/* Integer option lookup; falls back to `def`, announced on stderr. */
int option_find_int(list *l, char *key, int def)
{
    char *text = option_find(l, key);
    if(text) return atoi(text);
    fprintf(stderr, "%s: Using default '%d'\n", key, def);
    return def;
}
/* Integer option lookup; silently falls back to `def`. */
int option_find_int_quiet(list *l, char *key, int def)
{
    char *text = option_find(l, key);
    return text ? atoi(text) : def;
}
/* Float option lookup; silently falls back to `def`. */
float option_find_float_quiet(list *l, char *key, float def)
{
    char *text = option_find(l, key);
    return text ? atof(text) : def;
}
/* Float option lookup; falls back to `def`, announced on stderr. */
float option_find_float(list *l, char *key, float def)
{
    char *text = option_find(l, key);
    if(text) return atof(text);
    fprintf(stderr, "%s: Using default '%lf'\n", key, def);
    return def;
}
/* Parse a "key=value" config file into an options list.  Blank lines and
 * lines starting with '#' or ';' are skipped; malformed lines are reported
 * with their line number and dropped.  Successfully parsed lines stay
 * allocated because the options list aliases into them. */
list *read_data_cfg(char *filename)
{
FILE *file = fopen(filename, "r");
if(file == 0) file_error(filename);
char *line;
int nu = 0;
list *options = make_list();
while((line=fgetl(file)) != 0){
++ nu;
strip(line);
switch(line[0]){
case '\0':
case '#':
case ';':
free(line);
break;
default:
if(!read_option(line, options)){
fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line);
free(line);
}
break;
}
}
fclose(file);
return options;
}
///////////////////option_list end
/* Build an image header with the given dimensions and no pixel storage. */
image make_empty_image(int w, int h, int c)
{
    image out;
    out.w = w;
    out.h = h;
    out.c = c;
    out.data = 0;
    return out;
}
/* Read a file of newline-separated paths into a list of malloc'd strings.
 * Exits via file_error when the file cannot be opened. */
list *get_paths(char *filename)
{
    FILE *file = fopen(filename, "r");
    if(file == 0) file_error(filename);
    list *lines = make_list();
    char *path;
    while((path = fgetl(file)) != 0){
        list_insert(lines, path);
    }
    fclose(file);
    return lines;
}
/* Load class-label strings (one per line) into a NULL-free array.  The
 * strings remain owned by the returned array; only the list shell is freed. */
char **get_labels(char *filename)
{
    list *plist = get_paths(filename);
    char **labels = (char **)list_to_array(plist);
    free_list(plist);
    return labels;
}
/* Allocate a zero-filled w*h*c float image (CHW layout). */
image make_image(int w, int h, int c)
{
    image out = make_empty_image(w, h, c);
    out.data = (float *)calloc(h*w*c, sizeof(float));
    return out;
}
/* Read pixel (x, y) of channel c; indices are asserted in range. */
static float get_pixel(image m, int x, int y, int c)
{
    assert(x < m.w && y < m.h && c < m.c);
    return m.data[(c*m.h + y)*m.w + x];
}
/* Write pixel (x, y) of channel c; silently ignores out-of-range indices. */
static void set_pixel(image m, int x, int y, int c, float val)
{
    if (x < 0 || y < 0 || c < 0 || x >= m.w || y >= m.h || c >= m.c) return;
    assert(x < m.w && y < m.h && c < m.c);
    m.data[(c*m.h + y)*m.w + x] = val;
}
/* Accumulate val into pixel (x, y) of channel c; indices are asserted. */
static void add_pixel(image m, int x, int y, int c, float val)
{
    assert(x < m.w && y < m.h && c < m.c);
    m.data[(c*m.h + y)*m.w + x] += val;
}
/* Release the image's pixel buffer; free(NULL) is a no-op so no guard is
 * needed. */
void free_image(image m)
{
    free(m.data);
}
/* Bilinear resize in two separable passes: horizontal into `part`
 * (w x im.h), then vertical into `resized` (w x h).
 * NOTE(review): for h == 1 or w == 1 targets the scale factors divide by
 * zero; the w == 1 case is shielded by the c == w-1 branch but h == 1 can
 * compute sy from an infinite h_scale before the r == h-1 guard -- confirm
 * callers never request single-row outputs. */
image resize_image(image im, int w, int h)
{
image resized = make_image(w, h, im.c);
image part = make_image(w, im.h, im.c);
int r, c, k;
float w_scale = (float)(im.w - 1) / (w - 1);
float h_scale = (float)(im.h - 1) / (h - 1);
// pass 1: horizontal interpolation, one output column at a time
for(k = 0; k < im.c; ++k){
for(r = 0; r < im.h; ++r){
for(c = 0; c < w; ++c){
float val = 0;
if(c == w-1 || im.w == 1){
val = get_pixel(im, im.w-1, r, k);
} else {
float sx = c*w_scale;
int ix = (int) sx;
float dx = sx - ix;
val = (1 - dx) * get_pixel(im, ix, r, k) + dx * get_pixel(im, ix+1, r, k);
}
set_pixel(part, c, r, k, val);
}
}
}
// pass 2: vertical interpolation, blending row iy and row iy+1 of `part`
for(k = 0; k < im.c; ++k){
for(r = 0; r < h; ++r){
float sy = r*h_scale;
int iy = (int) sy;
float dy = sy - iy;
for(c = 0; c < w; ++c){
float val = (1-dy) * get_pixel(part, c, iy, k);
set_pixel(resized, c, r, k, val);
}
if(r == h-1 || im.h == 1) continue;
for(c = 0; c < w; ++c){
float val = dy * get_pixel(part, c, iy+1, k);
add_pixel(resized, c, r, k, val);
}
}
}
free_image(part);
return resized;
}
/* Set every pixel of the image to the constant s. */
void fill_image(image m, float s)
{
    const int total = m.h*m.w*m.c;
    for(int i = 0; i < total; ++i){
        m.data[i] = s;
    }
}
/* Paste `source` into `dest` with its top-left corner at (dx, dy);
 * out-of-bounds pixels are dropped by set_pixel's range check. */
void embed_image(image source, image dest, int dx, int dy)
{
    for(int k = 0; k < source.c; ++k){
        for(int y = 0; y < source.h; ++y){
            for(int x = 0; x < source.w; ++x){
                set_pixel(dest, dx+x, dy+y, k, get_pixel(source, x, y, k));
            }
        }
    }
}
/* Scale `im` to fit inside w x h while preserving aspect ratio, centered on
 * a 0.5-gray canvas (the YOLO letterbox preprocessing step). */
image letterbox_image(image im, int w, int h)
{
int new_w = im.w;
int new_h = im.h;
// pick the limiting dimension so the scaled image fits entirely
if (((float)w/im.w) < ((float)h/im.h)) {
new_w = w;
new_h = (im.h * w)/im.w;
} else {
new_h = h;
new_w = (im.w * h)/im.h;
}
image resized = resize_image(im, new_w, new_h);
image boxed = make_image(w, h, im.c);
fill_image(boxed, .5);
//int i;
//for(i = 0; i < boxed.w*boxed.h*boxed.c; ++i) boxed.data[i] = 0;
embed_image(resized, boxed, (w-new_w)/2, (h-new_h)/2);
free_image(resized);
return boxed;
}
/* Load an image file via stb_image, converting interleaved HWC bytes into
 * planar CHW floats in [0, 1].  `channels` of 0 keeps the file's channel
 * count; exits with a message on failure. */
image load_image_stb(char *filename, int channels)
{
int w, h, c;
unsigned char *data = stbi_load(filename, &w, &h, &c, channels);
if (!data) {
fprintf(stderr, "Cannot load image \"%s\"\nSTB Reason: %s\n", filename, stbi_failure_reason());
exit(0);
}
if(channels) c = channels;
int i,j,k;
image im = make_image(w, h, c);
// de-interleave: src is row-major HWC, dst is planar CHW
for(k = 0; k < c; ++k){
for(j = 0; j < h; ++j){
for(i = 0; i < w; ++i){
int dst_index = i + w*j + w*h*k;
int src_index = k + c*i + c*w*j;
im.data[dst_index] = (float)data[src_index]/255.;
}
}
}
free(data);
return im;
}
/* Write the planar CHW float image as "<name>.png", re-interleaving to HWC
 * bytes for stb_image_write.  Failures are reported on stderr only. */
void save_image_png(image im, const char *name)
{
char buff[256];
//sprintf(buff, "%s (%d)", name, windows);
sprintf(buff, "%s.png", name);
unsigned char *data = (unsigned char *)calloc(im.w*im.h*im.c, sizeof(char));
int i,k;
// planar CHW floats in [0,1] -> interleaved HWC bytes
for(k = 0; k < im.c; ++k){
for(i = 0; i < im.w*im.h; ++i){
data[i*im.c+k] = (unsigned char) (255*im.data[i + k*im.w*im.h]);
}
}
int success = stbi_write_png(buff, im.w, im.h, im.c, data, im.w*im.c);
free(data);
if(!success) fprintf(stderr, "Failed to write image %s\n", buff);
}
/*
 * Load the 8-size glyph atlas ("labels/<ascii>_<size>.png") used to draw
 * detection labels.  Returns an array of 8 size rows, each with 128 slots
 * indexed by ASCII code (printable range 32..126 populated, the rest
 * zeroed).
 *
 * Fix: the outer table stores image* pointers but was allocated with
 * sizeof(image) -- wrong element size (it only "worked" because
 * sizeof(image) >= sizeof(image *)).  Allocate with the size of what is
 * actually stored.
 */
image **load_alphabet()
{
    int i, j;
    const int nsize = 8;
    image **alphabets = (image **)calloc(nsize, sizeof(image *));
    for(j = 0; j < nsize; ++j){
        alphabets[j] = (image *)calloc(128, sizeof(image));
        for(i = 32; i < 127; ++i){
            char buff[256];
            sprintf(buff, "labels/%d_%d.png", i, j);
            //alphabets[j][i] = load_image_color(buff, 0, 0);
            alphabets[j][i] = load_image_stb(buff, 3);
        }
    }
    return alphabets;
}
///////////////////activation begin
static inline float stair_activate(float x)
{
int n = floor(x);
if (n%2 == 0) return floor(x/2.);
else return (x - n) + floor(x/2.);
}
/* Hard tanh: clamp x to the interval [-1, 1]. */
static inline float hardtan_activate(float x)
{
    if (x > 1) return 1;
    return (x < -1) ? -1 : x;
}
static inline float linear_activate(float x){return x;}
static inline float logistic_activate(float x){return 1./(1. + exp(-x));}
static inline float loggy_activate(float x){return 2./(1. + exp(-x)) - 1;}
static inline float relu_activate(float x){return x*(x>0);}
static inline float elu_activate(float x){return (x >= 0)*x + (x < 0)*(exp(x)-1);}
static inline float relie_activate(float x){return (x>0) ? x : .01*x;}
static inline float ramp_activate(float x){return x*(x>0)+.1*x;}
static inline float leaky_activate(float x){return (x>0) ? x : .1*x;}
static inline float tanh_activate(float x){return (exp(2*x)-1)/(exp(2*x)+1);}
// Piecewise-linear sigmoid-like activation: steep in [-4,4], slope .01 outside.
static inline float plse_activate(float x)
{
    if(x > 4)  return .01 * (x - 4) + 1;
    if(x < -4) return .01 * (x + 4);
    return .125*x + .5;
}
// Leaky hard tanh on [0,1]: slope .001 outside the unit interval.
static inline float lhtan_activate(float x)
{
    if(x > 1) return .001*(x-1) + 1;
    if(x < 0) return .001*x;
    return x;
}
// Derivative of lhtan_activate: 1 inside (0,1), .001 outside.
static inline float lhtan_gradient(float x)
{
    return (x > 0 && x < 1) ? 1 : .001;
}
// Derivative of hardtan_activate: 1 inside (-1,1), 0 outside.
static inline float hardtan_gradient(float x)
{
    return (x > -1 && x < 1) ? 1 : 0;
}
static inline float linear_gradient(float x){return 1;}
static inline float logistic_gradient(float x){return (1-x)*x;}
static inline float loggy_gradient(float x)
{
float y = (x+1.)/2.;
return 2*(1-y)*y;
}
static inline float stair_gradient(float x)
{
if (floor(x) == x) return 0;
return 1;
}
static inline float relu_gradient(float x){return (x>0);}
static inline float elu_gradient(float x){return (x >= 0) + (x < 0)*(x + 1);}
static inline float relie_gradient(float x){return (x>0) ? 1 : .01;}
static inline float ramp_gradient(float x){return (x>0)+.1;}
static inline float leaky_gradient(float x){return (x>0) ? 1 : .1;}
static inline float tanh_gradient(float x){return 1-x*x;}
static inline float plse_gradient(float x){return (x < 0 || x > 1) ? .01 : .125;}
// Name of activation `a` for printing/serialization; unknown values fall
// back to "relu" (mirroring get_activation's fallback).
char *get_activation_string(ACTIVATION a)
{
    if (a == LOGISTIC) return "logistic";
    if (a == LOGGY)    return "loggy";
    if (a == RELU)     return "relu";
    if (a == ELU)      return "elu";
    if (a == RELIE)    return "relie";
    if (a == RAMP)     return "ramp";
    if (a == LINEAR)   return "linear";
    if (a == TANH)     return "tanh";
    if (a == PLSE)     return "plse";
    if (a == LEAKY)    return "leaky";
    if (a == STAIR)    return "stair";
    if (a == HARDTAN)  return "hardtan";
    if (a == LHTAN)    return "lhtan";
    return "relu";
}
// Map an activation name from the cfg file to its enum value.
// Unrecognized names warn and default to RELU.
ACTIVATION get_activation(char *s)
{
    static const struct { const char *name; ACTIVATION a; } table[] = {
        {"logistic", LOGISTIC}, {"loggy", LOGGY}, {"relu", RELU},
        {"elu", ELU}, {"relie", RELIE}, {"plse", PLSE},
        {"hardtan", HARDTAN}, {"lhtan", LHTAN}, {"linear", LINEAR},
        {"ramp", RAMP}, {"leaky", LEAKY}, {"tanh", TANH}, {"stair", STAIR},
    };
    int i;
    for(i = 0; i < (int)(sizeof(table)/sizeof(table[0])); ++i){
        if (strcmp(s, table[i].name) == 0) return table[i].a;
    }
    fprintf(stderr, "Couldn't find activation function %s, going with ReLU\n", s);
    return RELU;
}
// Apply activation `a` to scalar x; unknown activations yield 0.
float activate(float x, ACTIVATION a)
{
    if (a == LINEAR)   return linear_activate(x);
    if (a == LOGISTIC) return logistic_activate(x);
    if (a == LOGGY)    return loggy_activate(x);
    if (a == RELU)     return relu_activate(x);
    if (a == ELU)      return elu_activate(x);
    if (a == RELIE)    return relie_activate(x);
    if (a == RAMP)     return ramp_activate(x);
    if (a == LEAKY)    return leaky_activate(x);
    if (a == TANH)     return tanh_activate(x);
    if (a == PLSE)     return plse_activate(x);
    if (a == STAIR)    return stair_activate(x);
    if (a == HARDTAN)  return hardtan_activate(x);
    if (a == LHTAN)    return lhtan_activate(x);
    return 0;
}
// Apply activation `a` in place to each of the n entries of x.
void activate_array(float *x, const int n, const ACTIVATION a)
{
    float *p;
    for(p = x; p != x + n; ++p){
        *p = activate(*p, a);
    }
}
// Derivative of activation `a` evaluated at x (for logistic/loggy/tanh,
// x is the activation output); unknown activations yield 0.
float gradient(float x, ACTIVATION a)
{
    if (a == LINEAR)   return linear_gradient(x);
    if (a == LOGISTIC) return logistic_gradient(x);
    if (a == LOGGY)    return loggy_gradient(x);
    if (a == RELU)     return relu_gradient(x);
    if (a == ELU)      return elu_gradient(x);
    if (a == RELIE)    return relie_gradient(x);
    if (a == RAMP)     return ramp_gradient(x);
    if (a == LEAKY)    return leaky_gradient(x);
    if (a == TANH)     return tanh_gradient(x);
    if (a == PLSE)     return plse_gradient(x);
    if (a == STAIR)    return stair_gradient(x);
    if (a == HARDTAN)  return hardtan_gradient(x);
    if (a == LHTAN)    return lhtan_gradient(x);
    return 0;
}
///////////////////activation end
// Strided vector copy (BLAS scopy-like): Y[i*INCY] = X[i*INCX] for i < N.
void copy_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    int idx;
    for(idx = 0; idx < N; ++idx){
        Y[idx*INCY] = X[idx*INCX];
    }
}
// Strided fill: X[i*INCX] = ALPHA for i < N.
void fill_cpu(int N, float ALPHA, float *X, int INCX)
{
    int idx;
    for(idx = 0; idx < N; ++idx){
        X[idx*INCX] = ALPHA;
    }
}
// Residual combine: out = s1*out + s2*add over the overlapping region of
// the two feature maps. Integer strides handle the case where one map is
// a multiple of the other's size in each spatial dimension.
void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float s1, float s2, float *out)
{
    int stride = w1/w2;   // >1 when the source (add) map is larger
    int sample = w2/w1;   // >1 when the destination (out) map is larger
    assert(stride == h1/h2);
    assert(sample == h2/h1);
    if(stride < 1) stride = 1;
    if(sample < 1) sample = 1;
    int minw = (w1 < w2) ? w1 : w2;
    int minh = (h1 < h2) ? h1 : h2;
    int minc = (c1 < c2) ? c1 : c2;
    int b;
    for(b = 0; b < batch; ++b){
        int k;
        for(k = 0; k < minc; ++k){
            int j;
            for(j = 0; j < minh; ++j){
                int i;
                for(i = 0; i < minw; ++i){
                    int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
                    int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
                    out[out_index] = s1*out[out_index] + s2*add[add_index];
                }
            }
        }
    }
}
// Element-wise residual add: output = input + output of layer l.index.
// Iterates the full w*h*c volume once (same traversal order as a
// c/h/w triple loop); batch is not iterated here.
void forward_shortcut_layer(const layer l, network net)
{
    float *add = net.layers[l.index].output;
    float *out = l.output;
    float *in  = net.input;
    int total = l.w * l.h * l.c;
    int idx;
    for(idx = 0; idx < total; ++idx){
        out[idx] = in[idx] + add[idx];
    }
}
// Build a shortcut (residual) layer: adds the output of layer `index`
// (w2 x h2 x c2) onto the current w x h x c stream.
layer make_shortcut_layer(int batch, int index, int w, int h, int c, int w2, int h2, int c2)
{
    fprintf(stderr, "res %3d %4d x%4d x%4d -> %4d x%4d x%4d\n",index, w2,h2,c2, w,h,c);
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = SHORTCUT;
    l.batch = batch;
    l.index = index;      // source layer for the residual branch
    l.w = w2;
    l.h = h2;
    l.c = c2;
    l.out_w = w;
    l.out_h = h;
    l.out_c = c;
    l.outputs = w*h*c;
    l.inputs = l.outputs;
    l.output = (float *)calloc(l.outputs*batch, sizeof(float));
    l.forward = forward_shortcut_layer;
    return l;
}
// Output height of a convolution: (h + 2*pad - size)/stride + 1.
int convolutional_out_height(layer l)
{
    int padded = l.h + 2*l.pad;
    return (padded - l.size) / l.stride + 1;
}
// Output width of a convolution: (w + 2*pad - size)/stride + 1.
int convolutional_out_width(layer l)
{
    int padded = l.w + 2*l.pad;
    return (padded - l.size) / l.stride + 1;
}
// Bytes needed for layer l's im2col scratch buffer:
// (out_h*out_w) columns x (size*size*c/groups) rows of floats.
static size_t get_workspace_size(layer l){
    size_t n = (size_t)l.out_h*l.out_w*l.size*l.size*l.c/l.groups;
    return n * sizeof(float);
}
// Add biases[i] to every one of the `size` spatial entries of channel i,
// for each of the `batch` items.
void add_bias(float *output, float *biases, int batch, int n, int size)
{
    int b, i, j;
    for(b = 0; b < batch; ++b){
        for(i = 0; i < n; ++i){
            float *channel = output + (b*n + i)*size;
            for(j = 0; j < size; ++j){
                channel[j] += biases[i];
            }
        }
    }
}
// Multiply every one of the `size` spatial entries of channel i by
// scales[i], for each of the `batch` items.
void scale_bias(float *output, float *scales, int batch, int n, int size)
{
    int b, i, j;
    for(b = 0; b < batch; ++b){
        for(i = 0; i < n; ++i){
            float *channel = output + (b*n + i)*size;
            for(j = 0; j < size; ++j){
                channel[j] *= scales[i];
            }
        }
    }
}
// Fetch pixel (row, col) of `channel` from an image, where coordinates are
// given in the zero-padded frame; reads into the padding return 0.
float im2col_get_pixel(float *im, int height, int width, int channels,
        int row, int col, int channel, int pad)
{
    int r = row - pad;
    int c = col - pad;
    if (r < 0 || r >= height || c < 0 || c >= width) return 0;
    return im[c + width*(r + height*channel)];
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
// im2col lowering (from Caffe): unroll the input so that each output
// position's receptive field becomes one column, turning convolution
// into a single matrix multiply. data_col is
// (channels*ksize*ksize) x (height_col*width_col).
void im2col_cpu(float* data_im,
        int channels, int height, int width,
        int ksize, int stride, int pad, float* data_col)
{
    int height_col = (height + 2*pad - ksize) / stride + 1;
    int width_col = (width + 2*pad - ksize) / stride + 1;
    int channels_col = channels * ksize * ksize;
    int c;
    for (c = 0; c < channels_col; ++c) {
        // decompose the unrolled row index into (channel, kernel row, kernel col)
        int w_offset = c % ksize;
        int h_offset = (c / ksize) % ksize;
        int c_im = c / ksize / ksize;
        int h;
        for (h = 0; h < height_col; ++h) {
            int w;
            for (w = 0; w < width_col; ++w) {
                int im_row = h_offset + h * stride;
                int im_col = w_offset + w * stride;
                int col_index = (c * height_col + h) * width_col + w;
                data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
            }
        }
    }
}
// C += ALPHA * A*B for row-major MxK A, KxN B, MxN C (no transposes).
// FIX: j and k were declared outside the `#pragma omp parallel for`,
// making them shared across threads — a data race whenever OpenMP is
// enabled. They are now declared inside the parallel loop body so each
// thread gets its own copy. The deprecated `register` qualifier is dropped.
void gemm_nn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i;
    #pragma omp parallel for
    for(i = 0; i < M; ++i){
        int k;
        for(k = 0; k < K; ++k){
            const float A_PART = ALPHA*A[i*lda+k];
            int j;
            for(j = 0; j < N; ++j){
                C[i*ldc+j] += A_PART*B[k*ldb+j];
            }
        }
    }
}
// C = ALPHA*A*B + BETA*C. Only the no-transpose/no-transpose path is
// implemented in this trimmed build; other TA/TB combinations leave C
// as BETA*C.
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    // Pre-scale C by BETA, then accumulate the product.
    int r, col;
    for(r = 0; r < M; ++r){
        for(col = 0; col < N; ++col){
            C[r*ldc + col] *= BETA;
        }
    }
    if(!TA && !TB){
        gemm_nn(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    }
}
// BLAS-style sgemm front end: C = ALPHA*op(A)*op(B) + BETA*C, with TA/TB
// selecting transposition. Simply forwards to the CPU implementation
// (which only implements the TA==TB==0 case here).
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
int b, f, i;
for(b = 0; b < batch; ++b){
for(f = 0; f < filters; ++f){
for(i = 0; i < spatial; ++i){
int index = b*filters*spatial + f*spatial + i;
x[index] = (x[index] - mean[f])/(sqrt(variance[f]) + .000001f);
}
}
}
}
// Inference-time batch normalization for a convolutional layer: normalize
// each output channel with the stored rolling mean/variance, then apply
// the learned per-channel scale and bias.
void forward_batchnorm_layer(layer l, network net)//for conv
{
    normalize_cpu(l.output, l.rolling_mean, l.rolling_variance, l.batch, l.out_c, l.out_h*l.out_w);
    scale_bias(l.output, l.scales, l.batch, l.out_c, l.out_h*l.out_w);
    add_bias(l.output, l.biases, l.batch, l.out_c, l.out_h*l.out_w);
}
// Direct (non-im2col) zero-padded convolution.
// NOTE(review): despite the name no ReLU is applied here — the raw sum is
// stored; activation happens elsewhere.
void CONV_Padding_Relu(float *Input,float *Output,float *Weight,const int InFM_num,const int OutFM_num,const int Kernel_size,const int Kernel_stride,const int Input_w,const int Input_h,const int Padding)
{
    const int output_w = (Input_w - Kernel_size + 2*Padding)/Kernel_stride + 1 ;
    const int output_h = (Input_h - Kernel_size + 2*Padding)/Kernel_stride + 1 ;
    int x, y, of, inf, m, n;
    for(of = 0; of < OutFM_num; of++){
        for(y = 0; y < output_h; y++){
            for(x = 0; x < output_w; x++){
                float acc = 0.0;
                for(inf = 0; inf < InFM_num; inf++){
                    // top-left of this output position's receptive field (may be negative in the padding)
                    int input_offset = inf*Input_w*Input_h + (y*Kernel_stride - Padding)*Input_w + x*Kernel_stride - Padding;
                    int kernel_offset = of*InFM_num*Kernel_size*Kernel_size + inf*Kernel_size*Kernel_size;
                    for(m = 0; m < Kernel_size; m++){
                        for(n = 0; n < Kernel_size; n++){
                            int col = x*Kernel_stride + n - Padding;
                            int row = y*Kernel_stride + m - Padding;
                            // skip taps that fall inside the zero padding
                            if(col >= 0 && col < Input_w && row >= 0 && row < Input_h)
                                acc += Weight[kernel_offset + m*Kernel_size + n]*Input[input_offset + m*Input_w + n];
                        }
                    }
                }
                Output[of*output_w*output_h + y*output_w + x] = acc;
            }
        }
    }
}
// Forward pass for a convolutional layer: zero the output buffer, lower
// the input with im2col, multiply by the weight matrix with gemm, then
// apply batchnorm (or plain bias) and the activation.
// NOTE(review): the active path calls im2col/gemm once on net.input, so it
// effectively assumes batch == 1 and groups == 1 — the grouped, batched
// variant is preserved (commented out) below; confirm with callers.
void forward_convolutional_layer(layer l, network net)
{
    int i, j;   // unused by the simplified path below
    fill_cpu(l.outputs*l.batch, 0, l.output, 1);
    //printf("c=%d,n=%d,size=%d,stride=%d,w=%d,h=%d,pad=%d\n",l.c,l.n,l.size,l.stride,l.w,l.h,l.pad);
    //int m = l.n/l.groups;
    //int k = l.size*l.size*l.c/l.groups;
    //int n = l.out_w*l.out_h;
    //for(i = 0; i < l.batch; ++i){
    //    for(j = 0; j < l.groups; ++j){
    //        float *a = l.weights + j*l.nweights/l.groups;
    //        float *b = net.workspace;
    //        float *c = l.output + (i*l.groups + j)*n*m;
    //        im2col_cpu(net.input + (i*l.groups + j)*l.c/l.groups*l.h*l.w,
    //            l.c/l.groups, l.h, l.w, l.size, l.stride, l.pad, b);
    //        gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
    //    }
    //}
    int m = l.n;                 // rows of the weight matrix: one per filter
    int k = l.size*l.size*l.c;   // shared dimension: weights per filter
    int n = l.out_w*l.out_h;     // columns: output spatial positions
    float *a = l.weights;
    float *b = net.workspace;    // im2col scratch buffer
    float *c = l.output;
    im2col_cpu(net.input,l.c, l.h, l.w, l.size, l.stride, l.pad, b);
    gemm(0,0,m,n,k,1,a,k,b,n,1,c,n);
    //CONV_Padding_Relu(net.input,l.output,l.weights,l.c,l.n,l.size,l.stride,l.w,l.h,l.pad);
    if(l.batch_normalize){
        forward_batchnorm_layer(l, net);
    } else {
        add_bias(l.output, l.biases, l.batch, l.n, l.out_h*l.out_w);
    }
    activate_array(l.output, l.outputs*l.batch, l.activation);
}
// Construct a convolutional layer descriptor: n filters of size x size over
// a h x w x c input, with the given stride/padding/activation.
// NOTE(review): weights, biases, output, and batchnorm buffers are NOT
// allocated here (the allocations are commented out) — presumably they are
// attached elsewhere, e.g. when weights are loaded; confirm before use.
layer make_convolutional_layer(int batch, int h, int w, int c, int n, int groups, int size, int stride, int padding, ACTIVATION activation, int batch_normalize, int binary, int xnor, int adam)
{
    int i;
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = CONVOLUTIONAL;
    l.groups = groups;
    l.h = h;
    l.w = w;
    l.c = c;
    l.n = n;
    l.binary = binary;
    l.xnor = xnor;
    l.batch = batch;
    l.stride = stride;
    l.size = size;
    l.pad = padding;
    l.batch_normalize = batch_normalize;
    // l.weights = (float *)calloc(c/groups*n*size*size, sizeof(float));
    // l.biases = (float *)calloc(n, sizeof(float));
    l.nweights = c/groups*n*size*size;
    l.nbiases = n;
    int out_w = convolutional_out_width(l);
    int out_h = convolutional_out_height(l);
    l.out_h = out_h;
    l.out_w = out_w;
    l.out_c = n;
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = l.w * l.h * l.c;
    // l.output = (float *)calloc(l.batch*l.outputs, sizeof(float));
    l.forward = forward_convolutional_layer;
    if(batch_normalize){
        // l.scales = (float *)calloc(n, sizeof(float));
        // l.rolling_mean = (float *)calloc(n, sizeof(float));
        //l.rolling_variance = (float *)calloc(n, sizeof(float));
    }
    l.workspace_size = get_workspace_size(l);
    l.activation = activation;
    fprintf(stderr, "conv %5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BFLOPs\n", n, size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c, (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.);
    return l;
}
// Nearest-neighbor up-sampling: forward writes scale*in into the enlarged
// (w*stride x h*stride) map; forward==0 accumulates scale*out back into in
// (the backward pass).
void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    int b, k, y, x;
    for(b = 0; b < batch; ++b){
        for(k = 0; k < c; ++k){
            for(y = 0; y < h*stride; ++y){
                for(x = 0; x < w*stride; ++x){
                    int in_index = b*w*h*c + k*w*h + (y/stride)*w + x/stride;
                    int out_index = b*w*h*c*stride*stride + k*w*h*stride*stride + y*w*stride + x;
                    if(forward) out[out_index] = scale*in[in_index];
                    else in[in_index] += scale*out[out_index];
                }
            }
        }
    }
}
// Nearest-neighbor upsample of net.input into l.output (single batch item;
// l.scale is not applied on this simplified path).
void forward_upsample_layer(const layer l, network net)
{
    int ch, row, col;
    for(ch = 0; ch < l.c; ++ch){
        for(row = 0; row < l.h*l.stride; ++row){
            for(col = 0; col < l.w*l.stride; ++col){
                int src = ch*l.w*l.h + (row/l.stride)*l.w + col/l.stride;
                int dst = ch*l.w*l.h*l.stride*l.stride + row*l.w*l.stride + col;
                l.output[dst] = net.input[src];
            }
        }
    }
}
// Build an upsample layer; a negative stride means downsample by |stride|.
layer make_upsample_layer(int batch, int w, int h, int c, int stride)
{
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = UPSAMPLE;
    l.batch = batch;
    l.w = w;
    l.h = h;
    l.c = c;
    l.out_w = w*stride;
    l.out_h = h*stride;
    l.out_c = c;
    if(stride < 0){
        stride = -stride;
        l.reverse = 1;
        l.out_w = w/stride;
        l.out_h = h/stride;
    }
    l.stride = stride;
    l.outputs = l.out_w*l.out_h*l.out_c;
    l.inputs = l.w*l.h*l.c;
    l.output = (float *)calloc(l.outputs*batch, sizeof(float));
    l.forward = forward_upsample_layer;
    if(l.reverse) fprintf(stderr, "downsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
    else fprintf(stderr, "upsample %2dx %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
    return l;
}
// Concatenate the outputs of the l.n source layers into l.output, in order.
void forward_route_layer(const layer l, network net)
{
    int i;
    float *dst = l.output;
    for(i = 0; i < l.n; ++i){
        int src_layer = l.input_layers[i];
        int count = l.input_sizes[i];
        copy_cpu(count, net.layers[src_layer].output, 1, dst, 1);
        dst += count;
    }
}
// Build a route (concatenation) layer over `n` source layers. Takes
// ownership of the input_layers/input_sizes arrays (stored, not copied).
layer make_route_layer(int batch, int n, int *input_layers, int *input_sizes)
{
    fprintf(stderr,"route ");
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = ROUTE;
    l.batch = batch;
    l.n = n;
    l.input_layers = input_layers;
    l.input_sizes = input_sizes;
    int i;
    int total = 0;
    for(i = 0; i < n; ++i){
        fprintf(stderr," %d", input_layers[i]);
        total += input_sizes[i];
    }
    fprintf(stderr, "\n");
    l.outputs = total;
    l.inputs = total;
    // NOTE(review): l.output allocation is commented out in the original;
    // presumably attached elsewhere — confirm before running forward.
    l.forward = forward_route_layer;
    return l;
}
// Flat index into a YOLO/region layer's output for (batch, anchor, entry).
// `location` packs anchor*w*h + spatial cell; `entry` selects the field
// within a prediction (0..3 = box coords, 4 = objectness, 5.. = classes).
static int entry_index(layer l, int batch, int location, int entry)
{
    int n = location / (l.w*l.h);    // which anchor
    int loc = location % (l.w*l.h);  // which spatial cell
    return batch*l.outputs + n*l.w*l.h*(4+l.classes+1) + entry*l.w*l.h + loc;
}
// Inference-only forward pass for a YOLOv3 detection layer: copy the input
// through, then logistic-activate the x,y box offsets (first 2 entries per
// anchor) and the objectness + class scores. Box w,h (entries 2,3) stay raw.
void forward_yolo_layer(const layer l, network net)
{
    int i,j,b,t,n;   // only b and n are used in this trimmed build
    //char line[256];
    //FILE *fp3;
    //char filename[256];
    //sprintf(filename, "yolo_layer_%d.txt", l.outputs);
    //printf("YOLO_layer:outputs=%d,%s\n",l.outputs,filename);
    // if( (fp3 = fopen(filename, "w")) == NULL)fprintf(stderr,"CANNOT OPEN\n");
    //int x;
    // for( x = 0; x < l.outputs; x++)
    //{
    //  sprintf(line, "%f\n", net.input[x]);
    //  if(fputs(line,fp3)<0)fprintf(stderr,"write FILE failed\n");
    // }
    // fclose(fp3);
    memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
    for (b = 0; b < l.batch; ++b){
        for(n = 0; n < l.n; ++n){
            // squash box x,y offsets into (0,1)
            int index = entry_index(l, b, n*l.w*l.h, 0);
            activate_array(l.output + index, 2*l.w*l.h, LOGISTIC);
            // squash objectness and all class scores into (0,1)
            index = entry_index(l, b, n*l.w*l.h, 4);
            activate_array(l.output + index, (1+l.classes)*l.w*l.h, LOGISTIC);
        }
    }
    return ;
}
// Construct a YOLOv3 detection layer over a w x h grid with n active
// anchors out of `total` defined anchors and `classes` categories.
layer make_yolo_layer(int batch, int w, int h, int n, int total, int *mask, int classes)
{
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = YOLO;
    l.n = n;
    l.total = total;
    l.batch = batch;
    l.h = h;
    l.w = w;
    // each anchor predicts 4 box coords + 1 objectness + class scores
    l.c = n*(classes + 4 + 1);
    l.out_w = l.w;
    l.out_h = l.h;
    l.out_c = l.c;
    l.classes = classes;
    l.biases = (float *)calloc(total*2, sizeof(float));
    int i;
    if(mask){
        l.mask = mask;   // takes ownership of the caller's mask array
    }else{
        // default mask: use anchors 0..n-1
        l.mask = (int *)calloc(n, sizeof(int));
        for(i = 0; i < n; ++i) l.mask[i] = i;
    }
    l.outputs = h*w*n*(classes + 4 + 1);
    l.inputs = l.outputs;
    l.output = (float *)calloc(batch*l.outputs, sizeof(float));
    for(i = 0; i < total*2; ++i) l.biases[i] = .5;   // default anchor size
    l.forward = forward_yolo_layer;
    fprintf(stderr, "detection\n");
    srand(0);
    return l;
}
/////////////////praser begin
// One "[section]" of a .cfg file: the bracketed type string plus the list
// of key=value options that follow it.
typedef struct{
    char *type;       // e.g. "[convolutional]" (owned, freed by free_section)
    list *options;    // list of kvp key/value pairs
}section;
list *read_cfg(char *filename);
// Map a "[section]" header from the cfg file to its LAYER_TYPE;
// unrecognized headers yield BLANK.
LAYER_TYPE string_to_layer_type(char * type)
{
    static const struct { const char *name; LAYER_TYPE t; } map[] = {
        {"[shortcut]", SHORTCUT}, {"[crop]", CROP}, {"[cost]", COST},
        {"[detection]", DETECTION}, {"[region]", REGION}, {"[yolo]", YOLO},
        {"[local]", LOCAL},
        {"[conv]", CONVOLUTIONAL}, {"[convolutional]", CONVOLUTIONAL},
        {"[deconv]", DECONVOLUTIONAL}, {"[deconvolutional]", DECONVOLUTIONAL},
        {"[activation]", ACTIVE}, {"[logistic]", LOGXENT}, {"[l2norm]", L2NORM},
        {"[net]", NETWORK}, {"[network]", NETWORK},
        {"[crnn]", CRNN}, {"[gru]", GRU}, {"[lstm]", LSTM}, {"[rnn]", RNN},
        {"[conn]", CONNECTED}, {"[connected]", CONNECTED},
        {"[max]", MAXPOOL}, {"[maxpool]", MAXPOOL},
        {"[reorg]", REORG},
        {"[avg]", AVGPOOL}, {"[avgpool]", AVGPOOL},
        {"[dropout]", DROPOUT},
        {"[lrn]", NORMALIZATION}, {"[normalization]", NORMALIZATION},
        {"[batchnorm]", BATCHNORM},
        {"[soft]", SOFTMAX}, {"[softmax]", SOFTMAX},
        {"[route]", ROUTE}, {"[upsample]", UPSAMPLE},
    };
    int i;
    for(i = 0; i < (int)(sizeof(map)/sizeof(map[0])); ++i){
        if (strcmp(type, map[i].name) == 0) return map[i].t;
    }
    return BLANK;
}
// Release a cfg section: each key/value node, then the list shell, the
// type string, and the section itself.
// NOTE(review): pair->val is not freed here — presumably it points into
// the same allocation as pair->key (see option parsing); confirm.
void free_section(section *s)
{
    node *cur = s->options->front;
    while(cur){
        node *nxt = cur->next;
        kvp *pair = (kvp *)cur->val;
        free(pair->key);
        free(pair);
        free(cur);
        cur = nxt;
    }
    free(s->type);
    free(s->options);
    free(s);
}
// Parse up to n comma-separated floats from `data` into a[0..n-1].
// The input string is consumed destructively (commas become NULs).
// FIX: an empty input string made `*++next` scan past the terminating
// NUL (undefined behavior); bail out early instead.
void parse_data(char *data, float *a, int n)
{
    int i;
    if(!data || !*data) return;
    char *curr = data;
    char *next = data;
    int done = 0;
    for(i = 0; i < n && !done; ++i){
        // advance to the next comma or end of string
        while(*++next !='\0' && *next != ',');
        if(*next == '\0') done = 1;
        *next = '\0';
        sscanf(curr, "%g", &a[i]);
        curr = next+1;
    }
}
// Parser state threaded through the cfg reader: geometry and batch
// settings seen by the layer currently being parsed.
typedef struct size_params{
    int batch;        // images per forward pass
    int inputs;       // flattened output count of the previous layer
    int h;            // input height for the current layer
    int w;            // input width
    int c;            // input channels
    int index;        // index of the layer being parsed
    int time_steps;   // RNN time steps (unused by the layers in this file)
    network *net;     // the network being assembled
} size_params;
// Build a convolutional layer from its cfg options. "pad=1" selects
// "same"-style padding of size/2, overriding an explicit "padding" value.
layer parse_convolutional(list *options, size_params params)
{
    int n = option_find_int(options, "filters",1);
    int size = option_find_int(options, "size",1);
    int stride = option_find_int(options, "stride",1);
    int pad = option_find_int_quiet(options, "pad",0);
    int padding = option_find_int_quiet(options, "padding",0);
    int groups = option_find_int_quiet(options, "groups", 1);
    if(pad) padding = size/2;   // pad flag wins over explicit padding
    char *activation_s = option_find_str(options, "activation", "logistic");
    ACTIVATION activation = get_activation(activation_s);
    int batch,h,w,c;
    h = params.h;
    w = params.w;
    c = params.c;
    batch=params.batch;
    if(!(h && w && c)) error("Layer before convolutional layer must output image.");
    int batch_normalize = option_find_int_quiet(options, "batch_normalize", 0);
    int binary = option_find_int_quiet(options, "binary", 0);
    int xnor = option_find_int_quiet(options, "xnor", 0);
    layer l = make_convolutional_layer(batch,h,w,c,n,groups,size,stride,padding,activation, batch_normalize, binary, xnor, params.net->adam);
    l.flipped = option_find_int_quiet(options, "flipped", 0);
    l.dot = option_find_float_quiet(options, "dot", 0);
    return l;
}
// Parse a comma-separated list of anchor-mask indices (e.g. "0,1,2").
// Returns a calloc'd int array (caller frees) and stores its length in
// *num; returns NULL and leaves *num untouched when `a` is NULL.
// FIX: the original advanced with strchr(a, ',')+1 even after the final
// element, computing NULL+1 — undefined behavior. We now stop advancing
// once no comma remains.
int *parse_yolo_mask(char *a, int *num)
{
    int *mask = 0;
    if(a){
        int len = strlen(a);
        int n = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (a[i] == ',') ++n;
        }
        mask = (int *)calloc(n, sizeof(int));
        for(i = 0; i < n; ++i){
            mask[i] = atoi(a);
            char *comma = strchr(a, ',');
            if(!comma) break;       // last element: nothing to advance past
            a = comma + 1;
        }
        *num = n;
    }
    return mask;
}
// Build a YOLOv3 detection layer from its cfg options, including the
// anchor mask and the flat "anchors" list of width,height pairs.
layer parse_yolo(list *options, size_params params)
{
    int classes = option_find_int(options, "classes", 20);
    int total = option_find_int(options, "num", 1);
    int num = total;   // parse_yolo_mask overwrites this with the mask length
    char *a = option_find_str(options, "mask", 0);
    int *mask = parse_yolo_mask(a, &num);
    layer l = make_yolo_layer(params.batch, params.w, params.h, num, total, mask, classes);
    assert(l.outputs == params.inputs);
    l.max_boxes = option_find_int_quiet(options, "max",90);
    l.jitter = option_find_float(options, "jitter", .2);
    l.ignore_thresh = option_find_float(options, "ignore_thresh", .5);
    l.truth_thresh = option_find_float(options, "truth_thresh", 1);
    l.random = option_find_int_quiet(options, "random", 0);
    a = option_find_str(options, "anchors", 0);
    if(a){
        int len = strlen(a);
        int n = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (a[i] == ',') ++n;
        }
        for(i = 0; i < n; ++i){
            float bias = atof(a);
            l.biases[i] = bias;
            a = strchr(a, ',')+1;   // NOTE(review): NULL+1 after the last element — UB, though `a` is not used again
        }
    }
    return l;
}
// Build a shortcut layer from its cfg options. "from" names the source
// layer; a negative value is relative to the current layer index.
layer parse_shortcut(list *options, size_params params, network *net)
{
    char *l = option_find(options, "from");
    int index = atoi(l);
    if(index < 0) index = params.index + index;   // relative -> absolute
    int batch = params.batch;
    layer from = net->layers[index];
    layer s = make_shortcut_layer(batch, index, params.w, params.h, params.c, from.out_w, from.out_h, from.out_c);
    char *activation_s = option_find_str(options, "activation", "linear");
    ACTIVATION activation = get_activation(activation_s);
    s.activation = activation;
    s.alpha = option_find_float_quiet(options, "alpha", 1);
    s.beta = option_find_float_quiet(options, "beta", 1);
    return s;
}
// Build an upsample layer from its cfg options (default stride 2).
layer parse_upsample(list *options, size_params params, network *net)
{
    int stride = option_find_int(options, "stride",2);
    layer l = make_upsample_layer(params.batch, params.w, params.h, params.c, stride);
    l.scale = option_find_float_quiet(options, "scale", 1);
    return l;
}
// Build a route (concatenation) layer from its "layers=" cfg option;
// negative indices are relative to the current layer.
// FIX: the original called strlen(l) BEFORE checking l for NULL,
// dereferencing a null pointer when "layers" was missing; the check now
// comes first. Also avoids strchr(l, ',')+1 on the last entry (NULL+1 UB).
layer parse_route(list *options, size_params params, network *net)
{
    char *l = option_find(options, "layers");
    if(!l) error("Route Layer must specify input layers");
    int len = strlen(l);
    int n = 1;
    int i;
    for(i = 0; i < len; ++i){
        if (l[i] == ',') ++n;
    }
    int *layers = (int *)calloc(n, sizeof(int));
    int *sizes = (int *)calloc(n, sizeof(int));
    for(i = 0; i < n; ++i){
        int index = atoi(l);
        char *comma = strchr(l, ',');
        if(comma) l = comma + 1;
        if(index < 0) index = params.index + index;
        layers[i] = index;
        sizes[i] = net->layers[index].outputs;
    }
    int batch = params.batch;
    layer route_layer = make_route_layer(batch, n, layers, sizes);
    // output geometry: channels stack when spatial dims agree, else zeroed
    layer first = net->layers[layers[0]];
    route_layer.out_w = first.out_w;
    route_layer.out_h = first.out_h;
    route_layer.out_c = first.out_c;
    for(i = 1; i < n; ++i){
        int index = layers[i];
        layer next = net->layers[index];
        if(next.out_w == first.out_w && next.out_h == first.out_h){
            route_layer.out_c += next.out_c;
        }else{
            route_layer.out_h = route_layer.out_w = route_layer.out_c = 0;
        }
    }
    return route_layer;
}
void softmax(float *input, int n, float temp, int stride, float *output)
{
int i;
float sum = 0;
float largest = -FLT_MAX;
for(i = 0; i < n; ++i){
if(input[i*stride] > largest) largest = input[i*stride];
}
for(i = 0; i < n; ++i){
float e = exp(input[i*stride]/temp - largest/temp);
sum += e;
output[i*stride] = e;
}
for(i = 0; i < n; ++i){
output[i*stride] /= sum;
}
}
// Apply softmax independently to each (batch, group) slice of `input`.
void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
    int b, g;
    for(b = 0; b < batch; ++b){
        for(g = 0; g < groups; ++g){
            int off = b*batch_offset + g*group_offset;
            softmax(input + off, n, temp, stride, output + off);
        }
    }
}
// Forward pass for a region (YOLOv2) detection layer: logistic-activate
// the box x,y offsets and (unless background) objectness, optionally
// softmax the class scores, then dump the raw input to a debug file.
// FIX: when fopen failed, the original still wrote through and fclose'd
// the NULL FILE* — undefined behavior. The dump now runs only when the
// file actually opened.
void forward_region_layer(const layer l, network net)
{
    int i,j,b,t,n;
    memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
#ifndef GPU
    for (b = 0; b < l.batch; ++b){
        for(n = 0; n < l.n; ++n){
            int index = entry_index(l, b, n*l.w*l.h, 0);
            activate_array(l.output + index, 2*l.w*l.h, LOGISTIC);
            index = entry_index(l, b, n*l.w*l.h, l.coords);
            if(!l.background) activate_array(l.output + index, l.w*l.h, LOGISTIC);
            index = entry_index(l, b, n*l.w*l.h, l.coords + 1);
            //if(!l.softmax) activate_array(l.output + index, l.classes*l.w*l.h, LOGISTIC);
        }
    }
    if (l.softmax){
        int index = entry_index(l, 0, 0, l.coords + !l.background);
        softmax_cpu(net.input + index, l.classes + l.background, l.batch*l.n, l.inputs/l.n, l.w*l.h, 1, l.w*l.h, 1, l.output + index);
    }
    // Debug dump of the raw layer input, one value per line.
    char line[256];
    FILE *fp3;
    char filename[256];
    sprintf(filename, "yolo_layer_%d.txt", 123123);
    printf("YOLO_layer:outputs=%d,%s\n",l.outputs,filename);
    if( (fp3 = fopen(filename, "w")) == NULL){
        fprintf(stderr,"CANNOT OPEN\n");
    } else {
        int x;
        for( x = 0; x < l.outputs; x++)
        {
            sprintf(line, "%f\n", net.input[x]);
            if(fputs(line,fp3)<0)fprintf(stderr,"write FILE failed\n");
        }
        fclose(fp3);
    }
#endif
    if(!net.train) return;
}
// Construct a YOLOv2 region layer: n anchors, `coords` box coordinates and
// `classes` categories per anchor over a w x h grid.
layer make_region_layer(int batch, int w, int h, int n, int classes, int coords)
{
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = REGION;
    l.n = n;
    l.batch = batch;
    l.h = h;
    l.w = w;
    // each anchor predicts coords + 1 objectness + class scores
    l.c = n*(classes + coords + 1);
    l.out_w = l.w;
    l.out_h = l.h;
    l.out_c = l.c;
    l.classes = classes;
    l.coords = coords;
    l.outputs = h*w*n*(classes + coords + 1);
    l.inputs = l.outputs;
    l.truths = 30*(l.coords + 1);
    l.biases = (float *)calloc(n*2, sizeof(float));
    l.output = (float *)calloc(batch*l.outputs, sizeof(float));
    int i;
    for(i = 0; i < n*2; ++i) l.biases[i] = .5;   // default anchor size
    l.forward = forward_region_layer;
    fprintf(stderr, "detection\n");
    srand(0);
    return l;
}
// Build a YOLOv2 region layer from its cfg options, including all the
// training-time loss scales and the flat "anchors" list.
layer parse_region(list *options, size_params params)
{
    int coords = option_find_int(options, "coords", 4);
    int classes = option_find_int(options, "classes", 20);
    int num = option_find_int(options, "num", 1);
    layer l = make_region_layer(params.batch, params.w, params.h, num, classes, coords);
    assert(l.outputs == params.inputs);
    l.log = option_find_int_quiet(options, "log", 0);
    l.sqrt = option_find_int_quiet(options, "sqrt", 0);
    l.softmax = option_find_int(options, "softmax", 0);
    l.background = option_find_int_quiet(options, "background", 0);
    l.max_boxes = option_find_int_quiet(options, "max",30);
    l.jitter = option_find_float(options, "jitter", .2);
    l.rescore = option_find_int_quiet(options, "rescore",0);
    l.thresh = option_find_float(options, "thresh", .5);
    l.classfix = option_find_int_quiet(options, "classfix", 0);
    l.absolute = option_find_int_quiet(options, "absolute", 0);
    l.random = option_find_int_quiet(options, "random", 0);
    l.coord_scale = option_find_float(options, "coord_scale", 1);
    l.object_scale = option_find_float(options, "object_scale", 1);
    l.noobject_scale = option_find_float(options, "noobject_scale", 1);
    l.mask_scale = option_find_float(options, "mask_scale", 1);
    l.class_scale = option_find_float(options, "class_scale", 1);
    l.bias_match = option_find_int_quiet(options, "bias_match",0);
    char *tree_file = option_find_str(options, "tree", 0);
    // if (tree_file) l.softmax_tree = read_tree(tree_file);
    char *map_file = option_find_str(options, "map", 0);
    // if (map_file) l.map = read_map(map_file);
    char *a = option_find_str(options, "anchors", 0);
    if(a){
        int len = strlen(a);
        int n = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (a[i] == ',') ++n;
        }
        for(i = 0; i < n; ++i){
            float bias = atof(a);
            l.biases[i] = bias;
            a = strchr(a, ',')+1;   // NOTE(review): NULL+1 after the last element — UB, though `a` is not used again
        }
    }
    return l;
}
// Reorg/space-to-depth repacking of x (w x h x c per batch item) with the
// given stride; `forward` selects the direction of the index mapping.
void reorg_cpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
    int out_c = c/(stride*stride);
    int b, k, j, i;
    for(b = 0; b < batch; ++b){
        for(k = 0; k < c; ++k){
            for(j = 0; j < h; ++j){
                for(i = 0; i < w; ++i){
                    int in_index = i + w*(j + h*(k + c*b));
                    // split the channel index into target channel + sub-pixel offset
                    int c2 = k % out_c;
                    int offset = k / out_c;
                    int w2 = i*stride + offset % stride;
                    int h2 = j*stride + offset / stride;
                    int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
                    if(forward) out[out_index] = x[in_index];
                    else out[in_index] = x[out_index];
                }
            }
        }
    }
}
// Forward pass for a reorg layer. Only the plain (non-flatten, non-extra,
// non-reverse) path is active in this trimmed build; the other variants
// are preserved below, commented out.
void forward_reorg_layer(const layer l, network net)
{
    int i;   // unused in the active path
    //if(l.flatten){
    //    memcpy(l.output, net.input, l.outputs*l.batch*sizeof(float));
    //    if(l.reverse){
    //        flatten(l.output, l.w*l.h, l.c, l.batch, 0);
    //    }else{
    //        flatten(l.output, l.w*l.h, l.c, l.batch, 1);
    //    }
    //} else if (l.extra) {
    //    for(i = 0; i < l.batch; ++i){
    //        copy_cpu(l.inputs, net.input + i*l.inputs, 1, l.output + i*l.outputs, 1);
    //    }
    //} else if (l.reverse){
    //    reorg_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.output);
    //} else {
    reorg_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 0, l.output);
    //}
}
// Build a reorg layer: repacks a w x h x c volume by `stride` (reverse
// expands spatially; otherwise space-to-depth shrinks it).
layer make_reorg_layer(int batch, int w, int h, int c, int stride, int reverse, int flatten, int extra)
{
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = REORG;
    l.batch = batch;
    l.stride = stride;
    l.extra = extra;
    l.h = h;
    l.w = w;
    l.c = c;
    l.flatten = flatten;
    l.reverse = reverse;
    if(reverse){
        l.out_w = w*stride;
        l.out_h = h*stride;
        l.out_c = c/(stride*stride);
    }else{
        l.out_w = w/stride;
        l.out_h = h/stride;
        l.out_c = c*(stride*stride);
    }
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = h*w*c;
    if(l.extra){
        // "extra" mode pads the flat output instead of reshaping
        l.out_w = l.out_h = l.out_c = 0;
        l.outputs = l.inputs + l.extra;
    }
    if(extra){
        fprintf(stderr, "reorg %4d -> %4d\n", l.inputs, l.outputs);
    } else {
        fprintf(stderr, "reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n", stride, w, h, c, l.out_w, l.out_h, l.out_c);
    }
    // NOTE(review): l.output is not allocated here (allocation commented
    // out in the original) — presumably attached elsewhere; confirm.
    l.forward = forward_reorg_layer;
    return l;
}
// Build a reorg layer from its cfg options.
// FIX: the local variable was named `layer`, shadowing the type name
// `layer` for the rest of the scope — renamed to `l`.
layer parse_reorg(list *options, size_params params)
{
    int stride = option_find_int(options, "stride",1);
    int reverse = option_find_int_quiet(options, "reverse",0);
    int flatten = option_find_int_quiet(options, "flatten",0);
    int extra = option_find_int_quiet(options, "extra",0);
    int batch = params.batch;
    int h = params.h;
    int w = params.w;
    int c = params.c;
    if(!(h && w && c)) error("Layer before reorg layer must output image.");
    layer l = make_reorg_layer(batch,w,h,c,stride,reverse, flatten, extra);
    return l;
}
// Max pooling forward pass: for each output cell, scan the size x size
// window (offset by -pad) over the input and keep the maximum value and
// its flat input index (stored in l.indexes for the backward pass).
void forward_maxpool_layer(layer l, network net)
{
    int b,i,j,k,m,n;
    int w_offset = -l.pad;   // window origin shifts left/up by the padding
    int h_offset = -l.pad;
    int h = l.out_h;
    int w = l.out_w;
    int c = l.c;
    for(b = 0; b < l.batch; ++b){
        for(k = 0; k < c; ++k){
            for(i = 0; i < h; ++i){
                for(j = 0; j < w; ++j){
                    int out_index = j + w*(i + h*(k + c*b));
                    float max = -FLT_MAX;
                    int max_i = -1;
                    for(n = 0; n < l.size; ++n){
                        for(m = 0; m < l.size; ++m){
                            int cur_h = h_offset + i*l.stride + n;
                            int cur_w = w_offset + j*l.stride + m;
                            int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
                            // positions outside the input count as -FLT_MAX
                            int valid = (cur_h >= 0 && cur_h < l.h &&
                                         cur_w >= 0 && cur_w < l.w);
                            float val = (valid != 0) ? net.input[index] : -FLT_MAX;
                            max_i = (val > max) ? index : max_i;
                            max   = (val > max) ? val : max;
                        }
                    }
                    l.output[out_index] = max;
                    l.indexes[out_index] = max_i;
                }
            }
        }
    }
}
//layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding)
//{
// layer l;
// memset(&l,0,sizeof(layer));
// l.type = MAXPOOL;
// l.batch = batch;
// l.h = h;
// l.w = w;
// l.c = c;
// l.pad = padding;
// l.out_w = (w + 2*padding)/stride;
// l.out_h = (h + 2*padding)/stride;
// l.out_c = c;
// l.outputs = l.out_h * l.out_w * l.out_c;
// l.inputs = h*w*c;
// l.size = size;
// l.stride = stride;
// int output_size = l.out_h * l.out_w * l.out_c * batch;
// //l.indexes = (int *)calloc(output_size, sizeof(int));
// //l.output = (float *)calloc(output_size, sizeof(float));
// l.forward = forward_maxpool_layer;
//
// fprintf(stderr, "max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n", size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
// return l;
//}
layer make_maxpool_layer(int batch, int h, int w, int c, int size, int stride, int padding)
{
    /* Construct a maxpool layer descriptor.
       Output dims use darknet's convention: (dim + padding - size)/stride + 1,
       where `padding` is the TOTAL padding (default size-1 in parse_maxpool).
       Buffers (output/indexes/delta) are intentionally not allocated here —
       this build wires them up elsewhere. */
    layer l;
    memset(&l,0,sizeof(layer));
    l.type = MAXPOOL;
    l.batch = batch;
    l.h = h;
    l.w = w;
    l.c = c;
    l.pad = padding;
    l.out_w = (w + padding - size)/stride + 1;
    l.out_h = (h + padding - size)/stride + 1;
    l.out_c = c;
    l.outputs = l.out_h * l.out_w * l.out_c;
    l.inputs = h*w*c;
    l.size = size;
    l.stride = stride;
    /* removed: unused local `output_size` (the callocs that used it are gone) */
    l.forward = forward_maxpool_layer;
    fprintf(stderr, "max          %d x %d / %d  %4d x%4d x%4d   ->  %4d x%4d x%4d\n", size, size, stride, w, h, c, l.out_w, l.out_h, l.out_c);
    return l;
}
layer parse_maxpool(list *options, size_params params)
{
    /* Read maxpool hyperparameters; size defaults to stride and padding to size-1. */
    int stride  = option_find_int(options, "stride",1);
    int size    = option_find_int(options, "size",stride);
    int padding = option_find_int_quiet(options, "padding", size-1);

    int h = params.h;
    int w = params.w;
    int c = params.c;
    int batch = params.batch;
    if(!(h && w && c)) error("Layer before maxpool layer must output image.");

    return make_maxpool_layer(batch,h,w,c,size,stride,padding);
}
learning_rate_policy get_policy(char *s)
{
    /* Map a policy name from the cfg file to its enum; unknown names fall
       back to CONSTANT with a warning. */
    static const struct { const char *name; learning_rate_policy policy; } table[] = {
        {"random",   RANDOM},
        {"poly",     POLY},
        {"constant", CONSTANT},
        {"step",     STEP},
        {"exp",      EXP},
        {"sigmoid",  SIG},
        {"steps",    STEPS},
    };
    size_t i;
    for(i = 0; i < sizeof(table)/sizeof(table[0]); ++i){
        if (strcmp(s, table[i].name)==0) return table[i].policy;
    }
    fprintf(stderr, "Couldn't find policy %s, going with constant\n", s);
    return CONSTANT;
}
void parse_net_options(list *options, network *net)
{
    /* Populate the global network hyperparameters from the [net] cfg section. */
    net->batch = option_find_int(options, "batch",1);
    net->learning_rate = option_find_float(options, "learning_rate", .001);
    net->momentum = option_find_float(options, "momentum", .9);
    net->decay = option_find_float(options, "decay", .0001);
    int subdivs = option_find_int(options, "subdivisions",1);
    net->time_steps = option_find_int_quiet(options, "time_steps",1);
    net->notruth = option_find_int_quiet(options, "notruth",0);
    /* effective minibatch = batch/subdivisions, scaled by time_steps for RNNs */
    net->batch /= subdivs;
    net->batch *= net->time_steps;
    net->subdivisions = subdivs;
    net->random = option_find_int_quiet(options, "random", 0);
    net->adam = option_find_int_quiet(options, "adam", 0);
    if(net->adam){
        /* Adam optimizer moments and epsilon */
        net->B1 = option_find_float(options, "B1", .9);
        net->B2 = option_find_float(options, "B2", .999);
        net->eps = option_find_float(options, "eps", .0000001);
    }
    net->h = option_find_int_quiet(options, "height",0);
    net->w = option_find_int_quiet(options, "width",0);
    net->c = option_find_int_quiet(options, "channels",0);
    net->inputs = option_find_int_quiet(options, "inputs", net->h * net->w * net->c);
    /* NOTE(review): the ratio defaults below divide by net->w — a cfg that
       sets only "inputs" (width 0) would divide by zero here; confirm all
       cfgs used with this build set width. */
    net->max_crop = option_find_int_quiet(options, "max_crop",net->w*2);
    net->min_crop = option_find_int_quiet(options, "min_crop",net->w);
    net->max_ratio = option_find_float_quiet(options, "max_ratio", (float) net->max_crop / net->w);
    net->min_ratio = option_find_float_quiet(options, "min_ratio", (float) net->min_crop / net->w);
    net->center = option_find_int_quiet(options, "center",0);
    net->clip = option_find_float_quiet(options, "clip", 0);
    /* data-augmentation knobs */
    net->angle = option_find_float_quiet(options, "angle", 0);
    net->aspect = option_find_float_quiet(options, "aspect", 1);
    net->saturation = option_find_float_quiet(options, "saturation", 1);
    net->exposure = option_find_float_quiet(options, "exposure", 1);
    net->hue = option_find_float_quiet(options, "hue", 0);
    if(!net->inputs && !(net->h && net->w && net->c)) error("No input parameters supplied");
    char *policy_s = option_find_str(options, "policy", "constant");
    net->policy = get_policy(policy_s);
    net->burn_in = option_find_int_quiet(options, "burn_in", 0);
    net->power = option_find_float_quiet(options, "power", 4);
    if(net->policy == STEP){
        net->step = option_find_int(options, "step", 1);
        net->scale = option_find_float(options, "scale", 1);
    } else if (net->policy == STEPS){
        /* "steps" and "scales" are comma-separated lists of equal length */
        char *l = option_find(options, "steps");
        char *p = option_find(options, "scales");
        if(!l || !p) error("STEPS policy must have steps and scales in cfg file");
        int len = strlen(l);
        int n = 1;
        int i;
        for(i = 0; i < len; ++i){
            if (l[i] == ',') ++n;
        }
        int *steps = (int *)calloc(n, sizeof(int));
        float *scales = (float *)calloc(n, sizeof(float));
        for(i = 0; i < n; ++i){
            int step = atoi(l);
            float scale = atof(p);
            /* NOTE(review): on the last element strchr returns NULL so l/p
               become NULL+1 — harmless only because they are never read
               again after the final iteration. */
            l = strchr(l, ',')+1;
            p = strchr(p, ',')+1;
            steps[i] = step;
            scales[i] = scale;
        }
        /* ownership transfers to the network (freed in free_network) */
        net->scales = scales;
        net->steps = steps;
        net->num_steps = n;
    } else if (net->policy == EXP){
        net->gamma = option_find_float(options, "gamma", 1);
    } else if (net->policy == SIG){
        net->gamma = option_find_float(options, "gamma", 1);
        net->step = option_find_int(options, "step", 1);
    } else if (net->policy == POLY || net->policy == RANDOM){
        /* no extra parameters */
    }
    net->max_batches = option_find_int(options, "max_batches", 0);
}
int is_network(section *s)
{
    /* A section opens a network definition iff its tag is [net] or [network]. */
    if (strcmp(s->type, "[net]") == 0) return 1;
    return strcmp(s->type, "[network]") == 0;
}
network *parse_network_cfg(char *filename)
{
    /* Build a network from a darknet .cfg file. The first section must be
       [net]/[network]; every following section becomes one layer. Returns a
       heap-allocated network (caller frees with free_network). */
    list *sections = read_cfg(filename);
    node *n = sections->front;
    if(!n) error("Config file has no sections");
    network *net = make_network(sections->size - 1);
    net->gpu_index = -1;
    size_params params;

    section *s = (section *)n->val;
    list *options = s->options;
    if(!is_network(s)) error("First section must be [net] or [network]");
    parse_net_options(options, net);

    /* seed per-layer sizing parameters from the network input */
    params.h = net->h;
    params.w = net->w;
    params.c = net->c;
    params.inputs = net->inputs;
    params.batch = net->batch;
    params.time_steps = net->time_steps;
    params.net = net;

    size_t workspace_size = 0;
    n = n->next;
    int count = 0;
    free_section(s);
    fprintf(stderr, "layer     filters    size              input                output\n");
    while(n){
        params.index = count;
        fprintf(stderr, "%5d ", count);
        s = (section *)n->val;
        options = s->options;
        layer l;
        memset(&l,0,sizeof(layer));
        LAYER_TYPE lt = string_to_layer_type(s->type);
        if(lt == CONVOLUTIONAL){
            l = parse_convolutional(options, params);
        }else if(lt == YOLO){
            l = parse_yolo(options, params);
        }else if(lt == ROUTE){
            l = parse_route(options, params, net);
        }else if(lt == UPSAMPLE){
            l = parse_upsample(options, params, net);
        }else if(lt == SHORTCUT){
            l = parse_shortcut(options, params, net);
        }else if(lt == REGION){
            l = parse_region(options, params);
        }else if(lt == MAXPOOL){
            /* BUGFIX: removed a second, unreachable `lt == YOLO` branch that
               duplicated the one above. */
            l = parse_maxpool(options, params);
        }else if(lt == REORG){
            l = parse_reorg(options, params);
        }else{
            fprintf(stderr, "Type not recognized: %s\n", s->type);
        }
        /* options common to every layer type */
        l.clip = net->clip;
        l.truth = option_find_int_quiet(options, "truth", 0);
        l.onlyforward = option_find_int_quiet(options, "onlyforward", 0);
        l.stopbackward = option_find_int_quiet(options, "stopbackward", 0);
        l.dontsave = option_find_int_quiet(options, "dontsave", 0);
        l.smooth = option_find_float_quiet(options, "smooth", 0);
        option_unused(options);
        net->layers[count] = l;
        if (l.workspace_size > workspace_size) workspace_size = l.workspace_size;
        free_section(s);
        n = n->next;
        ++count;
        if(n){
            /* next layer's input is this layer's output */
            params.h = l.out_h;
            params.w = l.out_w;
            params.c = l.out_c;
            params.inputs = l.outputs;
        }
    }
    free_list(sections);
    layer out = get_network_output_layer(net);
    net->outputs = out.outputs;
    net->output = out.output;
    workspace_size = 0; /* inference-only build: the workspace is never allocated */
    return net;
}
list *read_cfg(char *filename)
{
FILE *file = fopen(filename, "r");
if(file == 0) file_error(filename);
char *line;
int nu = 0;
list *options = make_list();
section *current = 0;
while((line=fgetl(file)) != 0){
++ nu;
strip(line);
switch(line[0]){
case '[':
current = (section *)malloc(sizeof(section));
list_insert(options, current);
current->options = make_list();
current->type = line;
break;
case '\0':
case '#':
case ';':
free(line);
break;
default:
if(!read_option(line, current->options)){
fprintf(stderr, "Config file error line %d, could parse: %s\n", nu, line);
free(line);
}
break;
}
}
fclose(file);
return options;
}
void load_convolutional_weights(layer l, FILE *fp)
{
    /* Read one conv layer's parameters from a darknet .weights stream:
       biases, then (if batch-normalized) scales + rolling mean/variance,
       then the weights themselves. */
    size_t num = (size_t)l.nweights;
    size_t got = 0;
    size_t expected = (size_t)l.n;
    got += fread(l.biases, sizeof(float), l.n, fp);
    if (l.batch_normalize){
        got += fread(l.scales, sizeof(float), l.n, fp);
        got += fread(l.rolling_mean, sizeof(float), l.n, fp);
        got += fread(l.rolling_variance, sizeof(float), l.n, fp);
        expected += 3*(size_t)l.n;
    }
    got += fread(l.weights, sizeof(float), num, fp);
    expected += num;
    /* BUGFIX: fread return values used to be ignored; a truncated file now
       produces a diagnostic instead of silently leaving garbage weights. */
    if (got != expected){
        fprintf(stderr, "load_convolutional_weights: short read (%zu of %zu floats)\n", got, expected);
    }
}
void load_weights_upto(network *net, char *filename, int start, int cutoff)
{
    /* Load weights for layers [start, min(cutoff, net->n)) from a darknet
       .weights file. File header: three ints (major, minor, revision)
       followed by the "seen" image counter. */
    fprintf(stderr, "Loading weights from %s...", filename);
    fflush(stdout);
    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);
    int major;
    int minor;
    int revision;
    fread(&major, sizeof(int), 1, fp);
    fread(&minor, sizeof(int), 1, fp);
    fread(&revision, sizeof(int), 1, fp);
    printf("major=%d;minor=%d;revision=%d\n",major,minor,revision);// 0 2 0
    printf("if true ro false:%d\n",(major*10 + minor) >= 2 && major < 1000 && minor < 1000);
    if ((major*10 + minor) >= 2 && major < 1000 && minor < 1000){
        /* NOTE(review): "seen" is stored as 8 bytes in the file. Reading
           sizeof(size_t) twice only consumes the right amount on a 32-bit
           target (sizeof(size_t)==4, per the comment below); on a 64-bit
           build this reads 16 bytes and the second read overwrites the
           first — confirm the intended target platform. */
        //fread(net->seen, sizeof(size_t), 1, fp);
        fread(net->seen, sizeof(size_t), 1, fp);
        fread(net->seen, sizeof(size_t), 1, fp);
    }else {
        /* older format stored "seen" as a 32-bit int */
        int iseen = 0;
        fread(&iseen, sizeof(int), 1, fp);
        *net->seen = iseen;
    }
    //printf("sizeof(size_t)=%u\n",sizeof(size_t));// in my PC is 4
    int i;
    /* only convolutional layers carry weights in this build */
    for(i = start; i < net->n && i < cutoff; ++i){
        layer l = net->layers[i];
        if(l.type == CONVOLUTIONAL){
            load_convolutional_weights(l, fp);
        }
    }
    fprintf(stderr, "Done!\n");
    fclose(fp);
}
void load_weights(network *net, char *filename)
{
    /* Convenience wrapper: load weights for every layer of the network. */
    load_weights_upto(net, filename, 0, net->n);
}
/////////////////praser end
/////////////////network begin
load_args get_base_args(network *net)
{
    /* Collect the network's data-loading/augmentation settings into a
       zero-initialized load_args request. */
    load_args args = {0};
    args.h = net->h;
    args.w = net->w;
    args.size = net->w;

    args.min = net->min_crop;
    args.max = net->max_crop;
    args.center = net->center;

    args.angle = net->angle;
    args.aspect = net->aspect;
    args.saturation = net->saturation;
    args.exposure = net->exposure;
    args.hue = net->hue;
    return args;
}
network *load_network(char *cfg, char *weights, int clear)
{
    /* Parse the cfg and optionally reset the "seen" counter.
       NOTE(review): `weights` is currently ignored — the load_weights call
       was deliberately commented out in this build; weights presumably come
       from elsewhere. Verify before relying on this entry point. */
    (void)weights;
    network *net = parse_network_cfg(cfg);
    if(clear) *net->seen = 0;
    return net;
}
char *get_layer_string(LAYER_TYPE a)
{
    /* Human-readable name for a layer type; "none" for unknown values. */
    switch(a){
        case CONVOLUTIONAL:   return "convolutional";
        case ACTIVE:          return "activation";
        case LOCAL:           return "local";
        case DECONVOLUTIONAL: return "deconvolutional";
        case CONNECTED:       return "connected";
        case RNN:             return "rnn";
        case GRU:             return "gru";
        case LSTM:            return "lstm";
        case CRNN:            return "crnn";
        case MAXPOOL:         return "maxpool";
        case REORG:           return "reorg";
        case AVGPOOL:         return "avgpool";
        case SOFTMAX:         return "softmax";
        case DETECTION:       return "detection";
        case REGION:          return "region";
        case YOLO:            return "yolo";
        case DROPOUT:         return "dropout";
        case CROP:            return "crop";
        case COST:            return "cost";
        case ROUTE:           return "route";
        case SHORTCUT:        return "shortcut";
        case NORMALIZATION:   return "normalization";
        case BATCHNORM:       return "batchnorm";
        default:              break;
    }
    return "none";
}
network *make_network(int n)
{
network *net = (network *)calloc(1, sizeof(network));
net->n = n;
net->layers = (layer *)calloc(net->n, sizeof(layer));
net->seen = (size_t *)calloc(1, sizeof(size_t));
net->t = (int *)calloc(1, sizeof(int));
net->cost = (float *)calloc(1, sizeof(float));
return net;
}
void forward_network(network *netp)
{
    /* Run every layer in order; each layer's output buffer becomes the next
       layer's input. Works on a local copy of the struct so netp's own
       input pointer is untouched.
       NOTE(review): assumes every layer has a non-NULL forward pointer — an
       unrecognized cfg section leaves it NULL (see parse_network_cfg);
       verify configs before calling. */
    network net = *netp;
    int i;
    for(i = 0; i < net.n; ++i){
        net.index = i;
        layer l = net.layers[i];
        l.forward(l, net);
        net.input = l.output;
        // printf("layer [%d]\n",i);
    }
}
void set_temp_network(network *net, float t)
{
    /* Broadcast the softmax temperature t to every layer. */
    int idx;
    for(idx = 0; idx < net->n; ++idx) net->layers[idx].temperature = t;
}
void set_batch_network(network *net, int b)
{
    /* Set the batch size on the network and on every layer. */
    int idx;
    net->batch = b;
    for(idx = 0; idx < net->n; ++idx) net->layers[idx].batch = b;
}
float *network_predict(network *net, float *input)
{
    /* Run inference on one input buffer and return a pointer to the final
       layer's output. The network struct is restored afterwards, so the
       call has no lasting side effects on net's fields. */
    network saved = *net;

    net->input = input;
    net->truth = 0;
    net->train = 0;
    net->delta = 0;
    forward_network(net);
    float *out = net->output;

    *net = saved;
    return out;
}
int yolo_num_detections(layer l, float thresh)
{
    /* Count YOLO predictions whose objectness exceeds thresh. */
    int count = 0;
    int anchor, cell;
    for(anchor = 0; anchor < l.n; ++anchor){
        for(cell = 0; cell < l.w*l.h; ++cell){
            /* channel 4 of each prediction holds objectness */
            int obj_index = entry_index(l, 0, anchor*l.w*l.h + cell, 4);
            count += (l.output[obj_index] > thresh);
        }
    }
    return count;
}
int num_detections(network *net, float thresh)
{
    /* Total detection slots needed across all output layers. YOLO layers
       are counted above-threshold only; DETECTION/REGION layers always
       emit one slot per anchor per cell. */
    int total = 0;
    int i;
    for(i = 0; i < net->n; ++i){
        layer l = net->layers[i];
        switch(l.type){
            case YOLO:
                total += yolo_num_detections(l, thresh);
                break;
            case DETECTION:
            case REGION:
                total += l.w*l.h*l.n;
                break;
            default:
                break;
        }
    }
    return total;
}
detection *make_network_boxes(network *net, float thresh, int *num)
{
    /* Allocate one detection slot per expected box, each with a per-class
       probability array sized from the final layer. Caller frees with
       free_detections; *num (if given) receives the count. */
    layer last = net->layers[net->n - 1];
    int nboxes = num_detections(net, thresh);
    if(num) *num = nboxes;

    detection *dets = (detection *)calloc(nboxes, sizeof(detection));
    int i;
    for(i = 0; i < nboxes; ++i){
        dets[i].prob = (float *)calloc(last.classes, sizeof(float));
    }
    return dets;
}
box get_yolo_box(float *x, float *biases, int n, int index, int i, int j, int lw, int lh, int w, int h, int stride)
{
    /* Decode one raw YOLO prediction at grid cell (i,j) into a box
       normalized to [0,1]: center offset by the cell, size from the
       exponentiated prediction scaled by anchor n's prior. */
    float tx = x[index + 0*stride];
    float ty = x[index + 1*stride];
    float tw = x[index + 2*stride];
    float th = x[index + 3*stride];

    box b;
    b.x = (i + tx) / lw;
    b.y = (j + ty) / lh;
    b.w = exp(tw) * biases[2*n]   / w;
    b.h = exp(th) * biases[2*n+1] / h;
    return b;
}
void correct_yolo_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
    /* Undo letterbox scaling: map boxes from network coordinates back to
       the original w x h image. If !relative, convert to absolute pixels. */
    int new_w = netw;
    int new_h = neth;
    if (((float)netw/w) < ((float)neth/h)) {
        new_h = (h * netw)/w;   /* width-limited: bars top/bottom */
    } else {
        new_w = (w * neth)/h;   /* height-limited: bars left/right */
    }
    int i;
    for (i = 0; i < n; ++i){
        box b = dets[i].bbox;
        b.x = (b.x - (netw - new_w)/2./netw) / ((float)new_w/netw);
        b.y = (b.y - (neth - new_h)/2./neth) / ((float)new_h/neth);
        b.w *= (float)netw/new_w;
        b.h *= (float)neth/new_h;
        if(!relative){
            b.x *= w;
            b.w *= w;
            b.y *= h;
            b.h *= h;
        }
        dets[i].bbox = b;
    }
}
int get_yolo_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, int relative, detection *dets)
{
    /* Decode a YOLO layer's raw output into dets (boxes corrected to the
       original w x h image) and return how many detections were written.
       `map` is accepted for interface parity but unused here. */
    int i,j,n;
    float *predictions = l.output;
    // if (l.batch == 2) avg_flipped_yolo(l);
    int count = 0;
    for (i = 0; i < l.w*l.h; ++i){
        int row = i / l.w;
        int col = i % l.w;
        for(n = 0; n < l.n; ++n){
            /* channel 4 holds objectness; skip weak predictions early */
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, 4);
            float objectness = predictions[obj_index];
            if(objectness <= thresh) continue;
            int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
            dets[count].bbox = get_yolo_box(predictions, l.biases, l.mask[n], box_index, col, row, l.w, l.h, netw, neth, l.w*l.h);
            dets[count].objectness = objectness;
            dets[count].classes = l.classes;
            /* class probability = objectness * class score, thresholded */
            for(j = 0; j < l.classes; ++j){
                int class_index = entry_index(l, 0, n*l.w*l.h + i, 4 + 1 + j);
                float prob = objectness*predictions[class_index];
                dets[count].prob[j] = (prob > thresh) ? prob : 0;
            }
            ++count;
        }
    }
    correct_yolo_boxes(dets, count, w, h, netw, neth, relative);
    return count;
}
box get_region_box(float *x, float *biases, int n, int index, int i, int j, int w, int h, int stride)
{
    /* Decode one region-layer prediction at cell (i,j) into a normalized
       box; anchors live in `biases` as (w,h) pairs. */
    float tx = x[index + 0*stride];
    float ty = x[index + 1*stride];
    float tw = x[index + 2*stride];
    float th = x[index + 3*stride];

    box b;
    b.x = (i + tx) / w;
    b.y = (j + ty) / h;
    b.w = exp(tw) * biases[2*n]   / w;
    b.h = exp(th) * biases[2*n+1] / h;
    return b;
}
void correct_region_boxes(detection *dets, int n, int w, int h, int netw, int neth, int relative)
{
    /* Undo letterbox scaling for region-layer boxes.
       CONSISTENCY: the body was a line-for-line duplicate of
       correct_yolo_boxes, so it now delegates to it. */
    correct_yolo_boxes(dets, n, w, h, netw, neth, relative);
}
void get_region_detections(layer l, int w, int h, int netw, int neth, float thresh, int *map, float tree_thresh, int relative, detection *dets)
{
    /* Decode a region layer's output into dets (one slot per anchor per
       cell) and correct boxes back to the original image.
       `map` and `tree_thresh` are unused in this build. */
    int i,j,n,z;
    float *predictions = l.output;
    /* batch==2 means the second image is a horizontal flip of the first:
       mirror its predictions and average the two for test-time augmentation */
    if (l.batch == 2) {
        float *flip = l.output + l.outputs;
        for (j = 0; j < l.h; ++j) {
            for (i = 0; i < l.w/2; ++i) {
                for (n = 0; n < l.n; ++n) {
                    for(z = 0; z < l.classes + l.coords + 1; ++z){
                        int i1 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + i;
                        int i2 = z*l.w*l.h*l.n + n*l.w*l.h + j*l.w + (l.w - i - 1);
                        float swap = flip[i1];
                        flip[i1] = flip[i2];
                        flip[i2] = swap;
                        if(z == 0){
                            /* channel 0 is the x offset: sign flips when mirrored */
                            flip[i1] = -flip[i1];
                            flip[i2] = -flip[i2];
                        }
                    }
                }
            }
        }
        for(i = 0; i < l.outputs; ++i){
            l.output[i] = (l.output[i] + flip[i])/2.;
        }
    }
    for (i = 0; i < l.w*l.h; ++i){
        int row = i / l.w;
        int col = i % l.w;
        for(n = 0; n < l.n; ++n){
            int index = n*l.w*l.h + i;
            for(j = 0; j < l.classes; ++j){
                dets[index].prob[j] = 0;
            }
            int obj_index = entry_index(l, 0, n*l.w*l.h + i, l.coords);
            int box_index = entry_index(l, 0, n*l.w*l.h + i, 0);
            int mask_index = entry_index(l, 0, n*l.w*l.h + i, 4);
            /* background layers have implicit objectness 1 */
            float scale = l.background ? 1 : predictions[obj_index];
            dets[index].bbox = get_region_box(predictions, l.biases, n, box_index, col, row, l.w, l.h, l.w*l.h);
            dets[index].objectness = scale > thresh ? scale : 0;
            /* mask is only filled if make_network_boxes allocated it (coords > 4) */
            if(dets[index].mask){
                for(j = 0; j < l.coords - 4; ++j){
                    dets[index].mask[j] = l.output[mask_index + j*l.w*l.h];
                }
            }
            /* NOTE(review): this outer class_index is immediately shadowed by
               the per-class one in the loop below and is effectively unused. */
            int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + !l.background);
            if(dets[index].objectness){
                for(j = 0; j < l.classes; ++j){
                    int class_index = entry_index(l, 0, n*l.w*l.h + i, l.coords + 1 + j);
                    float prob = scale*predictions[class_index];
                    dets[index].prob[j] = (prob > thresh) ? prob : 0;
                }
            }
        }
    }
    correct_region_boxes(dets, l.w*l.h*l.n, w, h, netw, neth, relative);
}
void fill_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, detection *dets)
{
    /* Walk every output layer and append its decoded detections to dets,
       advancing a cursor by however many slots each layer consumed. */
    detection *cursor = dets;
    int idx;
    for(idx = 0; idx < net->n; ++idx){
        layer l = net->layers[idx];
        if(l.type == YOLO){
            cursor += get_yolo_detections(l, w, h, net->w, net->h, thresh, map, relative, cursor);
        } else if(l.type == REGION){
            get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, cursor);
            cursor += l.w*l.h*l.n;
        }
    }
}
detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num)
{
    /* Allocate detection slots for the whole network, fill them from every
       output layer, and hand ownership to the caller (free_detections). */
    detection *dets = make_network_boxes(net, thresh, num);
    fill_network_boxes(net, w, h, thresh, hier, map, relative, dets);
    return dets;
}
void free_detections(detection *dets, int n)
{
    /* Release each detection's per-class/prob arrays, then the array itself. */
    int i;
    for(i = 0; i < n; ++i){
        free(dets[i].prob);
        free(dets[i].mask); /* free(NULL) is a no-op — the old guard was redundant */
    }
    free(dets);
}
/* Accessors for the network's expected input width and height. */
int network_width(network *net)
{
    return net->w;
}
int network_height(network *net)
{
    return net->h;
}
layer get_network_output_layer(network *net)
{
    /* Return the last layer that is not a COST layer (the real output). */
    int i = net->n - 1;
    while(i > 0 && net->layers[i].type == COST) --i;
    return net->layers[i];
}
void free_network(network *net)
{
    /* Free every layer plus all network-owned buffers.
       BUGFIX: seen/t/cost (allocated in make_network) and steps/scales
       (allocated in parse_net_options for the STEPS policy) used to leak. */
    int i;
    for(i = 0; i < net->n; ++i){
        free_layer(net->layers[i]);
    }
    free(net->layers);
    free(net->input);  /* free(NULL) is a no-op — guards removed */
    free(net->truth);
    free(net->seen);
    free(net->t);
    free(net->cost);
    free(net->steps);
    free(net->scales);
    free(net);
}
layer network_output_layer(network *net)
{
    /* Last non-COST layer.
       CONSISTENCY: the body was a line-for-line duplicate of
       get_network_output_layer, so it now delegates to it. */
    return get_network_output_layer(net);
}
/* Size of the network's input vector (taken from layer 0). */
int network_inputs(network *net)
{
    return net->layers[0].inputs;
}
/* Number of values produced by the final non-COST layer. */
int network_outputs(network *net)
{
    return network_output_layer(net).outputs;
}
/* Pointer to the output buffer of the final non-COST layer. */
float *network_output(network *net)
{
    return network_output_layer(net).output;
}
//////////////////network end
//////////////////////box begin
int nms_comparator(const void *pa, const void *pb)
{
detection a = *(detection *)pa;
detection b = *(detection *)pb;
float diff = 0;
if(b.sort_class >= 0){
diff = a.prob[b.sort_class] - b.prob[b.sort_class];
} else {
diff = a.objectness - b.objectness;
}
if(diff < 0) return 1;
else if(diff > 0) return -1;
return 0;
}
float overlap(float x1, float w1, float x2, float w2)
{
    /* Length of the 1-D intersection of two center/width segments.
       Negative when they are disjoint (size of the gap). */
    float lo1 = x1 - w1/2;
    float lo2 = x2 - w2/2;
    float hi1 = x1 + w1/2;
    float hi2 = x2 + w2/2;
    float left  = (lo1 > lo2) ? lo1 : lo2;
    float right = (hi1 < hi2) ? hi1 : hi2;
    return right - left;
}
float box_intersection(box a, box b)
{
    /* Area of the overlap of two center-format boxes; 0 if disjoint. */
    float w = overlap(a.x, a.w, b.x, b.w);
    float h = overlap(a.y, a.h, b.y, b.h);
    return (w < 0 || h < 0) ? 0 : w*h;
}
float box_union(box a, box b)
{
    /* Total covered area: sum of areas minus the double-counted overlap. */
    float inter = box_intersection(a, b);
    return a.w*a.h + b.w*b.h - inter;
}
float box_iou(box a, box b)
{
    /* Intersection-over-union similarity in [0,1]. */
    return box_intersection(a, b)/box_union(a, b);
}
void do_nms_sort(detection *dets, int total, int classes, float thresh)
{
    /* Per-class non-maximum suppression: for each class, sort detections by
       that class's probability and zero the probability of any box whose
       IoU with a higher-scoring box exceeds thresh. */
    int i, j, k;
    k = total-1;
    /* Partition: swap zero-objectness detections to the tail so they are
       excluded from sorting/suppression below. */
    for(i = 0; i <= k; ++i){
        if(dets[i].objectness == 0){
            detection swap = dets[i];
            dets[i] = dets[k];
            dets[k] = swap;
            --k;
            --i;    /* re-examine the element just swapped into slot i */
        }
    }
    total = k+1;
    for(k = 0; k < classes; ++k){
        /* tell the comparator which class to sort by */
        for(i = 0; i < total; ++i){
            dets[i].sort_class = k;
        }
        qsort(dets, total, sizeof(detection), nms_comparator);
        for(i = 0; i < total; ++i){
            if(dets[i].prob[k] == 0) continue;
            box a = dets[i].bbox;
            /* suppress every lower-scoring box that overlaps a too much */
            for(j = i+1; j < total; ++j){
                box b = dets[j].bbox;
                if (box_iou(a, b) > thresh){
                    dets[j].prob[k] = 0;
                }
            }
        }
    }
}
//////////////////////box end
//////////////////////image begin
/* RGB anchor colors interpolated by get_color(): magenta, blue, cyan, green, yellow, red. */
float colors[6][3] = { {1,0,1}, {0,0,1},{0,1,1},{0,1,0},{1,1,0},{1,0,0} };
float get_color(int c, int x, int max)
{
    /* Map x in [0,max] onto the 6-entry anchor-color gradient and return
       channel c (0..2) of the interpolated color.
       BUGFIX: x is now clamped — x outside [0,max] used to index the colors
       table out of bounds. (max is assumed > 0 by all in-file callers.) */
    if(x < 0) x = 0;
    if(x > max) x = max;
    float ratio = ((float)x/max)*5;
    int i = floor(ratio);
    int j = ceil(ratio);
    ratio -= i;
    /* linear blend between neighboring anchor colors */
    float r = (1-ratio) * colors[i][c] + ratio*colors[j][c];
    return r;
}
static float get_pixel_extend(image m, int x, int y, int c)
{
    /* Zero-padded pixel read: any coordinate or channel outside the image
       returns 0 instead of reading out of bounds. */
    int inside = (x >= 0 && x < m.w &&
                  y >= 0 && y < m.h &&
                  c >= 0 && c < m.c);
    return inside ? get_pixel(m, x, y, c) : 0;
}
void composite_image(image source, image dest, int dx, int dy)
{
    /* Multiply source into dest at offset (dx,dy): dest *= source, with
       reads outside dest treated as 0. */
    int ch, row, col;
    for(ch = 0; ch < source.c; ++ch){
        for(row = 0; row < source.h; ++row){
            for(col = 0; col < source.w; ++col){
                float s = get_pixel(source, col, row, ch);
                float d = get_pixel_extend(dest, dx+col, dy+row, ch);
                set_pixel(dest, dx+col, dy+row, ch, s * d);
            }
        }
    }
}
image border_image(image a, int border)
{
    /* Return a copy of a surrounded by a `border`-pixel frame of 1s. */
    image b = make_image(a.w + 2*border, a.h + 2*border, a.c);
    int ch, y, x;
    for(ch = 0; ch < b.c; ++ch){
        for(y = 0; y < b.h; ++y){
            for(x = 0; x < b.w; ++x){
                int sx = x - border;
                int sy = y - border;
                int inside = (sx >= 0 && sx < a.w && sy >= 0 && sy < a.h);
                float val = inside ? get_pixel_extend(a, sx, sy, ch) : 1;
                set_pixel(b, x, y, ch, val);
            }
        }
    }
    return b;
}
image copy_image(image p)
{
    /* Deep copy: same header, freshly allocated pixel buffer (caller frees). */
    size_t count = (size_t)p.h * p.w * p.c;
    image q = p;
    q.data = (float *)calloc(count, sizeof(float));
    memcpy(q.data, p.data, count * sizeof(float));
    return q;
}
image tile_images(image a, image b, int dx)
{
    /* Place b to the right of a with a dx-pixel gap on a white canvas;
       an empty a just yields a copy of b. */
    if(a.w == 0) return copy_image(b);
    int w = a.w + b.w + dx;
    int h = (a.h > b.h) ? a.h : b.h;
    int c = (a.c > b.c) ? a.c : b.c;
    image out = make_image(w, h, c);
    fill_cpu(out.w*out.h*out.c, 1, out.data, 1);
    embed_image(a, out, 0, 0);
    composite_image(b, out, a.w + dx, 0);
    return out;
}
image get_label(image **characters, char *string, int size)
{
    /* Render a text label from the bitmap-font atlas `characters`
       (8 size buckets, one glyph image per byte value) and frame it.
       Caller frees the returned image. */
    size = size/10;
    if(size > 7) size = 7;
    image label = make_empty_image(0,0,0);
    while(*string){
        /* BUGFIX: cast through unsigned char — indexing with a plain char
           was a negative (out-of-bounds) index for bytes >= 0x80 on
           platforms where char is signed. */
        image glyph = characters[size][(unsigned char)*string];
        image next = tile_images(label, glyph, -size - 1 + (size+1)/2);
        free_image(label);
        label = next;
        ++string;
    }
    image b = border_image(label, label.h*.25);
    free_image(label);
    return b;
}
void draw_label(image a, int r, int c, image label, const float *rgb)
{
    /* Blit `label`, tinted by rgb, at (row r, col c); shifts up by the
       label height when it fits so the text sits above r. Clipped to a. */
    if (r - label.h >= 0) r = r - label.h;
    int row, col, ch;
    for(row = 0; row < label.h && row + r < a.h; ++row){
        for(col = 0; col < label.w && col + c < a.w; ++col){
            for(ch = 0; ch < label.c; ++ch){
                float v = get_pixel(label, col, row, ch);
                set_pixel(a, col+c, row+r, ch, rgb[ch] * v);
            }
        }
    }
}
void draw_box(image a, int x1, int y1, int x2, int y2, float r, float g, float b)
{
    /* Draw a 1-pixel rectangle outline in color (r,g,b) on a 3-channel
       image; corner coordinates are clamped into the image first. */
    if(x1 < 0) x1 = 0;
    if(x1 >= a.w) x1 = a.w-1;
    if(x2 < 0) x2 = 0;
    if(x2 >= a.w) x2 = a.w-1;
    if(y1 < 0) y1 = 0;
    if(y1 >= a.h) y1 = a.h-1;
    if(y2 < 0) y2 = 0;
    if(y2 >= a.h) y2 = a.h-1;

    int plane = a.w*a.h;   /* stride between color planes (CHW layout) */
    int i;
    /* top and bottom edges */
    for(i = x1; i <= x2; ++i){
        a.data[i + y1*a.w + 0*plane] = r;
        a.data[i + y2*a.w + 0*plane] = r;
        a.data[i + y1*a.w + 1*plane] = g;
        a.data[i + y2*a.w + 1*plane] = g;
        a.data[i + y1*a.w + 2*plane] = b;
        a.data[i + y2*a.w + 2*plane] = b;
    }
    /* left and right edges */
    for(i = y1; i <= y2; ++i){
        a.data[x1 + i*a.w + 0*plane] = r;
        a.data[x2 + i*a.w + 0*plane] = r;
        a.data[x1 + i*a.w + 1*plane] = g;
        a.data[x2 + i*a.w + 1*plane] = g;
        a.data[x1 + i*a.w + 2*plane] = b;
        a.data[x2 + i*a.w + 2*plane] = b;
    }
}
void draw_box_width(image a, int x1, int y1, int x2, int y2, int w, float r, float g, float b)
{
    /* Draw w nested 1-pixel rectangles to produce a w-pixel-thick outline. */
    int ring;
    for(ring = 0; ring < w; ++ring){
        draw_box(a, x1+ring, y1+ring, x2-ring, y2-ring, r, g, b);
    }
}
image float_to_image(int w, int h, int c, float *data)
{
    /* Wrap an existing float buffer in an image header — no copy is made,
       so the caller retains ownership of `data`. */
    image im = make_empty_image(w,h,c);
    im.data = data;
    return im;
}
image threshold_image(image im, float thresh)
{
    /* Binarize: return a new image with 1 where pixel > thresh, else 0. */
    image out = make_image(im.w, im.h, im.c);
    int n = im.w*im.h*im.c;
    int i;
    for(i = 0; i < n; ++i){
        out.data[i] = (im.data[i] > thresh) ? 1 : 0;
    }
    return out;
}
void draw_detections(image im, detection *dets, int num, float thresh, char **names, image **alphabet, int classes)
{
    /* Draw every detection whose probability exceeds thresh onto im: a
       colored box, an optional text label (when a font `alphabet` is
       given), and an optional instance-mask overlay. */
    int i,j;
    for(i = 0; i < num; ++i){
        char labelstr[4096] = {0};
        int class_t = -1;
        /* collect all above-threshold class names into one comma-separated label */
        for(j = 0; j < classes; ++j){
            if (dets[i].prob[j] > thresh){
                if (class_t < 0) {
                    strcat(labelstr, names[j]);
                    class_t = j;
                } else {
                    strcat(labelstr, ", ");
                    strcat(labelstr, names[j]);
                }
                printf("%s: %.0f%%\n", names[j], dets[i].prob[j]*100);
            }
        }
        if(class_t >= 0){
            int width = im.h * .006;
            /* deterministic pseudo-random gradient offset per class */
            int offset = class_t*123457 % classes;
            float red = get_color(2,offset,classes);
            float green = get_color(1,offset,classes);
            float blue = get_color(0,offset,classes);
            float rgb[3];
            rgb[0] = red;
            rgb[1] = green;
            rgb[2] = blue;
            box b = dets[i].bbox;
            /* normalized center-format box -> clamped pixel corners */
            int left = (b.x-b.w/2.)*im.w;
            int right = (b.x+b.w/2.)*im.w;
            int top = (b.y-b.h/2.)*im.h;
            int bot = (b.y+b.h/2.)*im.h;
            if(left < 0) left = 0;
            if(right > im.w-1) right = im.w-1;
            if(top < 0) top = 0;
            if(bot > im.h-1) bot = im.h-1;
            draw_box_width(im, left, top, right, bot, width, red, green, blue);
            if (alphabet) {
                image label = get_label(alphabet, labelstr, (im.h*.03));
                draw_label(im, top + width, left, label, rgb);
                free_image(label);
            }
            if (dets[i].mask){
                /* region-layer instance mask: fixed 14x14, resized to the box */
                image mask = float_to_image(14, 14, 1, dets[i].mask);
                image resized_mask = resize_image(mask, b.w*im.w, b.h*im.h);
                image tmask = threshold_image(resized_mask, .5);
                embed_image(tmask, im, left, top);
                free_image(mask);
                free_image(resized_mask);
                free_image(tmask);
            }
        }
    }
}
//////////////////////image end
///////////////////////////////////////////////////////////////////////20181229 anti-reorg start => KxKxTmxTn
/* Generic max/min — NOTE: classic double-evaluation macros; do not pass
   arguments with side effects. */
#define MAX(x,y) ((x)>(y)?(x):(y))
#define MIN(x,y) ((x)<(y)?(x):(y))
/* Accelerator tiling parameters: stride S, kernel K, input-channel tile Tn,
   output-channel tile Tm, output row/col tile Tr x Tc. */
#define S 2
#define K 3
#define Tn 4
#define Tm 32
#define Tr 26
#define Tc 26
/* On-chip input tile just large enough to cover a Tr x Tc output tile at
   stride S with a K-wide kernel. */
#define OnChipIB_Width ((Tc-1)*S+K)
#define OnChipIB_Height ((Tr-1)*S+K)
#define MAX_BETA_LENGTH (1024)
/* Weight-layout selector: REORG_GEN = original layout (weight_load),
   REORG_TEST = pre-reorganized layout (weight_load_reorg). */
#define REORG_GEN
//#define REORG_TEST
//////////////////////////////////////////////////T3 start
void input_load(float *input,float input_buffer[Tn][OnChipIB_Height][OnChipIB_Width],int r,int c,int n,int Kernel_stride,int Padding,int TRow,int TCol,int Input_w,int Input_h,int TN_MIN,int IHxIW,int LayerType)
{
    /* Stage a TN_MIN x TRow x TCol input tile for output origin (r,c) and
       input-channel offset n into the on-chip buffer, applying padding. */
    int t1,t2,t3,t4;    /* NOTE(review): t4 is declared but unused here */
    int xoffset;
    int yoffset;
    static float input_memcpy_buffer[Tn*OnChipIB_Height*OnChipIB_Width];
    /* top-left input coordinate of the tile, shifted left/up by Padding */
    const int Coffset = c*Kernel_stride - Padding;
    const int Roffset = r*Kernel_stride - Padding;
    const int CurrentOffset = n*IHxIW + Roffset*Input_w + Coffset;
    /* maxpool tiles (LayerType==1) pad with a very negative value so padded
       taps never win the max; conv tiles pad with 0 */
    float pad_value = 0;
    if(LayerType==1)
        pad_value = -1024*1024;
    int input_mmcpy_offset = 0;
    /* Bulk row copies from DRAM into a staging buffer.
       NOTE(review): rows that start out of bounds are still memcpy'd here
       and only masked below — this relies on the surrounding allocation
       tolerating the over-read; confirm buffer margins. */
    for(t1 = 0;t1 < TN_MIN; t1++)
        for(t2 = 0;t2 < TRow; t2++)
        {
            memcpy((float *)(input_memcpy_buffer + input_mmcpy_offset),(float *)(input + CurrentOffset + t1*IHxIW + t2*Input_w),TCol*sizeof(float));
            input_mmcpy_offset += TCol;
        }
    input_mmcpy_offset = 0;
    /* Scatter into the on-chip tile, substituting pad_value for any tap
       that falls outside the real input image or beyond TN_MIN channels. */
    for(t1 = 0;t1 < Tn; t1++)
        for(t2 = 0;t2 < TRow; t2++)
            for(t3 = 0;t3 < TCol; t3++)
            {
                xoffset = Coffset + t3;
                yoffset = Roffset + t2;
                bool XEnable = (xoffset >= 0)&&(xoffset < Input_w);
                bool YEnable = (yoffset >= 0)&&(yoffset < Input_h);
                bool PaddingEnable = XEnable&&YEnable;
                if(PaddingEnable&&(t1 < TN_MIN))
                    input_buffer[t1][t2][t3] = input_memcpy_buffer[input_mmcpy_offset];
                else
                    input_buffer[t1][t2][t3] = pad_value;
                input_mmcpy_offset++;
            }
}
void weight_load(float *Weight,float weight_buffer[Tm][Tn][K][K],bool weight_load_enable,int m,int n,int IFM_numxKxK,int KxK,int Kernel_size,int TM_MIN,int TN_MIN)
{
    /* Stage a TM_MIN x TN_MIN block of KxK kernels from the original
       (channel-major) weight layout into the on-chip buffer; unused slots
       are zeroed so the MAC loops can run full Tm x Tn. No-op unless
       weight_load_enable. */
    int t1,t2,t3,t4;
    static float weight_memcpy_buffer[Tm*Tn*K*K];
    if(!weight_load_enable)
        return;
    /* start of output-channel m / input-channel n in the flat weight array */
    const int Woffset = m*IFM_numxKxK + n*KxK;
    int weight_memcpy_offset = 0;
    /* gather: one contiguous KxK kernel per (output, input) channel pair */
    for(t1 = 0;t1 < TM_MIN; t1++)
        for(t2 = 0;t2 < TN_MIN; t2++)
        {
            memcpy((float *)(weight_memcpy_buffer + weight_memcpy_offset),(float *)(Weight + Woffset + t1*IFM_numxKxK + t2*KxK),KxK*sizeof(float));
            weight_memcpy_offset += KxK;
        }
    weight_memcpy_offset = 0;
    /* expand into weight_buffer[Tm][Tn][K][K]; slots past TM_MIN/TN_MIN = 0 */
    for(t1 = 0;t1 < Tm; t1++)
        for(t2 = 0;t2 < Tn; t2++)
            for(t3 = 0;t3 <Kernel_size; t3++)
                for(t4 = 0;t4 <Kernel_size; t4++)
                {
                    bool Enable = (t1 < TM_MIN)&&(t2 < TN_MIN);
                    if(Enable)
                    {
                        weight_buffer[t1][t2][t3][t4] = weight_memcpy_buffer[weight_memcpy_offset];
                        weight_memcpy_offset++;
                    }
                    else
                        weight_buffer[t1][t2][t3][t4] = 0;
                }
}
void weight_load_reorg(float *Weight,float weight_buffer[Tm][Tn][K][K],bool weight_load_enable,int m,int n,int IFM_numxKxK,int KxK,int Kernel_size,int TM_MIN,int TN_MIN)
{
    /* Load weights from the pre-reorganized, tile-sequential layout: each
       tile's weights are contiguous, so one memcpy suffices.
       NOTE(review): the static Woffset assumes tiles are requested strictly
       in order and resets only when (m==0 && n==0) — this function is
       stateful and not reentrant. */
    int t1,t2,t3,t4;
    static float weight_memcpy_buffer[Tm*Tn*K*K];
    static int Woffset;
    if(!weight_load_enable)
        return;
    if(m==0&&n==0)
        Woffset = 0;
    memcpy(weight_memcpy_buffer,(float *)(Weight + Woffset),TM_MIN*TN_MIN*KxK*sizeof(float));
    Woffset += TM_MIN*TN_MIN*KxK;
    int weight_memcpy_offset = 0;
    /* reorganized layout streams in (k-row, k-col, tm, tn) order */
    for(t3 = 0;t3 <Kernel_size; t3++)
        for(t4 = 0;t4 <Kernel_size; t4++)
            for(t1 = 0;t1 < Tm; t1++)
                for(t2 = 0;t2 < Tn; t2++)
                {
                    bool Enable = (t1 < TM_MIN)&&(t2 < TN_MIN);
                    if(Enable)
                    {
                        weight_buffer[t1][t2][t3][t4] = weight_memcpy_buffer[weight_memcpy_offset];
                        weight_memcpy_offset++;
                    }
                    else
                        weight_buffer[t1][t2][t3][t4] = 0;
                }
}
void copy_input_weight(float *input,float *Weight,int InFM_num,int Input_w,int Input_h,int OutFM_num,int Kernel_size,int Kernel_stride,int r,int c,int m,int n,
    int TM_MIN,int TN,int TRow,int TCol,int Padding,float input_buffer[Tn][OnChipIB_Height][OnChipIB_Width],float weight_buffer[Tm][Tn][K][K],int TMP_N_next[1],
    bool enable,bool weight_load_enable,bool initialize,const int IHxIW,const int KxK,const int IFM_numxKxK,const int LayerType)
{
    /* Fetch stage of the tile pipeline: load the next input tile and (when
       enabled) its weight block. Records the input-channel offset n in
       TMP_N_next for the compute stage. No-op unless `enable`.
       NOTE(review): `initialize` and OutFM_num are unused in this build. */
    if(!enable)
        return ;
    /* last channel tile may be partial */
    const int TN_MIN = MIN(TN,InFM_num - n);
    TMP_N_next[0] = n;
    input_load(input, input_buffer, r, c, n, Kernel_stride, Padding, TRow, TCol, Input_w, Input_h, TN_MIN, IHxIW, LayerType);
#ifdef REORG_TEST
    weight_load_reorg(Weight,weight_buffer,weight_load_enable,m,n,IFM_numxKxK,KxK,Kernel_size,TM_MIN,TN_MIN);
#else
    weight_load(Weight,weight_buffer,weight_load_enable,m,n,IFM_numxKxK,KxK,Kernel_size,TM_MIN,TN_MIN);
#endif
}
//////////////////////////////////////////////////T3 end
void copy_local_beta(float beta_buffer[MAX_BETA_LENGTH],float local_beta_buffer[MAX_BETA_LENGTH],const int TM_MIN,int m)
{
    /* Copy TM_MIN bias values starting at offset m into the local buffer
       (element-by-element rather than memcpy, matching the HLS style). */
    int tm;
    for(tm = 0; tm < TM_MIN; tm++)
    {
        local_beta_buffer[tm] = beta_buffer[m + tm];
    }
}
void nonlinear_leaky(float Input[Tm][Tr][Tc],const int TM_MIN,const int TR_MIN,const int TC_MIN,const bool IsNL)
{
    /* In-place leaky-ReLU (negative slope 0.1) over the valid tile region;
       no-op unless IsNL is set. */
    int tr,tc,tm;
    if(!IsNL)
        return ;
    for(tm = 0;tm < TM_MIN;tm++)
#pragma HLS LOOP_TRIPCOUNT min=1 max=1
        for(tr = 0;tr < TR_MIN;tr++)
#pragma HLS LOOP_TRIPCOUNT min=1 max=14
            for(tc = 0;tc < TC_MIN;tc++)
            {
#pragma HLS LOOP_TRIPCOUNT min=14 max=14
#pragma HLS PIPELINE
                float tmp = Input[tm][tr][tc];
                if(tmp < 0)
                    Input[tm][tr][tc] = tmp*0.1;
            }
}
void compute(float input_buffer[Tn][OnChipIB_Height][OnChipIB_Width],float output_buffer[Tm][Tr][Tc],
    float weight_buffer[Tm][Tn][K][K],float beta_buffer[MAX_BETA_LENGTH],int TMP_N_next[1],
    const int Kernel_size,const int Kernel_stride,int TMP_M,
    const int TM_MIN,const int TR_MIN,const int TC_MIN,bool enable,const bool IsNL,const bool reluenable)
{
    /* Tiled convolution MAC kernel: accumulates weight_buffer x input_buffer
       into output_buffer over one input-channel tile (offset TMP_N_next[0]),
       seeding the accumulator with the bias on the first MAC of each pixel.
       When called with enable==false it only latches the biases for the
       upcoming tile (pipeline prologue). Applies leaky-ReLU at the end of
       the last channel tile when reluenable && IsNL. */
    static float local_beta_buffer[Tm];
#pragma HLS ARRAY_PARTITION variable=local_beta_buffer complete dim=1
    if(!enable)
    {
        copy_local_beta(beta_buffer,local_beta_buffer,TM_MIN,TMP_M);
        return;
    }
    int i,j,tr,tc,tm,tn;
    int n = TMP_N_next[0];
    float partial_mul[Tm][Tn];
    float partial_add[Tm];
    for(i =0;i < Kernel_size; i++)
#pragma HLS LOOP_TRIPCOUNT min=1 max=5
        for(j = 0;j < Kernel_size; j++)
#pragma HLS LOOP_TRIPCOUNT min=1 max=5
            for(tr = 0;tr < TR_MIN;tr++)
#pragma HLS LOOP_TRIPCOUNT min=14 max=14
                for(tc = 0;tc < TC_MIN;tc++)
                {
#pragma HLS LOOP_TRIPCOUNT min=14 max=14
#pragma HLS PIPELINE
                    /* accumulator = bias on the very first MAC (i==j==n==0),
                       otherwise the running partial sum in output_buffer */
                    for(tm = 0;tm < Tm;tm++)
                    {
                        if(i==0&&j==0&&n==0)
                            partial_add[tm] = local_beta_buffer[tm];
                        else
                            partial_add[tm] = output_buffer[tm][tr][tc];
                    }
                    /* Tm x Tn multiplies for this kernel tap and pixel */
                    for(tm = 0;tm < Tm;tm++)
                        for(tn = 0;tn <Tn;tn++)
                        {
                            partial_mul[tm][tn] = weight_buffer[tm][tn][i][j]*input_buffer[tn][Kernel_stride*tr+i][Kernel_stride*tc+j];
                        }
                    /* reduce over the input-channel tile and write back */
                    for(tm = 0;tm < Tm;tm++)
                    {
                        float partial_sum = 0;
                        for(tn = 0;tn <Tn;tn++)
                        {
                            partial_sum += partial_mul[tm][tn];
                        }
                        output_buffer[tm][tr][tc] = partial_add[tm] + partial_sum;
                    }
                }
    if(reluenable)
        nonlinear_leaky(output_buffer,TM_MIN,TR_MIN,TC_MIN,IsNL);
}
// Write a finished TM_MIN x TR_MIN x TC_MIN output tile back to DDR.
// The output tensor is laid out channel-major (CHW); each tile row is
// written with one contiguous memcpy burst.  No-op unless write_flag.
void write_back_output_reorg(float output_buffer[Tm][Tr][Tc],float *Output,int r,int c,int m,const int Output_w,const int Output_h,
    const int TM_MIN,const int TR_MIN,const int TC_MIN,const int OHxOW,bool write_flag)
{
    if (!write_flag)
        return;
    // Linear offset of element (m, r, c) in the CHW output tensor.
    const int base = m*OHxOW + r*Output_w + c;
    int ch, row;
    for (ch = 0; ch < TM_MIN; ch++)
        for (row = 0; row < TR_MIN; row++)
        {
            memcpy((float *)(Output + ch*OHxOW + row*Output_w + base),
                   output_buffer[ch][row], TC_MIN*sizeof(float));
        }
}
// Max-pooling over a Kernel_size x Kernel_size window with stride
// Kernel_stride, computed for all Tn channel lanes in parallel.
// Input : on-chip input tile (Tn channel lanes).
// Output: pooled tile; Output[of][tr][tc] = max over the window.
// TM_MIN is accepted for interface symmetry with the other tile processors
// but the lane loop always covers all Tn lanes.  No-op unless `enable`.
//
// BUG FIX: the result was previously committed only when i==1 && j==1,
// which is the last tap of a 2x2 window only — for any other Kernel_size
// the output was never written.  The commit condition now targets the
// last tap (Kernel_size-1, Kernel_size-1) of an arbitrary window, which
// is identical behavior for the 2x2 case.
void pool_yolo2(float Input[Tn][OnChipIB_Height][OnChipIB_Width],float Output[Tm][Tr][Tc],
    const int Kernel_size,const int Kernel_stride,
    const int TM_MIN,const int TR_MIN,const int TC_MIN,bool enable)
{
    if(!enable)
        return;
    int i,j,tr,tc,of;
    float tmp[Tn];
    for(tr = 0;tr < TR_MIN;tr++)
     for(tc = 0;tc < TC_MIN;tc++)
      for(i = 0;i < Kernel_size; i++)
       for(j = 0;j < Kernel_size; j++)
       {
#pragma HLS PIPELINE
        for( of = 0; of < Tn; of++)
        {
            // Reset the running maximum at the first tap of each window.
            if(i==0&&j==0)
                tmp[of] = -1024*1024;
            if(Input[of][tr*Kernel_stride+i][tc*Kernel_stride+j] > tmp[of])
                tmp[of] = Input[of][tr*Kernel_stride+i][tc*Kernel_stride+j];
            // Commit on the last tap of the window (was hard-coded i==1&&j==1).
            if(i==Kernel_size-1&&j==Kernel_size-1)
                Output[of][tr][tc] = tmp[of];
        }
       }
}
// YOLOv2 reorg (space-to-depth) for stride 2: every 2x2 spatial block of
// input channel 0 is scattered across 4 output channels, halving the
// spatial size and quadrupling the depth.  Only Input[0] is read; the
// caller supplies the data re-packed so one lane carries the plane.
// NOTE(review): Yoffset/Xoffset are unsigned char, so 2*TR_MIN and
// 2*TC_MIN must stay <= 255 — holds for the tile sizes used here, but
// confirm if tile dimensions ever grow.
void reorg_yolo2(float Input[Tn][OnChipIB_Height][OnChipIB_Width],float Output[Tm][Tr][Tc],
const int Kernel_size,const int Kernel_stride,
const int TM_MIN,const int TR_MIN,const int TC_MIN,bool enable)
{
int x, y,kx,ky;
unsigned char Yoffset;
unsigned char Xoffset;
if(!enable)
return;
for( y = 0; y < TR_MIN; y++)
for( x = 0; x < TC_MIN; x++)
for(ky= 0;ky < 2; ky++)
for(kx = 0;kx < 2; kx++)
{
#pragma HLS PIPELINE
// Source coordinate of this sample inside the 2x2 block.
Yoffset = (y << 1) + ky;
Xoffset = (x << 1) + kx;
// Destination channel 0..3 selected by the block-internal position.
int in_index = (ky << 1) + kx;
Output[in_index][y][x] = Input[0][Yoffset][Xoffset];
}
}
// Inner double-buffered schedule for one output tile.
//
// LayerType==0 (convolution): iterates nLoops+1 times over the
// input-channel tiles, alternating the two input/weight buffer pairs so
// that tile n+1 is loaded while tile n is computed; the extra iteration
// drains the last compute.  TMP_X_next/TX_MIN_next simply forward this
// call's TMP_M/TM_MIN to the caller's write-back stage.
//
// LayerType==1 (maxpool) / LayerType==2 (reorg): a single load+process
// pair per call, ping-ponged by the caller via `pingpongx`.  The static
// registers tmp_x/tmp_tx_min delay TMP_M/TM_MIN by one call so the
// processing stage sees the tile index that matches the buffer it reads.
// NOP is a dummy TMP_N_next sink for the loader in these modes.
void intra_pingpong_wrapper(float *Input,float *Weight, float output_buffer[Tm][Tr][Tc],float beta_buffer[MAX_BETA_LENGTH],
float input_buffer0[Tn][OnChipIB_Height][OnChipIB_Width],float input_buffer1[Tn][OnChipIB_Height][OnChipIB_Width],
int InFM_num,int Input_w,int Input_h,int OutFM_num,int Kernel_size,int Kernel_stride,
int TMP_R,int TMP_C,int TMP_M,int m,int TM_MIN,int TR_MIN,int TC_MIN,int TN,int TRow,int TCol,int Padding,
int IHxIW,int KxK,int IFM_numxKxK,int nLoops,bool IsNL,int LayerType,int TM,int TMP_X_next[1],int TX_MIN_next[1],bool pingpongx,bool input_flag,bool process_flag)
{
static float weight_buffer0[Tm][Tn][K][K];
#pragma HLS ARRAY_PARTITION variable=weight_buffer0 complete dim=1
#pragma HLS ARRAY_PARTITION variable=weight_buffer0 complete dim=2
static float weight_buffer1[Tm][Tn][K][K];
#pragma HLS ARRAY_PARTITION variable=weight_buffer1 complete dim=1
#pragma HLS ARRAY_PARTITION variable=weight_buffer1 complete dim=2
static int NOP[1];
// One-call delay registers for the pool/reorg path (see header comment).
static int tmp_x;
static int tmp_tx_min;
if(LayerType==0)
{
if(!input_flag)
return;
TMP_X_next[0] = TMP_M;//consider by the inner-out loop
TX_MIN_next[0] = TM_MIN;// like above
bool pingpong = 0;
int TMP_N_next0[1];
int TMP_N_next1[1];
int n;
int TMP_N;
// nLoops+1 iterations: load tile n while computing tile n-1; the last
// iteration only computes (n!=nLoops disables the load), and the first
// only loads (n!=0 disables the compute).
for(TMP_N = 0,n = 0;n < nLoops+1; n++,TMP_N += TN)
{
if(pingpong == 1)
{
copy_input_weight(Input,Weight,InFM_num,Input_w,Input_h,OutFM_num,Kernel_size,Kernel_stride,TMP_R,TMP_C,TMP_M,TMP_N,
TM_MIN,TN,TRow,TCol,Padding,input_buffer1,weight_buffer1,TMP_N_next1,n!=nLoops,1,(m==0)&&(n==0),IHxIW,KxK,IFM_numxKxK,LayerType);
compute(input_buffer0,output_buffer,weight_buffer0,beta_buffer,TMP_N_next0,Kernel_size,Kernel_stride,TMP_M,TM_MIN,TR_MIN,TC_MIN,n!=0,IsNL,n==nLoops);
pingpong = 0;
}else
{
copy_input_weight(Input,Weight,InFM_num,Input_w,Input_h,OutFM_num,Kernel_size,Kernel_stride,TMP_R,TMP_C,TMP_M,TMP_N,
TM_MIN,TN,TRow,TCol,Padding,input_buffer0,weight_buffer0,TMP_N_next0,n!=nLoops,1,(m==0)&&(n==0),IHxIW,KxK,IFM_numxKxK,LayerType);
compute(input_buffer1,output_buffer,weight_buffer1,beta_buffer,TMP_N_next1,Kernel_size,Kernel_stride,TMP_M,TM_MIN,TR_MIN,TC_MIN,n!=0,IsNL,n==nLoops);
pingpong = 1;
}
}
}
else if(LayerType==1)
{
if(pingpongx==0)
{
// Hand the previous tile's index/size to the processing stage,
// then latch this call's values for the next round.
TMP_X_next[0] = tmp_x;
TX_MIN_next[0] = tmp_tx_min;
tmp_x = TMP_M;
tmp_tx_min = TM_MIN;
copy_input_weight(Input,Weight,InFM_num,Input_w,Input_h,OutFM_num,Kernel_size,Kernel_stride,TMP_R,TMP_C,TMP_M,TMP_M,
TM_MIN,TM,TRow,TCol,0,input_buffer0,NULL,NOP,input_flag,0,0,IHxIW,KxK,IFM_numxKxK,LayerType);
pool_yolo2(input_buffer1,output_buffer,Kernel_size,Kernel_stride,TX_MIN_next[0],TR_MIN,TC_MIN,process_flag);
}else
{
TMP_X_next[0] = tmp_x;
TX_MIN_next[0] = tmp_tx_min;
tmp_x = TMP_M;
tmp_tx_min = TM_MIN;
copy_input_weight(Input,Weight,InFM_num,Input_w,Input_h,OutFM_num,Kernel_size,Kernel_stride,TMP_R,TMP_C,TMP_M,TMP_M,
TM_MIN,TM,TRow,TCol,0,input_buffer1,NULL,NOP,input_flag,0,0,IHxIW,KxK,IFM_numxKxK,LayerType);
pool_yolo2(input_buffer0,output_buffer,Kernel_size,Kernel_stride,TX_MIN_next[0],TR_MIN,TC_MIN,process_flag);
}
}
else if(LayerType==2)
{
if(pingpongx==0)
{
TMP_X_next[0] = tmp_x;
TX_MIN_next[0] = tmp_tx_min;
tmp_x = TMP_M;
tmp_tx_min = TM_MIN;
copy_input_weight(Input,Weight,InFM_num,Input_w,Input_h,OutFM_num,Kernel_size,Kernel_stride,TMP_R,TMP_C,TMP_M,TMP_M,
TM_MIN,TM,TRow,TCol,0,input_buffer0,NULL,NOP,input_flag,0,0,IHxIW,KxK,IFM_numxKxK,LayerType);
reorg_yolo2(input_buffer1,output_buffer,Kernel_size,Kernel_stride,TX_MIN_next[0],TR_MIN,TC_MIN,process_flag);
}else
{
TMP_X_next[0] = tmp_x;
TX_MIN_next[0] = tmp_tx_min;
tmp_x = TMP_M;
tmp_tx_min = TM_MIN;
copy_input_weight(Input,Weight,InFM_num,Input_w,Input_h,OutFM_num,Kernel_size,Kernel_stride,TMP_R,TMP_C,TMP_M,TMP_M,
TM_MIN,TM,TRow,TCol,0,input_buffer1,NULL,NOP,input_flag,0,0,IHxIW,KxK,IFM_numxKxK,LayerType);
reorg_yolo2(input_buffer0,output_buffer,Kernel_size,Kernel_stride,TX_MIN_next[0],TR_MIN,TC_MIN,process_flag);
}
}
}
// Top-level accelerator entry: tiles the output feature map over
// (row, column, output-channel) and double-buffers the output tile so the
// write-back of tile m-1 overlaps the computation of tile m.
// LayerType: 0 = convolution (+bias/+leaky-ReLU), 1 = maxpool, 2 = reorg.
// The m loop runs mLoops+1 (conv) or mLoops+2 (pool/reorg) iterations so
// the trailing tiles drain through the load/process/write pipeline; the
// input/process/write flags below switch the stages off in the
// prologue/epilogue iterations accordingly.
void YOLO2_FPGA(float *Input,float *Output,float *Weight,float *Beta,const int InFM_num,const int OutFM_num,
const int Kernel_size,const int Kernel_stride,
const int Input_w,const int Input_h,const int Padding,const bool IsNL,const bool IsBN,
const int TM,const int TN,const int TR,const int TC,
const int mLoops,const int nLoops,const int rLoops,const int cLoops,const int LayerType)
{
//const int output_w = (Input_w - Kernel_size + 2*Padding)/Kernel_stride + 1 ;
//const int output_h = (Input_h - Kernel_size + 2*Padding)/Kernel_stride + 1 ;
int output_w = (Input_w - Kernel_size + (Padding << 1))/Kernel_stride + 1 ;
int output_h = (Input_h - Kernel_size + (Padding << 1))/Kernel_stride + 1 ;
// Maxpool uses ceil-style output sizing independent of kernel size.
if(LayerType==1)
{
output_w = (Input_w - 1)/Kernel_stride + 1 ;
output_h = (Input_h - 1)/Kernel_stride + 1 ;
}
const int OHxOW = output_h*output_w;
// Input tile footprint needed to produce a TR x TC output tile.
const int TRow = (TR-1)*Kernel_stride+Kernel_size;
const int TCol = (TC-1)*Kernel_stride+Kernel_size;
const int IHxIW = Input_h*Input_w;
const int KxK = Kernel_size*Kernel_size;
const int IFM_numxKxK = InFM_num*KxK;
// Extra iterations to drain the pipeline (see header comment).
const int mLoops_bound = LayerType ? (mLoops + 2): (mLoops + 1);
static float input_buffer0[Tn][OnChipIB_Height][OnChipIB_Width];
#pragma HLS ARRAY_PARTITION variable=input_buffer0 complete dim=1
static float input_buffer1[Tn][OnChipIB_Height][OnChipIB_Width];
#pragma HLS ARRAY_PARTITION variable=input_buffer1 complete dim=1
static float output_buffer[Tm][Tr][Tc];
#pragma HLS ARRAY_PARTITION variable=output_buffer complete dim=1
static float output_buffer1[Tm][Tr][Tc];
#pragma HLS ARRAY_PARTITION variable=output_buffer1 complete dim=1
static float beta_buffer[MAX_BETA_LENGTH];
int r,c,m;
/////////////////////////////////param
int TMP_R,TMP_C,TMP_M;
int TM_MIN,TR_MIN,TC_MIN;
///////////////////////////////////////
// Per-buffer carried tile index/size, produced by the compute wrapper and
// consumed one iteration later by the write-back stage.
int TMP_M_next0[1];
int TMP_M_next1[1];
int TM_MIN_next0[1];
int TM_MIN_next1[1];
bool pingpongm;
// Convolution layers preload all biases once.
if(LayerType==0)
memcpy(beta_buffer,Beta,OutFM_num*sizeof(float));
for(TMP_R = 0,r = 0; r < rLoops; r++, TMP_R += TR)
{
TR_MIN = MIN(TR,output_h -TMP_R);
for(TMP_C = 0,c = 0; c < cLoops; c++,TMP_C += TC)
{
TC_MIN = MIN(TC,output_w -TMP_C);
pingpongm = 0;
for(TMP_M = 0, m = 0; m < mLoops_bound; m++,TMP_M += TM)
{
TM_MIN = MIN(TM,OutFM_num-TMP_M);
// Stage-enable flags: which of load/process/write run this iteration.
bool MneZero = (m!=0);
bool MneOne = (m!=1);
bool MnemLoops = (m!=mLoops);
bool MneMLoopsaddOne = (m!=(mLoops+1));
bool input_flag = LayerType ? MnemLoops&&MneMLoopsaddOne: MnemLoops;
bool process_flag = LayerType ? MneZero&&MneMLoopsaddOne : MnemLoops;
bool write_flag = LayerType ? MneZero&&MneOne : MneZero;
if(pingpongm==0)
{
// Compute into buffer1 while buffer0 (tile m-1) is written back.
intra_pingpong_wrapper(Input,Weight,output_buffer1,beta_buffer,input_buffer0,input_buffer1,
InFM_num, Input_w, Input_h, OutFM_num, Kernel_size, Kernel_stride,
TMP_R, TMP_C, TMP_M, m, TM_MIN, TR_MIN, TC_MIN, TN, TRow, TCol, Padding,IHxIW,KxK,IFM_numxKxK,nLoops,IsNL,LayerType,TM, TMP_M_next1,TM_MIN_next1, pingpongm, input_flag, process_flag);
write_back_output_reorg(output_buffer,Output,TMP_R,TMP_C,TMP_M_next0[0],output_w,output_h,TM_MIN_next0[0],TR_MIN,TC_MIN,OHxOW,write_flag);
pingpongm = 1;
}else
{
intra_pingpong_wrapper(Input,Weight,output_buffer,beta_buffer,input_buffer0,input_buffer1,
InFM_num, Input_w, Input_h, OutFM_num, Kernel_size, Kernel_stride,
TMP_R, TMP_C, TMP_M, m, TM_MIN, TR_MIN, TC_MIN, TN, TRow, TCol, Padding,IHxIW,KxK,IFM_numxKxK,nLoops,IsNL,LayerType,TM, TMP_M_next0,TM_MIN_next0, pingpongm, input_flag, process_flag);
write_back_output_reorg(output_buffer1,Output,TMP_R,TMP_C,TMP_M_next1[0],output_w,output_h,TM_MIN_next1[0],TR_MIN,TC_MIN,OHxOW,write_flag);
pingpongm = 0;
}
}
}
}
}
int Weight_reorgnaization_anti(float *Weight,float *Weight_reorg,float* Alpha,int IFM_NUM,int OFM_NUM,int Kernel_size,int TM,int TN,const bool IsBN)
{
const int KxK = Kernel_size*Kernel_size;
const int IFM_NUMxKxK = IFM_NUM*KxK;
int m,n;
int tm,tn,tk;
float weight_buffer[Tm*Tn*K*K];
float weight_buffer2[Tm*Tn*K*K];
int TM_MIN,TN_MIN;
int offset = 0;
for( m = 0; m < OFM_NUM; m += TM)
{
TM_MIN = MIN(TM,OFM_NUM - m);
for(n = 0;n < IFM_NUM; n += TN)
{
TN_MIN = MIN(TN,IFM_NUM - n);
int Woffset = m*IFM_NUMxKxK + n*KxK;
for(tm = 0;tm < TM_MIN; tm++)
{
memcpy((float *)(weight_buffer + tm*TN_MIN*KxK),
(float *)(Weight + tm*IFM_NUMxKxK + Woffset),TN_MIN*KxK*sizeof(float));
}
int TN_MINxTM_MIN = TN_MIN*TM_MIN;
for(tk = 0;tk < KxK; tk++)
for(tm = 0;tm < TM_MIN; tm++)
for(tn = 0;tn < TN_MIN;tn++)
{
weight_buffer2[tk*TN_MINxTM_MIN + tm*TN_MIN + tn] = weight_buffer[tm*TN_MIN*KxK + tn*KxK + tk];
}
memcpy((float *)(Weight_reorg+offset),weight_buffer2,TM_MIN*TN_MIN*KxK*sizeof(float));
offset += TM_MIN*TN_MIN*KxK;
}
}
return 0;
}
// Host-side driver: runs the full YOLOv2 network layer by layer on the
// FPGA accelerator, managing a hand-built ping-pong memory map and the
// per-layer weight/bias offsets.  `input` is the 416x416x3 preprocessed
// image; detection output is produced by forward_region_layer at the end.
void yolov2_hls_ps(network *net, float *input)
{
int x;
network orig = *net;
net->input = input;
// Per-convolution weight/bias element counts in network order; index 23+
// are non-conv layers (route/reorg/region) and contribute no weights.
int weight_offset[32] = {864, 18432, 73728, 8192, 73728,
294912, 32768, 294912, 1179648, 131072, 1179648, 131072,
1179648, 4718592, 524288, 4718592, 524288, 4718592, 9437184,
9437184, 32768, 11796480, 435200, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int beta_offset[32] = {32, 64, 128, 64, 128, 256, 128, 256, 512, 256, 512, 256, 512, 1024,
512, 1024, 512, 1024, 1024, 1024, 64, 1024, 425, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int offset_index = 0;
// NOTE(review): calloc results and fread return values below are not
// checked; a short read or OOM proceeds silently — consider hardening.
float *Weight_buf = (float *)calloc(203767168/4,sizeof(float));
float *Beta_buf = (float *)calloc(43044/4,sizeof(float));
#ifdef REORG_TEST
FILE *fp_w = fopen("weightsv2_comb_reorg.bin", "rb");
if(!fp_w) file_error("weightsv2_comb_reorg.bin");
#else
FILE *fp_w = fopen("weights.bin", "rb");
if(!fp_w) file_error("weights.bin");
#endif
#ifdef REORG_GEN
float *Weight_reorg_buf = (float *)calloc(203767168/4,sizeof(float));
FILE *fp_w_reorg = fopen("weightsv2_comb_reorg.bin", "wb");
if(!fp_w_reorg) file_error("weightsv2_comb_reorg.bin");
#endif
FILE *fp_b = fopen("bias.bin", "rb");
if(!fp_b) file_error("bias.bin");
fread(Weight_buf, sizeof(float), 203767168/4, fp_w);
fread(Beta_buf, sizeof(float), 43044/4, fp_b);
fclose(fp_w);
fclose(fp_b);
// Single flat activation arena: layers alternate between the top and the
// bottom of this region so each layer's input survives while its output
// is written (1024-float guard band at each end).
#define MEM_LEN (416*416*32+208*208*32)
float *Memory_buf = (float*)calloc(MEM_LEN+1024+1024,sizeof(float));
float *Memory_top = Memory_buf+1024;
float *Memory_bottom = Memory_top + MEM_LEN;
memcpy(Memory_top,input,416*416*3*sizeof(float));//416x416x3 input_pic
float* in_ptr[32];
float* out_ptr[32];
#define ROUTE16_LEN (26*26*512)
#define CONV27_LEN (13*13*256)
#define CONV24_LEN (13*13*1024)
// Layers 0-17: simple top/bottom ping-pong.
for(x=0;x<18;x++)
{
if(x%2==0)
{
in_ptr[x] = Memory_top;
out_ptr[x] = Memory_bottom - net->layers[x].outputs ;
}
else
{
in_ptr[x] = out_ptr[x-1];
out_ptr[x] = Memory_top;
}
}
// Layers 18-24: same ping-pong, but keep layer 16's output (the route
// source) resident at the bottom of the arena.
for(x=18;x<25;x++)
{
if(x%2==0)
{
in_ptr[x] = Memory_top;
out_ptr[x] = Memory_bottom - ROUTE16_LEN - net->layers[x].outputs;
}else
{
in_ptr[x] = out_ptr[x-1];
out_ptr[x] = Memory_top;
}
}
// Hand-placed pointers for the route/reorg/detection tail (layers 25 and
// 28 are route layers that only alias existing buffers).
in_ptr[26] = Memory_bottom - ROUTE16_LEN;
out_ptr[26] = Memory_top;
in_ptr[27] = Memory_top;
out_ptr[27] = Memory_bottom - ROUTE16_LEN - CONV24_LEN - CONV27_LEN;
in_ptr[29] = out_ptr[27];
out_ptr[29] = Memory_top;
in_ptr[30] = Memory_top;
out_ptr[30] = Memory_bottom - net->layers[30].outputs;
in_ptr[31] = out_ptr[30];
network netp = *net;
int i;
int woffset = 0;
int aoffset = 0;
int boffset = 0;
int TR,TC,TM,TN;
int output_w,output_h;
int rLoops,cLoops,mLoops,nLoops;
double sum_gop = 0.0;
for(i = 0; i < netp.n; ++i)
{
netp.index = i;
layer l = netp.layers[i];
printf("Layer[%2d]: ",i);
switch(l.type)
{
case CONVOLUTIONAL:
printf("outputMemory:%8d;BN=%d;Activation=%d;conv %5d %2d x%2d /%2d %4d x%4d x%4d -> %4d x%4d x%4d %5.3f BFLOPs\n",l.outputs,l.batch_normalize,l.activation, l.n, l.size, l.size, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c, (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.);
sum_gop += (2.0 * l.n * l.size*l.size*l.c/l.groups * l.out_h*l.out_w)/1000000000.;
output_w = (l.w - l.size + 2*l.pad)/l.stride + 1 ;
output_h = (l.h - l.size + 2*l.pad)/l.stride + 1 ;
// Tile sizes bounded by on-chip buffer capacity and layer dims.
TR = MIN(((OnChipIB_Height-l.size)/l.stride+1),Tr);//keep Kernel_stride>=1
TR = MIN(output_h,TR);
TC = MIN(((OnChipIB_Width-l.size)/l.stride+1),Tc);
TC = MIN(output_w,TC);
TM = MIN(l.n,Tm);
TN = MIN(l.c,Tn);
rLoops = (int)ceil(((float)output_h)/TR);
cLoops = (int)ceil(((float)output_w)/TC);
mLoops = (int)ceil(((float)l.n)/TM);
nLoops = (int)ceil(((float)l.c)/TN);
YOLO2_FPGA(in_ptr[i],out_ptr[i],Weight_buf+woffset,Beta_buf+boffset,
l.c,l.n,l.size,
l.stride,l.w,l.h,l.pad,l.activation==LEAKY?1:0,l.batch_normalize?1:0,
TM,TN,TR,TC,
mLoops,nLoops,rLoops,cLoops,0);
#ifdef REORG_GEN
Weight_reorgnaization_anti(Weight_buf + woffset,Weight_reorg_buf + woffset,NULL,l.c,l.n,l.size,TM,TN,0);
#endif
printf("TR=%d,TC=%d,TM=%d,TN=%d,rLoops=%d,cLoops=%d,mLoops=%d,nLoops=%d\n",TR,TC,TM,TN,rLoops,cLoops,mLoops,nLoops);
woffset += weight_offset[offset_index];
boffset += beta_offset[offset_index];
offset_index++;
break;
case MAXPOOL:
printf("outputMemory:%8d;max %d x %d / %d %4d x%4d x%4d -> %4d x%4d x%4d\n",l.outputs, l.size, l.size, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c);
//output_w = (l.w - l.size)/l.stride + 1 ;
//output_h = (l.h - l.size)/l.stride + 1 ;
// NOTE(review): w/h assignments look swapped (out_h -> output_w);
// harmless for this network's square layers — confirm intent.
output_w = l.out_h;
output_h = l.out_w;
TR = MIN(((OnChipIB_Height-l.size)/l.stride+1),Tr);//keep Kernel_stride>=1
TC = MIN(((OnChipIB_Width-l.size)/l.stride+1),Tc);
TR = MIN(output_h,TR);
TC = MIN(output_w,TC);
TM = MIN(Tm,Tn);
TM = MIN(l.c,TM);
rLoops = (int)ceil(((float)output_h)/TR);
cLoops = (int)ceil(((float)output_w)/TC);
mLoops = (int)ceil(((float)l.c)/TM);
YOLO2_FPGA(in_ptr[i],out_ptr[i],NULL,NULL,l.c,l.c,
l.size,l.stride,l.w,l.h,l.pad,0,0,TM,0,TR,TC,mLoops,0,rLoops,cLoops,1);
break;
case REORG:
printf("outputMemory:%8d;reorg /%2d %4d x%4d x%4d -> %4d x%4d x%4d\n",l.outputs, l.stride, l.w, l.h, l.c, l.out_w, l.out_h, l.out_c);
// The 26x26x64 reorg is reshaped into a 52 x (32*26) single-channel
// problem so the accelerator's 2x2 space-to-depth kernel applies.
output_w = 26;
output_h = 32*13;
TR = MIN(((OnChipIB_Height-l.stride)/l.stride+1),Tr);//keep Kernel_stride>=1
TR = MIN(output_h,TR);
TC = MIN(((OnChipIB_Width-l.stride)/l.stride+1),Tc);
TC = MIN(output_w,TC);
TM = 4;
rLoops = (int)ceil(((float)output_h)/TR);
cLoops = (int)ceil(((float)output_w)/TC);
mLoops = 1;
YOLO2_FPGA(in_ptr[i],out_ptr[i],NULL,NULL,1,4,
l.stride,l.stride,52,32*26,0,0,0,TM,0,TR,TC,mLoops,0,rLoops,cLoops,2);
break;
case ROUTE:
printf("outputMemory:%8d;route ",l.outputs);
int j;
for(j = 0; j < l.n; ++j){
printf(" %d", l.input_layers[j]);
}
printf("\n");
break;
case REGION:
printf("outputMemory:%8d;Detection\n",l.outputs);
netp.input = in_ptr[i];
forward_region_layer(l,netp);
break;
}
netp.input = l.output;
}
printf("SUM_GOP=%g\n",sum_gop);
*net = orig;
#ifdef REORG_GEN
fwrite(Weight_reorg_buf, sizeof(float), 203767168/4, fp_w_reorg);
fclose(fp_w_reorg);
free(Weight_reorg_buf);
#endif
free(Memory_buf);
free(Weight_buf);
free(Beta_buf);
}
///////////////////////////////////////////////////////////////////////20181229 anti-reorg ok end n4m32
#endif
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distribute-cache-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
/*
  Quotient/remainder pair — presumably used to wrap virtual pixel
  coordinates (tile/mirror methods); confirm against VirtualPixelModulo().
*/
typedef struct _MagickModulo
{
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const Quantum
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static const void
*GetVirtualMetacontentFromCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *,
ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,Quantum *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCacheMetacontent(CacheInfo *magick_restrict,
NexusInfo *magick_restrict,ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static Quantum
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode,
const ssize_t,const ssize_t,const size_t,const size_t,
const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *)
magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
/* Serializes access to the module-wide cache state below. */
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
/* -1 means "not yet determined" — NOTE(review): consumer not visible in
   this chunk; presumably caches the anonymous-memory policy probe. */
static ssize_t
cache_anonymous_memory = (-1);
/* NOTE(review): epoch timestamp for cache housekeeping — confirm usage. */
static time_t
cache_epoch = 0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
CacheInfo
*magick_restrict cache_info;
char
*value;
/* Allocate and zero the cache structure; allocation failure is fatal. */
cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
if (cache_info == (CacheInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(cache_info,0,sizeof(*cache_info));
cache_info->type=UndefinedCache;
cache_info->mode=IOMode;
cache_info->disk_mode=IOMode;
cache_info->colorspace=sRGBColorspace;
cache_info->file=(-1);
cache_info->id=GetMagickThreadId();
/* Nexus count: max of the request, the OpenMP thread count, and the
   thread resource limit; never zero. */
cache_info->number_threads=number_threads;
if (GetOpenMPMaximumThreads() > cache_info->number_threads)
cache_info->number_threads=GetOpenMPMaximumThreads();
if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
if (cache_info->number_threads == 0)
cache_info->number_threads=1;
cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
if (cache_info->nexus_info == (NexusInfo **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
/* Environment variable first, then security policy — the policy value,
   when present, takes precedence. */
value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (value != (const char *) NULL)
{
cache_info->synchronize=IsStringTrue(value);
value=DestroyString(value);
}
value=GetPolicyValue("cache:synchronize");
if (value != (const char *) NULL)
{
cache_info->synchronize=IsStringTrue(value);
value=DestroyString(value);
}
/* Clamp geometry limits to what fits in a long. */
cache_info->width_limit=MagickMin(GetMagickResourceLimit(WidthResource),
(MagickSizeType) LONG_MAX);
cache_info->height_limit=MagickMin(GetMagickResourceLimit(HeightResource),
(MagickSizeType) LONG_MAX);
cache_info->semaphore=AcquireSemaphoreInfo();
cache_info->reference_count=1;
cache_info->file_semaphore=AcquireSemaphoreInfo();
cache_info->debug=IsEventLogging();
cache_info->signature=MagickCoreSignature;
return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
NexusInfo
**magick_restrict nexus_info;
ssize_t
i;
/* Allocate 2*number_threads slots in one contiguous block: the first
   number_threads entries are the per-thread nexuses, the second half
   backs their virtual_nexus companions. */
nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
number_threads,sizeof(*nexus_info)));
if (nexus_info == (NexusInfo **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
*nexus_info=(NexusInfo *) AcquireQuantumMemory(2*number_threads,
sizeof(**nexus_info));
if (*nexus_info == (NexusInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info));
for (i=0; i < (ssize_t) (2*number_threads); i++)
{
nexus_info[i]=(*nexus_info+i);
/* Pair each primary nexus with its virtual nexus from the second half. */
if (i < (ssize_t) number_threads)
nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i);
nexus_info[i]->signature=MagickCoreSignature;
}
return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% void *AcquirePixelCachePixels(const Image *image,size_t *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return a direct pointer to the image's pixel buffer and its length.
    Only in-core caches (memory- or map-backed) expose their pixels;
    otherwise NULL is returned and *length is 0.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  (void) exception;
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=0;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    {
      *length=(size_t) cache_info->length;
      return(cache_info->pixels);
    }
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
  /* Lazily create the module-wide cache semaphore; always succeeds. */
  if (cache_semaphore != (SemaphoreInfo *) NULL)
    return(MagickTrue);
  cache_semaphore=AcquireSemaphoreInfo();
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
MagickPrivate void CacheComponentTerminus(void)
{
/* Ensure the semaphore exists before relinquishing it, so teardown is
   safe even if genesis never ran. */
if (cache_semaphore == (SemaphoreInfo *) NULL)
ActivateSemaphoreInfo(&cache_semaphore);
/* no op-- nothing to destroy */
RelinquishSemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
NexusInfo *nexus_info,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
Quantum
*magick_restrict p,
*magick_restrict q;
ssize_t
y;
/*
Apply clip mask.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* Nothing to clip without a write mask or with an empty region. */
if ((image->channels & WriteMaskChannel) == 0)
return(MagickTrue);
if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
return(MagickTrue);
cache_info=(CacheInfo *) image->cache;
if (cache_info == (Cache) NULL)
return(MagickFalse);
/* p: authentic pixels for the same region (via the virtual nexus);
   q: this nexus' staged pixels to be composited in place. */
p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
nexus_info->region.width,nexus_info->region.height,
nexus_info->virtual_nexus,exception);
q=nexus_info->pixels;
if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
return(MagickFalse);
for (y=0; y < (ssize_t) nexus_info->region.height; y++)
{
ssize_t
x;
for (x=0; x < (ssize_t) nexus_info->region.width; x++)
{
double
mask_alpha;
ssize_t
i;
mask_alpha=QuantumScale*GetPixelWriteMask(image,p);
/* Only blend where the mask is non-zero; fully-masked pixels keep the
   staged value in q untouched. */
if (fabs(mask_alpha) >= MagickEpsilon)
{
for (i=0; i < (ssize_t) image->number_channels; i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
/* Over-composite the authentic pixel onto the staged pixel, scaled
   by the mask alpha. */
q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha*
GetPixelAlpha(image,p),(double) q[i],(double)
GetPixelAlpha(image,q)));
}
SetPixelAlpha(image,GetPixelAlpha(image,p),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(image);
}
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
  const CacheInfo
    *magick_restrict source;

  CacheInfo
    *magick_restrict clone;

  /*
    Create a fresh cache configured like the source: same nexus thread
    count and virtual pixel method.  Pixel data is not copied.
  */
  assert(cache != NULL);
  source=(const CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  if (source->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source->filename);
  clone=(CacheInfo *) AcquirePixelCache(source->number_threads);
  clone->virtual_pixel_method=source->virtual_pixel_method;
  return((Cache) clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict destination_info,
    *magick_restrict origin_info;

  /*
    Copy the method table of the source cache into the clone.
  */
  assert(clone != (Cache) NULL);
  destination_info=(CacheInfo *) clone;
  assert(destination_info->signature == MagickCoreSignature);
  if (destination_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      destination_info->filename);
  assert(cache != (Cache) NULL);
  origin_info=(CacheInfo *) cache;
  assert(origin_info->signature == MagickCoreSignature);
  destination_info->methods=origin_info->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *clone_info,
% CacheInfo *cache_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o clone_info: the destination pixel cache.
%
% o cache_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology: copy the raw pixel
    file backing cache_info into the file backing clone_info.  Returns
    MagickTrue only when every byte of the source was transferred.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  /* rewind both caches to the start of their backing files */
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    {
#if defined(MAGICKCORE_HAVE_LINUX_SENDFILE)
      /* in-kernel copy; sendfile(2) transfers at most 0x7ffff000 bytes per
         call, so only attempt it when the whole cache fits in one call */
      if (cache_info->length < 0x7ffff000)
        {
          count=sendfile(clone_info->file,cache_info->file,(off_t *) NULL,
            (size_t) cache_info->length);
          if (count == (ssize_t) cache_info->length)
            return(MagickTrue);
          /* partial or failed sendfile: rewind and fall back to the
             user-space read/write loop below */
          if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
              (lseek(clone_info->file,0,SEEK_SET) < 0))
            return(MagickFalse);
        }
#endif
      quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
    }
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  extent=0;
  /* bounce-buffer copy loop; read() returning 0 (EOF) or <0 (error) ends it */
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;  /* short write: out of space or I/O error */
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  /* success requires the full cache length to have been copied */
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
/* choose an OpenMP thread count: serial unless multithreaded; throttled to 2
   threads when either cache is disk/distributed-backed; otherwise scaled by
   the chunk size */
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
#define cache_number_threads(source,destination,chunk,multithreaded) \
  num_threads((multithreaded) == 0 ? 1 : \
    (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
    (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
    MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
    MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))

  MagickBooleanType
    optimize,
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  /*
    Clone the source pixel cache (cache_info) into the destination
    (clone_info).  Identical morphologies are copied wholesale; otherwise
    pixels are translated row by row through per-thread nexuses.
  */
  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  /* a ping cache carries no pixel data to copy */
  if (cache_info->type == PingCache)
    return(MagickTrue);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  if ((cache_info->storage_class == clone_info->storage_class) &&
      (cache_info->colorspace == clone_info->colorspace) &&
      (cache_info->alpha_trait == clone_info->alpha_trait) &&
      (cache_info->channels == clone_info->channels) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->number_channels == clone_info->number_channels) &&
      (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
      (cache_info->metacontent_extent == clone_info->metacontent_extent))
    {
      /*
        Identical pixel cache morphology: bulk-copy the stores.
      */
      if (((cache_info->type == MemoryCache) ||
           (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
        {
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->number_channels*cache_info->columns*cache_info->rows*
            sizeof(*cache_info->pixels));
          if ((cache_info->metacontent_extent != 0) &&
              (clone_info->metacontent_extent != 0))
            (void) memcpy(clone_info->metacontent,cache_info->metacontent,
              cache_info->columns*cache_info->rows*
              clone_info->metacontent_extent*sizeof(unsigned char));
          return(MagickTrue);
        }
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
    }
  /*
    Mismatched pixel cache morphology: copy row by row.
  */
  cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
  clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  /* matching channel maps allow a straight memcpy per row */
  optimize=(cache_info->number_channels == clone_info->number_channels) &&
    (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
    MagickTrue : MagickFalse;
  /* bytes-per-row copy bound: the narrower of the two caches */
  length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns,
    clone_info->number_channels*clone_info->columns);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *pixels;

    ssize_t
      x;

    /* a failure on any row aborts the remaining iterations */
    if (status == MagickFalse)
      continue;
    /* rows beyond the destination's extent are dropped */
    if (y >= (ssize_t) clone_info->rows)
      continue;
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
      cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
      clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
    if (optimize != MagickFalse)
      (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
        sizeof(Quantum));
    else
      {
        const Quantum
          *magick_restrict p;

        Quantum
          *magick_restrict q;

        /*
          Mismatched pixel channel map: translate channel by channel,
          leaving destination channels absent from the source zeroed.
        */
        p=cache_nexus[id]->pixels;
        q=clone_nexus[id]->pixels;
        for (x=0; x < (ssize_t) cache_info->columns; x++)
        {
          ssize_t
            i;

          if (x == (ssize_t) clone_info->columns)
            break;
          for (i=0; i < (ssize_t) clone_info->number_channels; i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=clone_info->channel_map[i].channel;
            traits=cache_info->channel_map[channel].traits;
            if (traits != UndefinedPixelTrait)
              *q=*(p+cache_info->channel_map[channel].offset);
            q++;
          }
          p+=cache_info->number_channels;
        }
      }
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->metacontent_extent != 0) &&
      (clone_info->metacontent_extent != 0))
    {
      /*
        Clone metacontent.
      */
      length=(size_t) MagickMin(cache_info->metacontent_extent,
        clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        Quantum
          *pixels;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
          cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
          clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        if ((clone_nexus[id]->metacontent != (void *) NULL) &&
            (cache_nexus[id]->metacontent != (void *) NULL))
          (void) memcpy(clone_nexus[id]->metacontent,
            cache_nexus[id]->metacontent,length*sizeof(unsigned char));
        status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
      }
    }
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  /*
    Release the pixel cache attached to the image, if one exists.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache == (void *) NULL)
    return;
  image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Deallocate memory associated with the pixel cache, delegating to a
    registered destroy handler when one is installed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.destroy_pixel_handler == (DestroyPixelHandler) NULL)
    {
      image->cache=DestroyPixelCache(image->cache);
      return;
    }
  cache_info->methods.destroy_pixel_handler(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    result;

  /*
    Close the file descriptor backing a disk cache and return its file
    resource; MagickFalse when no file was open or close(2) failed.
  */
  if (cache_info->file == -1)
    return(MagickFalse);
  result=close(cache_info->file);
  cache_info->file=(-1);
  RelinquishMagickResource(FileResource,1);
  return(result != -1 ? MagickTrue : MagickFalse);
}
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  /*
    Release the pixel store backing the cache -- heap memory, memory-mapped
    file, disk file, or distributed server -- according to the cache type,
    and reset the cache to an undefined state.
  */
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          /* pixels are owned by the OpenCL cache info; release through it */
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      /* persistent or read-only caches keep their backing file on disk */
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /* NOTE(review): no break above -- MapCache falls through to DiskCache,
       apparently intentionally, so the backing file descriptor is closed
       and the disk resource released; confirm before adding a break. */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  /* reset state so a stale cache cannot be mistaken for a live one */
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}
MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Drop one reference to the cache; the cache is torn down only when the
    last reference is released.  Always returns (Cache) NULL.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      /* other owners remain; nothing to destroy */
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* release pixel storage first, then the ancillary structures */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  /* poison the signature to catch use-after-destroy */
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  /*
    Free the staging buffer owned by a cache nexus and reset its state.
  */
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->cache=(Quantum *) NULL;
  nexus_info->pixels=(Quantum *) NULL;
  nexus_info->metacontent=(void *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}
MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  ssize_t
    i;

  /*
    Destroy a pixel cache nexus array; always returns NULL.
  */
  assert(nexus_info != (NexusInfo **) NULL);
  /* 2*number_threads entries: each thread owns a nexus plus a virtual nexus */
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    if (nexus_info[i]->cache != (Quantum *) NULL)
      RelinquishCacheNexusPixels(nexus_info[i]);
    nexus_info[i]->signature=(~MagickCoreSignature);
  }
  /* NOTE(review): freeing only nexus_info[0] presumes the NexusInfo structs
     were allocated as one contiguous block anchored at the first element --
     confirm against AcquirePixelCacheNexus */
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontent() returns the authentic metacontent corresponding
% with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the associated pixels are not available.
%
% The format of the GetAuthenticMetacontent() method is:
%
% void *GetAuthenticMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the metacontent staged by the last authentic pixel request,
    delegating to a registered handler when one is installed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_metacontent_from_handler ==
      (GetAuthenticMetacontentFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->metacontent);
    }
  return(cache_info->methods.get_authentic_metacontent_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontentFromCache() returns the meta-content corresponding
% with the last call to QueueAuthenticPixelsCache() or
% GetAuthenticPixelsCache().
%
% The format of the GetAuthenticMetacontentFromCache() method is:
%
% void *GetAuthenticMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void *GetAuthenticMetacontentFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Return the metacontent associated with this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[thread_id]->metacontent);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image,
% MagickCLDevice device,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o device: the device to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return (and retain) the OpenCL buffer wrapping this image's pixel
    cache, or NULL when the cache cannot be exposed to OpenCL.
  */
  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
    {
      /* materialize a private copy of the cache before sharing with OpenCL */
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *) image->cache;
    }
  /* only unmapped in-memory caches can back an OpenCL buffer */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  /* an existing buffer bound to a different context must be copied over */
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
      (cache_info->opencl->device->context != device->context))
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
    }
  /* retain while the semaphore is held so the buffer cannot vanish */
  if (cache_info->opencl != (MagickCLCacheInfo) NULL)
    RetainOpenCLMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return((cl_mem) NULL);
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict staged;

  /*
    Stage the requested region, then transfer pixels (and metacontent, if
    present) from the cache unless the nexus maps the cache directly.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  staged=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (staged == (Quantum *) NULL)
    return((Quantum *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(staged);
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  if ((cache_info->metacontent_extent != 0) &&
      (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return((Quantum *) NULL);
  return(staged);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
% Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Return the pixels staged by the last queue/get on this thread's nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[thread_id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated
% corresponding with the last call to QueueAuthenticPixels() or
% GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
% Quantum *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the authentic pixels from the most recent queue/get request,
    delegating to a registered handler when one is installed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_from_handler ==
      (GetAuthenticPixelsFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->pixels);
    }
  return(cache_info->methods.get_authentic_pixels_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a Quantum array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image has corresponding metacontent,call
% GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
% meta-content corresponding to the region. Once the Quantum array has
% been updated, the changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Obtain a read/write pixel region, preferring an installed handler over
    the default nexus path.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_handler ==
      (GetAuthenticPixelsHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
        cache_info->nexus_info[id],exception));
    }
  return(cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
    rows,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Fetch authentic pixels for the region through this thread's nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated corresponding
% with the last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Report the extent of the pixel region staged by the most recent
    authentic pixel request on this thread.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(GetPixelCacheNexusExtent(cache_info,
    cache_info->nexus_info[thread_id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  const CacheInfo
    *magick_restrict cache_info;

  const PixelChannelMap
    *magick_restrict p,
    *magick_restrict q;

  /*
    Verify the image and its pixel cache agree on storage class, colorspace,
    geometry, and channel layout; any mismatch means the cache is stale.
  */
  cache_info=(CacheInfo *) image->cache;
  if (image->storage_class != cache_info->storage_class)
    return(MagickFalse);
  if (image->colorspace != cache_info->colorspace)
    return(MagickFalse);
  if (image->alpha_trait != cache_info->alpha_trait)
    return(MagickFalse);
  if (image->channels != cache_info->channels)
    return(MagickFalse);
  if (image->columns != cache_info->columns)
    return(MagickFalse);
  if (image->rows != cache_info->rows)
    return(MagickFalse);
  if (image->number_channels != cache_info->number_channels)
    return(MagickFalse);
  p=image->channel_map;
  q=cache_info->channel_map;
  if (memcmp(p,q,image->number_channels*sizeof(*p)) != 0)
    return(MagickFalse);
  if (image->metacontent_extent != cache_info->metacontent_extent)
    return(MagickFalse);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  /*
    Process-wide state shared across calls: the time limit, the CPU throttle
    delay, and a counter used to apply the throttle only every 32nd call.
  */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=GetMagickTime();
    }
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
    {
      /*
        The time resource limit has been exceeded: close any disk cache file
        and abort with a fatal (non-returning) exception.
      */
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /*
    Copy-on-write: if the cache is shared (reference_count > 1) or read-only,
    clone it so this image owns a private, writable copy.  The condition is
    re-checked under the cache semaphore (double-checked locking).
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.  A shallow stack copy of the image is used so
            OpenPixelCache() operates on the clone without touching *image.
          */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              /* Copy the pixel data too only when the caller asked for it. */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  /* Success: swap the clone in; drop the old cache below. */
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      if (image->type != UndefinedType)
        image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MemoryCache, MapCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Report which backing store (memory, map, disk, ping, ...) holds the
    image's pixels.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(const CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
% MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  /*
    Copy one pixel's channels from source into destination, scattering each
    channel into its canonical slot.  When source is NULL the image
    background color is substituted and MagickFalse is returned.
  */
  if (source != (const Quantum *) NULL)
    {
      ssize_t
        n;

      for (n=0; n < (ssize_t) GetPixelChannels(image); n++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,n);
        destination[channel]=source[n];
      }
      return(MagickTrue);
    }
  destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
  destination[GreenPixelChannel]=ClampToQuantum(
    image->background_color.green);
  destination[BluePixelChannel]=ClampToQuantum(image->background_color.blue);
  destination[BlackPixelChannel]=ClampToQuantum(
    image->background_color.black);
  destination[AlphaPixelChannel]=ClampToQuantum(
    image->background_color.alpha);
  return(MagickFalse);
}
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    handler;

  Quantum
    *magick_restrict pixels;

  /*
    Return one authentic pixel at (x,y), delegating to the installed cache
    handler when one is registered.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  handler=cache_info->methods.get_one_authentic_pixel_from_handler;
  if (handler != (GetOneAuthenticPixelFromHandler) NULL)
    return(handler(image,x,y,pixel,exception));
  pixels=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  return(CopyPixel(image,pixels,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
% MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
% const ssize_t x,const ssize_t y,Quantum *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  Quantum
    *magick_restrict pixels;

  /*
    Fetch a single authentic pixel through this thread's cache nexus and
    copy it into the caller-supplied pixel buffer.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  pixels=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  return(CopyPixel(image,pixels,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
% MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *pixels;

  GetOneVirtualPixelFromHandler
    handler;

  /*
    Return one virtual pixel at (x,y), delegating to the installed cache
    handler when one is registered; otherwise read through this thread's
    cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  handler=cache_info->methods.get_one_virtual_pixel_from_handler;
  if (handler != (GetOneVirtualPixelFromHandler) NULL)
    return(handler(image,GetPixelCacheVirtualMethod(image),x,y,pixel,
      exception));
  assert(id < (int) cache_info->number_threads);
  pixels=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,cache_info->nexus_info[id],exception);
  return(CopyPixel(image,pixels,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
% MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *pixels;

  /*
    Fetch a single virtual pixel through this thread's cache nexus and copy
    it into the caller-supplied pixel buffer.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  pixels=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  return(CopyPixel(image,pixels,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixelInfo() method is:
%
% MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *magick_restrict pixels;

  /*
    Return one virtual pixel at (x,y) as a PixelInfo; MagickFalse is
    returned when the pixel cannot be read.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  GetPixelInfo(image,pixel);
  pixels=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (pixels != (const Quantum *) NULL)
    {
      GetPixelInfoPixel(image,pixels,pixel);
      return(MagickTrue);
    }
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
% ColorspaceType GetPixelCacheColorspace(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Report the colorspace the pixel cache stores its samples in.
  */
  assert(cache != (Cache) NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheFilename() returns the filename associated with the pixel
% cache.
%
% The format of the GetPixelCacheFilename() method is:
%
% const char *GetPixelCacheFilename(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Report the path of the file backing the pixel cache (used when pixels
    are cached on disk).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(const CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->cache_filename);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Populate cache_methods with the default in-process pixel cache handlers;
    all other fields are zeroed.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /* Virtual (read-only) pixel accessors. */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  /* Authentic (read/write) pixel accessors. */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  /* Cache teardown. */
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated with
% the last call to SetPixelCacheNexusPixels() or GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    extent;

  /*
    Report the pixel count of the nexus region; an empty region means the
    whole cache, so fall back to the full image extent.
  */
  assert(cache != NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent != 0)
    return(extent);
  return((MagickSizeType) cache_info->columns*cache_info->rows);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Expose the raw pixel buffer and its length; NULL is returned when the
    pixels are not directly addressable (e.g. a disk cache).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=cache_info->length;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    return((void *) cache_info->pixels);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(const Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Report whether the cache stores DirectClass or PseudoClass pixels.
  */
  assert(cache != (Cache) NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimized cache tile width in pixels.
%
% o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  CacheInfo
    *magick_restrict cache_info;

  size_t
    extent;

  /*
    Choose a square tile size from a per-row byte budget: 2048 bytes for
    in-memory caches, 8192 bytes when the pixels reside on disk.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=MagickMax(cache_info->number_channels,1)*sizeof(Quantum);
  if (GetImagePixelCacheType(image) == DiskCache)
    *width=8192UL/extent;
  else
    *width=2048UL/extent;
  *height=(*width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Report how off-canvas ("virtual") pixel requests are satisfied for this
    image's cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(const CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromCache() returns the meta-content corresponding with
% the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualMetacontentFromCache() method is:
%
% void *GetVirtualMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const void *GetVirtualMetacontentFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the meta-content associated with this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromNexus() returns the meta-content for the specified
% cache nexus.
%
% The format of the GetVirtualMetacontentFromNexus() method is:
%
% const void *GetVirtualMetacontentFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Return the nexus meta-content; none is available until the cache has
    been assigned a storage class (i.e. opened).
  */
  assert(cache != (Cache) NULL);
  cache_info=(const CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class == UndefinedClass)
    return((void *) NULL);
  return(nexus_info->metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontent() returns the virtual metacontent corresponding with
% the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the meta-content are not available.
%
% The format of the GetVirtualMetacontent() method is:
%
% const void *GetVirtualMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const void
    *magick_restrict metacontent;

  /*
    Prefer the installed meta-content handler; when it yields nothing, fall
    back to this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
  if (metacontent == (void *) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      metacontent=GetVirtualMetacontentFromNexus(cache_info,
        cache_info->nexus_info[id]);
    }
  return(metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCacheNexus() method is:
%
% Quantum *GetVirtualPixelCacheNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 ordered-dither offsets (values 0..63) used to jitter out-of-bounds
  coordinates for the dither virtual-pixel methods.
*/
static ssize_t
  DitherMatrix[64] =
  {
     0, 48, 12, 60,  3, 51, 15, 63,
    32, 16, 44, 28, 35, 19, 47, 31,
     8, 56,  4, 52, 11, 59,  7, 55,
    40, 24, 36, 20, 43, 27, 39, 23,
     2, 50, 14, 62,  1, 49, 13, 61,
    34, 18, 46, 30, 33, 17, 45, 29,
    10, 58,  6, 54,  9, 57,  5, 53,
    42, 26, 38, 22, 41, 25, 37, 21
  };

static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    offset;

  /*
    Jitter x by the dither matrix (centered on zero), clamped to the valid
    column range [0,columns-1].
  */
  offset=x+DitherMatrix[x & 0x07]-32L;
  if (offset < 0L)
    offset=0L;
  else
    if (offset >= (ssize_t) columns)
      offset=(ssize_t) columns-1L;
  return(offset);
}

static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    offset;

  /*
    Jitter y by the dither matrix (centered on zero), clamped to the valid
    row range [0,rows-1].
  */
  offset=y+DitherMatrix[y & 0x07]-32L;
  if (offset < 0L)
    offset=0L;
  else
    if (offset >= (ssize_t) rows)
      offset=(ssize_t) rows-1L;
  return(offset);
}
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  /*
    Clamp x to the valid column range [0,columns-1] (replicate-edge policy).
  */
  if (x < 0L)
    return(0L);
  return(x < (ssize_t) columns ? x : (ssize_t) (columns-1));
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  /*
    Clamp y to the valid row range [0,rows-1] (replicate-edge policy).
  */
  if (y < 0L)
    return(0L);
  return(y < (ssize_t) rows ? y : (ssize_t) (rows-1));
}
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  double
    value;

  /*
    Pick a pseudo-random column index in [0,columns-1].
  */
  value=GetPseudoRandomValue(random_info);
  return((ssize_t) (columns*value));
}
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  double
    value;

  /*
    Pick a pseudo-random row index in [0,rows-1].
  */
  value=GetPseudoRandomValue(random_info);
  return((ssize_t) (rows*value));
}
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;

  ssize_t
    span;

  /*
    Floored division/modulo of offset by extent: the remainder is always in
    [0,extent-1], even for negative offsets (C's '%' truncates toward zero).
  */
  span=(ssize_t) extent;
  modulo.quotient=offset/span;
  modulo.remainder=offset % span;
  if ((modulo.remainder != 0) && ((offset ^ span) < 0))
    {
      modulo.quotient--;
      modulo.remainder+=span;
    }
  return(modulo);
}
/*
  GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
  pixel cache as defined by the geometry parameters for the given cache
  nexus.  A pointer to the pixels is returned if the pixels are transferred,
  otherwise NULL.  Requests that fall (partly) outside the cache extents are
  synthesized according to virtual_pixel_method.
*/
MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    *magick_restrict virtual_nexus;

  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];

  const Quantum
    *magick_restrict p;

  const void
    *magick_restrict r;

  Quantum
    *magick_restrict q;

  ssize_t
    i,
    u;

  unsigned char
    *magick_restrict s;

  ssize_t
    v;

  void
    *magick_restrict virtual_metacontent;

  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const Quantum *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((const Quantum *) NULL);
  q=pixels;
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(q);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const Quantum *) NULL);
        if (cache_info->metacontent_extent != 0)
          {
            status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const Quantum *) NULL);
          }
        return(q);
      }
  /*
    Pixel request is outside cache extents.
  */
  virtual_nexus=nexus_info->virtual_nexus;
  s=(unsigned char *) nexus_info->metacontent;
  (void) memset(virtual_pixel,0,cache_info->number_channels*
    sizeof(*virtual_pixel));
  virtual_metacontent=(void *) NULL;
  switch (virtual_pixel_method)
  {
    case BackgroundVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
    case HorizontalTileVirtualPixelMethod:
    case VerticalTileVirtualPixelMethod:
    {
      if (cache_info->metacontent_extent != 0)
        {
          /*
            Acquire a metacontent buffer.
          */
          virtual_metacontent=(void *) AcquireQuantumMemory(1,
            cache_info->metacontent_extent);
          if (virtual_metacontent == (void *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
              return((const Quantum *) NULL);
            }
          (void) memset(virtual_metacontent,0,cache_info->metacontent_extent);
        }
      /*
        Initialize the constant virtual pixel used for out-of-bounds accesses.
      */
      switch (virtual_pixel_method)
      {
        case BlackVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case GrayVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
              virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case TransparentVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
          break;
        }
        case MaskVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        default:
        {
          /*
            Background (and tile) methods use the image background color.
          */
          SetPixelRed(image,ClampToQuantum(image->background_color.red),
            virtual_pixel);
          SetPixelGreen(image,ClampToQuantum(image->background_color.green),
            virtual_pixel);
          SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
            virtual_pixel);
          SetPixelBlack(image,ClampToQuantum(image->background_color.black),
            virtual_pixel);
          SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
            virtual_pixel);
          break;
        }
      }
      break;
    }
    default:
      break;
  }
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case EdgeVirtualPixelMethod:
            default:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,
                nexus_info->virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /*
                Odd quotient means an odd tile: reflect the coordinate.
              */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case BackgroundVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;
          (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
            sizeof(*p)));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,virtual_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
      (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
        sizeof(*p)));
      q+=cache_info->number_channels*length;
      if ((r != (void *) NULL) && (s != (const void *) NULL))
        {
          /*
            Each of the length pixels carries metacontent_extent bytes of
            metacontent: copy the whole run (the previous code copied only
            length bytes, truncating the run's metacontent).
          */
          (void) memcpy(s,r,(size_t) (cache_info->metacontent_extent*length));
          s+=length*cache_info->metacontent_extent;
        }
    }
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() gets virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const Quantum *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Fetch the requested virtual pixel region through this thread's private
    cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated with the last
% call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
%      const Quantum *GetVirtualPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_pixels_handler !=
(GetVirtualPixelsHandler) NULL)
return(cache_info->methods.get_virtual_pixels_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% access the meta-content (of type void) corresponding to the
% region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  GetVirtualPixelHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  /*
    Return an immutable pixel region, delegating to a registered handler when
    one is installed; otherwise read through this thread's cache nexus using
    the image's current virtual pixel method.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  handler=cache_info->methods.get_virtual_pixel_handler;
  if (handler != (GetVirtualPixelHandler) NULL)
    return(handler(image,GetPixelCacheVirtualMethod(image),x,y,columns,rows,
      exception));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    columns,rows,cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated with the last call to
% QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
% Quantum *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const Quantum *GetVirtualPixelsCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the pixels bound to this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const Quantum *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the pixels bound to the given cache nexus, or NULL when the cache
    has no storage class assigned yet.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class != UndefinedClass)
    return((const Quantum *) nexus_info->pixels);
  return((Quantum *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the composite mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum ApplyPixelCompositeMask(const Quantum p,
  const MagickRealType alpha,const Quantum q,const MagickRealType beta)
{
  double
    gamma;

  /*
    Composite p over q weighted by the mask alpha values; a fully opaque
    mask leaves p unchanged.
  */
  if (fabs(alpha-OpaqueAlpha) < MagickEpsilon)
    return(p);
  gamma=1.0-QuantumScale*QuantumScale*alpha*beta;
  return(ClampToQuantum(PerceptibleReciprocal(gamma)*
    MagickOver_((double) p,alpha,(double) q,beta)));
}
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict p,
    *magick_restrict q;

  ssize_t
    y;

  /*
    Apply composite mask.  Blends the nexus pixels (q) with the authentic
    cache pixels (p) per-channel, weighted by the composite-mask value.
    Returns MagickTrue on success (including the no-op cases), MagickFalse
    when the cache or either pixel stream is unavailable.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->channels & CompositeMaskChannel) == 0)
    return(MagickTrue);  /* image carries no composite mask: nothing to do */
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);  /* empty region: nothing to do */
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /*
    p walks the authentic pixels for the same region; q walks (and updates)
    the nexus pixels in place.
  */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  if ((p == (Quantum *) NULL) || (q == (Quantum *) NULL))
    return(MagickFalse);
  for (y=0; y < (ssize_t) nexus_info->region.height; y++)
  {
    ssize_t
      x;

    for (x=0; x < (ssize_t) nexus_info->region.width; x++)
    {
      double
        mask_alpha;

      ssize_t
        i;

      mask_alpha=(double) GetPixelCompositeMask(image,p);
      for (i=0; i < (ssize_t) image->number_channels; i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;  /* channel not flagged for update: leave untouched */
        q[i]=ApplyPixelCompositeMask(p[i],mask_alpha,q[i],(MagickRealType)
          GetPixelAlpha(image,q));
      }
      /* advance both streams one full pixel (all channels) */
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% metacontent, and memory mapping the cache if it is disk based. The cache
% nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.  Reuses the already-open descriptor when the
    mode matches; otherwise acquires a unique temporary file or (re)opens the
    named cache file with flags appropriate for the requested mode.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    if (mode == ReadMode)
      file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
    else
      {
        int
          rw_flags;

        /*
          WriteMode opens write-only; IOMode (and any other mode) opens
          read-write.  Try exclusive creation first, then fall back to
          opening an existing file.
        */
        rw_flags=(mode == WriteMode) ? O_WRONLY : O_RDWR;
        file=open_utf8(cache_info->cache_filename,rw_flags | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,rw_flags | O_BINARY,
            S_MODE);
      }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

  /*
    Write length bytes from buffer to the cache file starting at offset.
    Returns the number of bytes actually written (which may be short on a
    hard error), or -1 when the initial seek fails.  Uses pwrite() when
    available; otherwise seeks once and uses write().
  */
#if !defined(MAGICKCORE_HAVE_PWRITE)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /* cap each request at LONG_MAX bytes; loop handles partial writes */
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      LONG_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      LONG_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        if (errno != EINTR)
          break;  /* hard error; interrupted writes are simply retried */
      }
  }
  return(i);
}
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  /*
    Extend the disk-backed pixel cache file to at least length bytes (so it
    can subsequently be memory-mapped), leaving the file position at the
    start of the file.  Returns MagickFalse on seek/write failure or when
    length does not fit in a signed file offset.
  */
  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];

      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);  /* length overflows a signed file offset */
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;  /* file already large enough */
  else
    {
      /*
        Extend the file by writing one byte at position length-1.
      */
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      /* optionally pre-allocate the backing blocks to avoid SIGBUS on mmap */
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MagickPathExtent],
    message[MagickPathExtent];

  const char
    *hosts,
    *type;

  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  size_t
    columns,
    packet_size;

  /*
    Allocate the pixel cache, trying in order: anonymous/heap memory, a
    distributed cache server (when "cache:hosts" is registered), and a
    (possibly memory-mapped) disk file — subject to resource limits and the
    security policy.  Pre-existing pixels (source_info) are cloned into the
    new cache unless opening in ReadMode.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (((MagickSizeType) image->columns > cache_info->width_limit) ||
      ((MagickSizeType) image->rows > cache_info->height_limit))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  if (GetMagickResourceLimit(ListLengthResource) != MagickResourceInfinity)
    {
      length=GetImageListLength(image);
      if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
        ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
          image->filename);
    }
  /*
    Snapshot the current cache so existing pixels can be cloned into the new
    storage (and later relinquished).
  */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
    image->filename,(double) image->scene);
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->alpha_trait=image->alpha_trait;
  cache_info->channels=image->channels;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  InitializePixelChannelMap(image);
  cache_info->number_channels=GetPixelChannels(image);
  (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
    sizeof(*image->channel_map));
  cache_info->metacontent_extent=image->metacontent_extent;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=cache_info->number_channels*sizeof(Quantum);
  if (image->metacontent_extent != 0)
    packet_size+=cache_info->metacontent_extent;
  length=number_pixels*packet_size;
  /* round-trip check: detects arithmetic overflow in the length computation */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /* ping mode: no pixel storage is needed at all */
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  if (cache_info->mode == PersistMode)
    status=MagickFalse;  /* persistent caches always go to disk */
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) ||
       (cache_info->type == MemoryCache)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(Quantum *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              /* policy requires anonymous memory mapping */
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /* allocation failed; fall through to disk path below */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->type=MemoryCache;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->mapped != MagickFalse ?
                    "Anonymous" : "Heap",type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              cache_info->storage_class=image->storage_class;
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;

      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    GetDistributeCacheFile((DistributeCacheInfo *)
                    cache_info->server_info),type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  cache_info->type=DiskCache;
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if (length == (MagickSizeType) ((size_t) length))
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status != MagickFalse)
        {
          cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
            cache_info->offset,(size_t) cache_info->length);
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /* mapping failed; stay with the plain disk cache */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
              RelinquishMagickResource(MapResource,cache_info->length);
            }
          else
            {
              /*
                Create file-backed memory-mapped pixel cache.
              */
              (void) ClosePixelCacheOnDisk(cache_info);
              cache_info->type=MapCache;
              cache_info->mapped=MagickTrue;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    cache_info->file,type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  if (status == 0)
    {
      cache_info->type=UndefinedCache;
      return(MagickFalse);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero attaches to an existing persistent
%      pixel cache; zero clones the current cache to the persistent file.
%
% o initialize: A value other than zero initializes the persistent pixel
% cache.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  /*
    Attach to (attach != MagickFalse) or initialize (attach == MagickFalse)
    a persistent on-disk pixel cache at `filename', starting at byte
    *offset.  On success *offset is advanced past the cache, rounded up to
    the next page boundary.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Advance *offset past this cache, rounded up to a page boundary
        (note: a full extra page is added when length is already aligned).
      */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Clone the cache structure, retarget the clone at the persistent disk
    file, then copy the pixels into it via ClonePixelCacheRepository().
  */
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->channels=cache_info->channels;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  Quantum
    *magick_restrict pixels;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  /*
    The upper-left corner (x,y) must lie inside the cache.
  */
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);  /* offset arithmetic overflowed */
  /*
    The lower-right corner of the region must also lie inside the cache.
  */
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache; the nexus is buffered when the image carries a
    write or composite mask channel.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Queue a write-only pixel region through this thread's private cache
    nexus; returns NULL if the region cannot be queued.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a Quantum array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% obtain the meta-content (of type void) corresponding to the region.
% Once the Quantum (and/or metacontent) array has been updated, the
% changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Queue a write-only pixel region.  Delegate to the installed
    queue-authentic-pixels handler when one exists; otherwise use this
    thread's private cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    return(cache_info->methods.queue_authentic_pixels_handler(image,x,y,
      columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheMetacontent() reads metacontent from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheMetacontent() method is:
%
% MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the metacontent.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  MagickOffsetType
    i;

  ssize_t
    count;

  /*
    Read `length' bytes at file position `offset' from the cache file into
    `buffer', restarting short reads until the request is satisfied.
    Returns the number of bytes actually read (may be less than `length'
    on error or end-of-file), or -1 when the initial seek fails.
  */
#if !defined(MAGICKCORE_HAVE_PREAD)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /*
      Each request is clamped to LONG_MAX bytes; pread() leaves the shared
      file position untouched, the read() path relies on the lseek() above.
    */
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      LONG_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      LONG_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        /*
          Retry only when interrupted by a signal.  NOTE(review): a zero
          return (EOF) does not set errno, so a stale EINTR value could in
          principle keep this loop spinning at EOF -- confirm callers never
          request past the end of the cache file.
        */
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  ssize_t
    y;

  unsigned char
    *magick_restrict q;

  size_t
    rows;

  /*
    Copy the metacontent of the nexus region out of the pixel cache
    (memory, disk, or distributed server) into the nexus' metacontent
    buffer.  Returns MagickFalse when the cache carries no metacontent or
    the transfer fails.
  */
  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus points directly into the cache: no copy */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      unsigned char
        *magick_restrict p;

      /*
        Read meta-content from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /*
            Full-width region: collapse the copy into one long row.
          */
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /*
        On disk the metacontent follows all pixel data; `extent' is reused
        here as the total pixel count of the whole cache.
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read metacontent from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer one row per server request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /*
    A transfer loop that ended early (y < rows) signals a read failure.
  */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  Quantum
    *magick_restrict q;

  ssize_t
    y;

  size_t
    number_channels,
    rows;

  /*
    Copy the pixels of the nexus region out of the pixel cache (memory,
    disk, or distributed server) into the nexus' staging buffer.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus points directly into the cache: no copy */
  /*
    Overflow-checked computation of the starting offset and the byte
    length of one region scanline; each multiply is verified by division.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      Quantum
        *magick_restrict p;

      /*
        Read pixels from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /*
            Full-width region: collapse the copy into one long row.
          */
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer one row per server request */
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /*
    A transfer loop that ended early (y < rows) signals a read failure.
  */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
% Cache ReferencePixelCache(Cache cache_info)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Increment the pixel cache reference count under the cache semaphore and
    return the cache pointer.  Fix: the assert previously cast NULL to
    (Cache *) -- one pointer level too deep, since Cache is itself a
    pointer type; every sibling function compares against (Cache) NULL.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheChannels() resets the pixel cache channels.
%
% The format of the ResetPixelCacheChannels method is:
%
% void ResetPixelCacheChannels(Image *)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Synchronize the cache's channel count with the image's current pixel
    channel count.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  cache_info->number_channels=GetPixelChannels(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t C a c h e A n o n y m o u s M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetCacheAnonymousMemory() resets the anonymous_memory value.
%
% The format of the ResetCacheAnonymousMemory method is:
%
% void ResetCacheAnonymousMemory(void)
%
*/
MagickPrivate void ResetCacheAnonymousMemory(void)
{
  /*
    Reset the module-wide cache_anonymous_memory flag to its default of 0;
    nexus buffers are then allocated from the heap rather than from
    anonymous memory maps (see AcquireCacheNexusPixels()).
  */
  cache_anonymous_memory=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e E p o c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /*
    Reset the module-wide pixel cache epoch counter to zero.
  */
  cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods: install each handler supplied (non-NULL) in
    cache_methods, leaving the cache's existing handler untouched when the
    corresponding cache_methods entry is NULL.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    Bug fix: test the candidate handler from cache_methods -- as every
    other assignment above does -- rather than the cache's current handler.
    The previous logic could overwrite an installed handler with NULL when
    cache_methods left this entry unset.
  */
  get_one_virtual_pixel_from_handler=
    cache_methods->get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
% Quantum SetPixelCacheNexusPixels(
% const CacheInfo *magick_restrict cache_info,const MapMode mode,
% const ssize_t x,const ssize_t y,const size_t width,const size_t height,
% const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o x,y,width,height: define the region of this particular cache nexus.
%
% o buffered: if true, nexus pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Allocate the staging buffer for a cache nexus: anonymous-mapped memory
    when cache_anonymous_memory is enabled, zero-filled aligned heap memory
    otherwise.  On failure the nexus is left with a zero length.
  */
  if (length != (MagickSizeType) ((size_t) length))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory > 0)
    {
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (Quantum *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  else
    {
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  /*
    Hint the CPU to prefetch the nexus pixels, skipping regions smaller
    than a cache line.  The read/write argument is kept as a literal at
    each call site (presumably MagickCachePrefetch wraps a prefetch
    builtin that requires compile-time constant arguments -- confirm).
  */
  if (nexus_info->length >= CACHE_LINE_SIZE)
    {
      if (mode == ReadMode)
        MagickCachePrefetch((unsigned char *) nexus_info->pixels+
          CACHE_LINE_SIZE,0,1);
      else
        MagickCachePrefetch((unsigned char *) nexus_info->pixels+
          CACHE_LINE_SIZE,1,1);
    }
}
static inline MagickBooleanType ValidatePixelOffset(const ssize_t x,
  const size_t a)
{
  /*
    Reject offsets whose displacement by `a' could overflow a long: a
    non-negative x must stay below LONG_MAX-a, and any x must stay above
    LONG_MIN+a.
  */
  if (((x >= 0) && (x >= ((ssize_t) LONG_MAX-(ssize_t) a))) ||
      (x <= ((ssize_t) LONG_MIN+(ssize_t) a)))
    return(MagickFalse);
  return(MagickTrue);
}
static Quantum *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  /*
    Define the nexus region and point nexus_info->pixels either directly
    into the cache (when the region is contiguous in cache memory) or at a
    private staging buffer that is later synced back to the cache.
  */
  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  assert(nexus_info->signature == MagickCoreSignature);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit) ||
      (ValidatePixelOffset(x,width) == MagickFalse) ||
      (ValidatePixelOffset(y,height) == MagickFalse))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /*
        A direct view is possible only when the region is contiguous in
        the cache: full-width rows, or a single (partial) row.
      */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
          (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  number_pixels=(MagickSizeType) width*height;
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  /*
    Reuse the existing nexus buffer when it is large enough; otherwise
    release it and allocate a bigger one.
  */
  status=MagickTrue;
  if (nexus_info->cache == (Quantum *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    return((Quantum *) NULL);
  /*
    Metacontent (when present) lives immediately after the pixels in the
    staging buffer.
  */
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+
      cache_info->number_channels*number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SetCacheAlphaChannel() assigns the given alpha value to every pixel of the
  image and flags the image as carrying an alpha channel (BlendPixelTrait).
  Rows are processed in parallel when OpenMP support is compiled in.

  Returns MagickTrue on success, MagickFalse if any row could not be read or
  synced back to the cache; details are reported in `exception`.
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Mark the image as having an alpha channel before touching pixels. */
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);  /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    /* A failure in any thread short-circuits the remaining rows. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  SetPixelCacheVirtualMethod() installs the requested "virtual pixels" policy
  on the image's pixel cache and hands back the policy that was previously in
  effect.  For background/transparent policies on a non-empty image, the alpha
  channel (and, for non-gray backgrounds, the colorspace) is prepared so that
  virtual pixel reads behave as expected.
*/
MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    prior_method;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Record the outgoing policy, then install the new one. */
  prior_method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns == 0) || (image->rows == 0))
    return(prior_method);
  if (virtual_pixel_method == BackgroundVirtualPixelMethod)
    {
      /* The background color may require alpha support on the image. */
      if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
          (image->alpha_trait == UndefinedPixelTrait))
        (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
      /* A colored background cannot be represented in a gray colorspace. */
      if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
          (IsGrayColorspace(image->colorspace) != MagickFalse))
        (void) SetImageColorspace(image,sRGBColorspace,exception);
    }
  else
    if (virtual_pixel_method == TransparentVirtualPixelMethod)
      {
        /* Transparent virtual pixels require an alpha channel. */
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
      }
  return(prior_method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
% been completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  CopyOpenCLBuffer() refreshes the host-side view of an in-memory pixel cache
  that is backed by an OpenCL buffer.  Caches of any other type, or caches
  without an attached OpenCL buffer, are left untouched.
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->type == MemoryCache) &&
      (cache_info->opencl != (MagickCLCacheInfo) NULL))
    {
      /*
        Serialize access to the OpenCL environment.
      */
      LockSemaphoreInfo(cache_info->semaphore);
      cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
}
/*
  SyncAuthenticOpenCLBuffer() waits for outstanding OpenCL operations on the
  image's pixel cache and updates host memory accordingly.
*/
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  assert(image != (const Image *) NULL);
  CopyOpenCLBuffer((CacheInfo *) image->cache);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SyncAuthenticPixelCacheNexus() flushes the pixels staged in `nexus_info`
  back to the image's in-memory or disk cache, applying any write/composite
  masks first, and marks the image as tainted.  Returns MagickTrue when the
  region is synced, MagickFalse otherwise.
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /*
    Apply write/composite masks before committing the nexus, unless the
    pixels being synced are themselves a mask update.
  */
  if (image->mask_trait != UpdatePixelTrait)
    {
      if (((image->channels & WriteMaskChannel) != 0) &&
          (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
      if (((image->channels & CompositeMaskChannel) != 0) &&
          (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
    }
  /*
    When the nexus aliases the cache's own pixels there is nothing to copy;
    just mark the image tainted.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      if (image->taint == MagickFalse)
        image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  /* Metacontent, when present, must be written alongside the pixels. */
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((status != MagickFalse) && (image->taint == MagickFalse))
    image->taint=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+   S y n c A u t h e n t i c P i x e l s C a c h e                           %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SyncAuthenticPixelsCache() commits the calling thread's authentic pixel
  nexus to the in-memory or disk cache.  Returns MagickTrue when the region
  is synced, MagickFalse otherwise.
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  /* Each OpenMP thread owns a private nexus; sync only this thread's one. */
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SyncAuthenticPixels() saves the image pixels to the in-memory or disk
  cache, delegating to a custom sync handler when one has been installed on
  the cache methods.  Returns MagickTrue if the pixel region is flushed,
  otherwise MagickFalse.
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* A registered handler overrides the default per-thread nexus sync. */
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SyncImagePixelCache() flushes the image pixels to the in-memory or disk
  cache.  Returns MagickTrue when the cache is available after the sync,
  otherwise MagickFalse.
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  if (cache_info == (CacheInfo *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheMetacontent() writes the meta-content to the specified region
% of the pixel cache.
%
% The format of the WritePixelCacheMetacontent() method is:
%
% MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the meta-content.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  WritePixelCacheMetacontent() writes the meta-content staged in `nexus_info`
  to the corresponding region of the pixel cache (memory, disk, or a
  distributed cache server).  Returns MagickTrue on success, MagickFalse on
  failure (details in `exception`).

  Fix: the distributed-cache call previously read `®ion` — a mojibake of
  `&region` where the `&reg` prefix was swallowed as an HTML entity — which
  does not compile; the region's address is now passed correctly.
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  const unsigned char
    *magick_restrict p;

  ssize_t
    y;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* Nexus aliases the cache directly; nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width region: collapse into a single contiguous copy. */
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /* On disk, metacontent is stored after all pixel data. */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* A short write leaves y < rows; report the failure. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+   W r i t e P i x e l C a c h e P i x e l s                                 %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  WritePixelCachePixels() writes the pixels staged in `nexus_info` to the
  corresponding region of the pixel cache (memory, disk, or a distributed
  cache server).  Returns MagickTrue on success, MagickFalse on failure
  (details in `exception`).

  Fix: the distributed-cache call previously read `®ion` — a mojibake of
  `&region` where the `&reg` prefix was swallowed as an HTML entity — which
  does not compile; the region's address is now passed correctly.
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  const Quantum
    *magick_restrict p;

  ssize_t
    y;

  size_t
    rows;

  /* Nexus aliases the cache directly; nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width region: collapse into a single contiguous copy. */
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* A short write leaves y < rows; report the failure. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
convolution_1x1_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Repack 1x1 convolution weights for the pack-4 SGEMM kernel.
// Source layout: plain inch-by-outch weights.  Destination layout:
// 4b-4a-inch/4a-outch/4b interleaving, so that the SGEMM inner loop can
// load one 4-input x 4-output (or 8-output on aarch64) tile contiguously.
// NOTE(review): assumes inch and outch are multiples of 4 — the loops only
// step in groups of 4; confirm against the caller's padding.
static void conv1x1s1_sgemm_transform_kernel_pack4_neon(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch)
{
    // interleave
    // src = inch-outch
    // dst = 4b-4a-inch/4a-outch/4b
#if __aarch64__
    // aarch64 processes 8 output channels per tile when possible, so a
    // channel of the packed Mat holds either an 8-wide or a 4-wide group.
    kernel_tm_pack4.create(2 * 1, inch / 4, (outch / 4) / 2 + (outch / 4) % 2, (size_t)4u * 16, 16);
#else
    kernel_tm_pack4.create(1, inch / 4, outch / 4, (size_t)4u * 16, 16);
#endif

    int q = 0;
#if __aarch64__
    // 8-output-channel groups: interleave 8 rows of weights, 4 inputs at a
    // time, producing 32 floats per 4-input step.
    for (; q + 7 < outch; q += 8)
    {
        const float* k0 = (const float*)kernel + (q + 0) * inch;
        const float* k1 = (const float*)kernel + (q + 1) * inch;
        const float* k2 = (const float*)kernel + (q + 2) * inch;
        const float* k3 = (const float*)kernel + (q + 3) * inch;
        const float* k4 = (const float*)kernel + (q + 4) * inch;
        const float* k5 = (const float*)kernel + (q + 5) * inch;
        const float* k6 = (const float*)kernel + (q + 6) * inch;
        const float* k7 = (const float*)kernel + (q + 7) * inch;

        float* g0 = kernel_tm_pack4.channel(q / 8);

        for (int p = 0; p + 3 < inch; p += 4)
        {
            // For each of the 4 input channels, store the 8 output-channel
            // weights consecutively: [in0 x out0..7][in1 x out0..7]...
            g0[0] = k0[0];
            g0[1] = k1[0];
            g0[2] = k2[0];
            g0[3] = k3[0];
            g0[4] = k4[0];
            g0[5] = k5[0];
            g0[6] = k6[0];
            g0[7] = k7[0];

            g0[8] = k0[1];
            g0[9] = k1[1];
            g0[10] = k2[1];
            g0[11] = k3[1];
            g0[12] = k4[1];
            g0[13] = k5[1];
            g0[14] = k6[1];
            g0[15] = k7[1];

            g0[16] = k0[2];
            g0[17] = k1[2];
            g0[18] = k2[2];
            g0[19] = k3[2];
            g0[20] = k4[2];
            g0[21] = k5[2];
            g0[22] = k6[2];
            g0[23] = k7[2];

            g0[24] = k0[3];
            g0[25] = k1[3];
            g0[26] = k2[3];
            g0[27] = k3[3];
            g0[28] = k4[3];
            g0[29] = k5[3];
            g0[30] = k6[3];
            g0[31] = k7[3];

            k0 += 4;
            k1 += 4;
            k2 += 4;
            k3 += 4;
            k4 += 4;
            k5 += 4;
            k6 += 4;
            k7 += 4;
            g0 += 32;
        }
    }
#endif // __aarch64__
    // 4-output-channel groups (remainder on aarch64, everything on armv7):
    // interleave 4 rows of weights, 4 inputs at a time -> 16 floats per step.
    for (; q + 3 < outch; q += 4)
    {
        const float* k0 = (const float*)kernel + (q + 0) * inch;
        const float* k1 = (const float*)kernel + (q + 1) * inch;
        const float* k2 = (const float*)kernel + (q + 2) * inch;
        const float* k3 = (const float*)kernel + (q + 3) * inch;

#if __aarch64__
        // Skip past the 8-wide groups already emitted, then index the
        // remaining 4-wide groups.
        float* g0 = kernel_tm_pack4.channel(q / 8 + (q % 8) / 4);
#else
        float* g0 = kernel_tm_pack4.channel(q / 4);
#endif

        for (int p = 0; p + 3 < inch; p += 4)
        {
            g0[0] = k0[0];
            g0[1] = k1[0];
            g0[2] = k2[0];
            g0[3] = k3[0];

            g0[4] = k0[1];
            g0[5] = k1[1];
            g0[6] = k2[1];
            g0[7] = k3[1];

            g0[8] = k0[2];
            g0[9] = k1[2];
            g0[10] = k2[2];
            g0[11] = k3[2];

            g0[12] = k0[3];
            g0[13] = k1[3];
            g0[14] = k2[3];
            g0[15] = k3[3];

            k0 += 4;
            k1 += 4;
            k2 += 4;
            k3 += 4;
            g0 += 16;
        }
    }
}
static void conv1x1s1_sgemm_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int size = w * h;
const float* bias = _bias;
// interleave
#if __aarch64__
Mat tmp(12, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, elemsize, elempack, opt.workspace_allocator);
#else
Mat tmp(8, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, elemsize, elempack, opt.workspace_allocator);
#endif
{
int nn_size;
int remain_size_start;
#if __aarch64__
nn_size = size / 12;
remain_size_start = nn_size * 12;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 12;
const float* img0 = bottom_blob.channel(0);
img0 += i * 4;
float* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n"
"st1 {v0.4s}, [%1], #16 \n"
"st1 {v4.4s}, [%1], #16 \n"
"st1 {v8.4s}, [%1], #16 \n"
"sub %0, %0, #128 \n"
"st1 {v1.4s}, [%1], #16 \n"
"st1 {v5.4s}, [%1], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
"st1 {v2.4s}, [%1], #16 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v10.4s}, [%1], #16 \n"
"st1 {v3.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
"st1 {v11.4s}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
img0 += bottom_blob.cstep * 4;
}
}
#else
remain_size_start = 0;
#endif
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
const float* img0 = bottom_blob.channel(0);
img0 += i * 4;
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
#else
float* tmpptr = tmp.channel(i / 8);
#endif
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
"sub %0, %0, #64 \n"
"st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
#else
asm volatile(
"pld [%0, #512] \n"
"vldm %0!, {d0-d7} \n"
"pld [%0, #512] \n"
"vldm %0, {d16-d23} \n"
// transpose 8x4
"vtrn.32 q0, q1 \n"
"vtrn.32 q2, q3 \n"
"vtrn.32 q8, q9 \n"
"vtrn.32 q10, q11 \n"
"vswp d1, d4 \n"
"vswp d3, d6 \n"
"vswp d17, d20 \n"
"vswp d19, d22 \n"
"vswp q1, q8 \n"
"vswp q3, q10 \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
"sub %0, %0, #64 \n"
"vst1.f32 {d4-d7}, [%1 :128]! \n"
"vst1.f32 {d20-d23}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11");
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const float* img0 = bottom_blob.channel(0);
img0 += i * 4;
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#endif
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
#else
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d0-d7} \n"
"vstm %1!, {d0-d7} \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1", "q2", "q3");
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
const float* img0 = bottom_blob.channel(0);
img0 += i * 4;
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
#else
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
#endif
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.4s, v1.4s}, [%0] \n"
"st1 {v0.4s, v1.4s}, [%1], #32 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1");
#else
asm volatile(
"pld [%0, #256] \n"
"vld1.f32 {d0-d3}, [%0 :128] \n"
"vst1.f32 {d0-d3}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1");
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
const float* img0 = bottom_blob.channel(0);
img0 += i * 4;
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
#else
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#endif
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.4s}, [%0] \n"
"st1 {v0.4s}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d0-d1}, [%0 :128] \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0");
#endif // __aarch64__
img0 += bottom_blob.cstep * 4;
}
}
}
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
int nn_outch = 0;
nn_outch = outch >> 1;
remain_outch_start = nn_outch << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 2;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p + 1);
const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 4 : zeros;
int i = 0;
for (; i + 11 < size; i += 12)
{
const float* tmpptr = tmp.channel(i / 12);
const float* kptr01 = (const float*)kernel.channel(pp);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v8.16b, v0.16b \n"
"mov v9.16b, v0.16b \n"
"mov v10.16b, v0.16b \n"
"mov v11.16b, v0.16b \n"
"mov v12.16b, v0.16b \n"
"mov v13.16b, v0.16b \n"
"mov v14.16b, v0.16b \n"
"mov v15.16b, v0.16b \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v1.16b \n"
"mov v21.16b, v1.16b \n"
"mov v22.16b, v1.16b \n"
"mov v23.16b, v1.16b \n"
"mov v24.16b, v1.16b \n"
"mov v25.16b, v1.16b \n"
"mov v26.16b, v1.16b \n"
"mov v27.16b, v1.16b \n"
"mov v28.16b, v1.16b \n"
"mov v29.16b, v1.16b \n"
"mov v30.16b, v1.16b \n"
"mov v31.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w0011_01
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"fmla v20.4s, v5.4s, v0.s[0] \n"
"fmla v21.4s, v5.4s, v0.s[1] \n"
"fmla v22.4s, v5.4s, v0.s[2] \n"
"fmla v23.4s, v5.4s, v0.s[3] \n"
"fmla v24.4s, v5.4s, v1.s[0] \n"
"fmla v25.4s, v5.4s, v1.s[1] \n"
"fmla v26.4s, v5.4s, v1.s[2] \n"
"fmla v27.4s, v5.4s, v1.s[3] \n"
"fmla v28.4s, v5.4s, v2.s[0] \n"
"fmla v29.4s, v5.4s, v2.s[1] \n"
"fmla v30.4s, v5.4s, v2.s[2] \n"
"fmla v31.4s, v5.4s, v2.s[3] \n"
"fmla v8.4s, v6.4s, v3.s[0] \n"
"fmla v9.4s, v6.4s, v3.s[1] \n"
"fmla v10.4s, v6.4s, v3.s[2] \n"
"fmla v11.4s, v6.4s, v3.s[3] \n"
"fmla v20.4s, v7.4s, v3.s[0] \n"
"fmla v21.4s, v7.4s, v3.s[1] \n"
"fmla v22.4s, v7.4s, v3.s[2] \n"
"fmla v23.4s, v7.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"fmla v12.4s, v6.4s, v0.s[0] \n"
"fmla v13.4s, v6.4s, v0.s[1] \n"
"fmla v14.4s, v6.4s, v0.s[2] \n"
"fmla v15.4s, v6.4s, v0.s[3] \n"
"fmla v16.4s, v6.4s, v1.s[0] \n"
"fmla v17.4s, v6.4s, v1.s[1] \n"
"fmla v18.4s, v6.4s, v1.s[2] \n"
"fmla v19.4s, v6.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v0.s[0] \n"
"fmla v25.4s, v7.4s, v0.s[1] \n"
"fmla v26.4s, v7.4s, v0.s[2] \n"
"fmla v27.4s, v7.4s, v0.s[3] \n"
"fmla v28.4s, v7.4s, v1.s[0] \n"
"fmla v29.4s, v7.4s, v1.s[1] \n"
"fmla v30.4s, v7.4s, v1.s[2] \n"
"fmla v31.4s, v7.4s, v1.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w2233_01
"fmla v8.4s, v4.4s, v2.s[0] \n"
"fmla v9.4s, v4.4s, v2.s[1] \n"
"fmla v10.4s, v4.4s, v2.s[2] \n"
"fmla v11.4s, v4.4s, v2.s[3] \n"
"fmla v12.4s, v4.4s, v3.s[0] \n"
"fmla v13.4s, v4.4s, v3.s[1] \n"
"fmla v14.4s, v4.4s, v3.s[2] \n"
"fmla v15.4s, v4.4s, v3.s[3] \n"
"fmla v20.4s, v5.4s, v2.s[0] \n"
"fmla v21.4s, v5.4s, v2.s[1] \n"
"fmla v22.4s, v5.4s, v2.s[2] \n"
"fmla v23.4s, v5.4s, v2.s[3] \n"
"fmla v24.4s, v5.4s, v3.s[0] \n"
"fmla v25.4s, v5.4s, v3.s[1] \n"
"fmla v26.4s, v5.4s, v3.s[2] \n"
"fmla v27.4s, v5.4s, v3.s[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n"
"fmla v16.4s, v4.4s, v0.s[0] \n"
"fmla v17.4s, v4.4s, v0.s[1] \n"
"fmla v18.4s, v4.4s, v0.s[2] \n"
"fmla v19.4s, v4.4s, v0.s[3] \n"
"fmla v28.4s, v5.4s, v0.s[0] \n"
"fmla v29.4s, v5.4s, v0.s[1] \n"
"fmla v30.4s, v5.4s, v0.s[2] \n"
"fmla v31.4s, v5.4s, v0.s[3] \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v9.4s, v6.4s, v1.s[1] \n"
"fmla v10.4s, v6.4s, v1.s[2] \n"
"fmla v11.4s, v6.4s, v1.s[3] \n"
"fmla v12.4s, v6.4s, v2.s[0] \n"
"fmla v13.4s, v6.4s, v2.s[1] \n"
"fmla v14.4s, v6.4s, v2.s[2] \n"
"fmla v15.4s, v6.4s, v2.s[3] \n"
"fmla v16.4s, v6.4s, v3.s[0] \n"
"fmla v17.4s, v6.4s, v3.s[1] \n"
"fmla v18.4s, v6.4s, v3.s[2] \n"
"fmla v19.4s, v6.4s, v3.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v20.4s, v7.4s, v1.s[0] \n"
"fmla v21.4s, v7.4s, v1.s[1] \n"
"fmla v22.4s, v7.4s, v1.s[2] \n"
"fmla v23.4s, v7.4s, v1.s[3] \n"
"fmla v24.4s, v7.4s, v2.s[0] \n"
"fmla v25.4s, v7.4s, v2.s[1] \n"
"fmla v26.4s, v7.4s, v2.s[2] \n"
"fmla v27.4s, v7.4s, v2.s[3] \n"
"fmla v28.4s, v7.4s, v3.s[0] \n"
"fmla v29.4s, v7.4s, v3.s[1] \n"
"fmla v30.4s, v7.4s, v3.s[2] \n"
"fmla v31.4s, v7.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
"st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr01) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr01),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < size; i += 8)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const float* kptr01 = (const float*)kernel.channel(pp);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v0.16b \n"
"mov v21.16b, v0.16b \n"
"mov v22.16b, v0.16b \n"
"mov v23.16b, v0.16b \n"
"mov v24.16b, v1.16b \n"
"mov v25.16b, v1.16b \n"
"mov v26.16b, v1.16b \n"
"mov v27.16b, v1.16b \n"
"mov v28.16b, v1.16b \n"
"mov v29.16b, v1.16b \n"
"mov v30.16b, v1.16b \n"
"mov v31.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r4 r5 r6 r7
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v24.4s, v9.4s, v0.s[0] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v26.4s, v9.4s, v2.s[0] \n"
"fmla v27.4s, v9.4s, v3.s[0] \n"
"fmla v28.4s, v9.4s, v4.s[0] \n"
"fmla v29.4s, v9.4s, v5.s[0] \n"
"fmla v30.4s, v9.4s, v6.s[0] \n"
"fmla v31.4s, v9.4s, v7.s[0] \n"
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v10.4s, v4.s[1] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v6.s[1] \n"
"fmla v23.4s, v10.4s, v7.s[1] \n"
"fmla v24.4s, v11.4s, v0.s[1] \n"
"fmla v25.4s, v11.4s, v1.s[1] \n"
"fmla v26.4s, v11.4s, v2.s[1] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v28.4s, v11.4s, v4.s[1] \n"
"fmla v29.4s, v11.4s, v5.s[1] \n"
"fmla v30.4s, v11.4s, v6.s[1] \n"
"fmla v31.4s, v11.4s, v7.s[1] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v21.4s, v12.4s, v5.s[2] \n"
"fmla v22.4s, v12.4s, v6.s[2] \n"
"fmla v23.4s, v12.4s, v7.s[2] \n"
"fmla v24.4s, v13.4s, v0.s[2] \n"
"fmla v25.4s, v13.4s, v1.s[2] \n"
"fmla v26.4s, v13.4s, v2.s[2] \n"
"fmla v27.4s, v13.4s, v3.s[2] \n"
"fmla v28.4s, v13.4s, v4.s[2] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v30.4s, v13.4s, v6.s[2] \n"
"fmla v31.4s, v13.4s, v7.s[2] \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v14.4s, v4.s[3] \n"
"fmla v21.4s, v14.4s, v5.s[3] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v23.4s, v14.4s, v7.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v24.4s, v15.4s, v0.s[3] \n"
"fmla v25.4s, v15.4s, v1.s[3] \n"
"fmla v26.4s, v15.4s, v2.s[3] \n"
"fmla v27.4s, v15.4s, v3.s[3] \n"
"fmla v28.4s, v15.4s, v4.s[3] \n"
"fmla v29.4s, v15.4s, v5.s[3] \n"
"fmla v30.4s, v15.4s, v6.s[3] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"
"st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr01) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr01),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < size; i += 4)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* kptr01 = (const float*)kernel.channel(pp);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v1.16b \n"
"mov v21.16b, v1.16b \n"
"mov v22.16b, v1.16b \n"
"mov v23.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v20.4s, v9.4s, v0.s[0] \n"
"fmla v21.4s, v9.4s, v1.s[0] \n"
"fmla v22.4s, v9.4s, v2.s[0] \n"
"fmla v23.4s, v9.4s, v3.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v19.4s, v10.4s, v3.s[1] \n"
"fmla v20.4s, v11.4s, v0.s[1] \n"
"fmla v21.4s, v11.4s, v1.s[1] \n"
"fmla v22.4s, v11.4s, v2.s[1] \n"
"fmla v23.4s, v11.4s, v3.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v12.4s, v2.s[2] \n"
"fmla v19.4s, v12.4s, v3.s[2] \n"
"fmla v20.4s, v13.4s, v0.s[2] \n"
"fmla v21.4s, v13.4s, v1.s[2] \n"
"fmla v22.4s, v13.4s, v2.s[2] \n"
"fmla v23.4s, v13.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v14.4s, v2.s[3] \n"
"fmla v19.4s, v14.4s, v3.s[3] \n"
"fmla v20.4s, v15.4s, v0.s[3] \n"
"fmla v21.4s, v15.4s, v1.s[3] \n"
"fmla v22.4s, v15.4s, v2.s[3] \n"
"fmla v23.4s, v15.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr01) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr01),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i + 1 < size; i += 2)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* kptr01 = (const float*)kernel.channel(pp);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v0.4s, v1.4s}, [%10] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v1.16b \n"
"mov v19.16b, v1.16b \n"
"0: \n"
"prfm pldl1keep, [%3, #256] \n"
"ld1 {v0.4s, v1.4s}, [%3], #32 \n" // r0 r1
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v1.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v10.4s, v1.s[1] \n"
"fmla v18.4s, v11.4s, v0.s[1] \n"
"fmla v19.4s, v11.4s, v1.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v12.4s, v1.s[2] \n"
"fmla v18.4s, v13.4s, v0.s[2] \n"
"fmla v19.4s, v13.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v14.4s, v1.s[3] \n"
"fmla v18.4s, v15.4s, v0.s[3] \n"
"fmla v19.4s, v15.4s, v1.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
"st1 {v18.4s, v19.4s}, [%2], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr01) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr01),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
for (; i < size; i++)
{
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* kptr01 = (const float*)kernel.channel(pp);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v16.4s, v17.4s}, [%10] \n"
"0: \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v0.4s}, [%3], #16 \n" // r0
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01
"fmla v16.4s, v10.4s, v0.s[1] \n"
"fmla v17.4s, v11.4s, v0.s[1] \n"
"fmla v16.4s, v12.4s, v0.s[2] \n"
"fmla v17.4s, v13.4s, v0.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v0.s[3] \n"
"fmla v17.4s, v15.4s, v0.s[3] \n"
"bne 0b \n"
"st1 {v16.4s}, [%1], #16 \n"
"st1 {v17.4s}, [%2], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(tmpptr), // %3
"=r"(kptr01) // %4
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(tmpptr),
"4"(kptr01),
"r"(biasptr) // %10
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17");
}
}
#endif // __ARM_NEON && __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* outptr0 = top_blob.channel(p);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p * 4 : zeros;
int i = 0;
#if __aarch64__
for (; i + 11 < size; i += 12)
{
float* tmpptr = tmp.channel(i / 12);
const float* kptr0 = (const float*)kernel.channel(p / 2 + p % 2);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v8.16b, v0.16b \n"
"mov v9.16b, v0.16b \n"
"mov v10.16b, v0.16b \n"
"mov v11.16b, v0.16b \n"
"mov v12.16b, v0.16b \n"
"mov v13.16b, v0.16b \n"
"mov v14.16b, v0.16b \n"
"mov v15.16b, v0.16b \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // w0123_0
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v12.4s, v4.4s, v1.s[0] \n"
"fmla v13.4s, v4.4s, v1.s[1] \n"
"fmla v14.4s, v4.4s, v1.s[2] \n"
"fmla v15.4s, v4.4s, v1.s[3] \n"
"fmla v16.4s, v4.4s, v2.s[0] \n"
"fmla v17.4s, v4.4s, v2.s[1] \n"
"fmla v18.4s, v4.4s, v2.s[2] \n"
"fmla v19.4s, v4.4s, v2.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n"
"fmla v8.4s, v5.4s, v3.s[0] \n"
"fmla v9.4s, v5.4s, v3.s[1] \n"
"fmla v10.4s, v5.4s, v3.s[2] \n"
"fmla v11.4s, v5.4s, v3.s[3] \n"
"fmla v12.4s, v5.4s, v20.s[0] \n"
"fmla v13.4s, v5.4s, v20.s[1] \n"
"fmla v14.4s, v5.4s, v20.s[2] \n"
"fmla v15.4s, v5.4s, v20.s[3] \n"
"fmla v16.4s, v5.4s, v21.s[0] \n"
"fmla v17.4s, v5.4s, v21.s[1] \n"
"fmla v18.4s, v5.4s, v21.s[2] \n"
"fmla v19.4s, v5.4s, v21.s[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n"
"fmla v8.4s, v6.4s, v22.s[0] \n"
"fmla v9.4s, v6.4s, v22.s[1] \n"
"fmla v10.4s, v6.4s, v22.s[2] \n"
"fmla v11.4s, v6.4s, v22.s[3] \n"
"fmla v12.4s, v6.4s, v23.s[0] \n"
"fmla v13.4s, v6.4s, v23.s[1] \n"
"fmla v14.4s, v6.4s, v23.s[2] \n"
"fmla v15.4s, v6.4s, v23.s[3] \n"
"fmla v16.4s, v6.4s, v24.s[0] \n"
"fmla v17.4s, v6.4s, v24.s[1] \n"
"fmla v18.4s, v6.4s, v24.s[2] \n"
"fmla v19.4s, v6.4s, v24.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v7.4s, v25.s[0] \n"
"fmla v9.4s, v7.4s, v25.s[1] \n"
"fmla v10.4s, v7.4s, v25.s[2] \n"
"fmla v11.4s, v7.4s, v25.s[3] \n"
"fmla v12.4s, v7.4s, v26.s[0] \n"
"fmla v13.4s, v7.4s, v26.s[1] \n"
"fmla v14.4s, v7.4s, v26.s[2] \n"
"fmla v15.4s, v7.4s, v26.s[3] \n"
"fmla v16.4s, v7.4s, v27.s[0] \n"
"fmla v17.4s, v7.4s, v27.s[1] \n"
"fmla v18.4s, v7.4s, v27.s[2] \n"
"fmla v19.4s, v7.4s, v27.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n"
"st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
}
#endif
for (; i + 7 < size; i += 8)
{
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const float* kptr0 = (const float*)kernel.channel(p / 2 + p % 2);
#else
float* tmpptr = tmp.channel(i / 8);
const float* kptr0 = (const float*)kernel.channel(p);
#endif
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"mov v20.16b, v0.16b \n"
"mov v21.16b, v0.16b \n"
"mov v22.16b, v0.16b \n"
"mov v23.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r4 r5 r6 r7
"fmla v20.4s, v8.4s, v4.s[0] \n"
"fmla v21.4s, v8.4s, v5.s[0] \n"
"fmla v22.4s, v8.4s, v6.s[0] \n"
"fmla v23.4s, v8.4s, v7.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v20.4s, v9.4s, v4.s[1] \n"
"fmla v21.4s, v9.4s, v5.s[1] \n"
"fmla v22.4s, v9.4s, v6.s[1] \n"
"fmla v23.4s, v9.4s, v7.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"fmla v20.4s, v10.4s, v4.s[2] \n"
"fmla v21.4s, v10.4s, v5.s[2] \n"
"fmla v22.4s, v10.4s, v6.s[2] \n"
"fmla v23.4s, v10.4s, v7.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"fmla v20.4s, v11.4s, v4.s[3] \n"
"fmla v21.4s, v11.4s, v5.s[3] \n"
"fmla v22.4s, v11.4s, v6.s[3] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
"st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
#else
asm volatile(
"vld1.f32 {d0-d1}, [%8] \n"
"vmov q8, q0 \n"
"vmov q9, q0 \n"
"vmov q10, q0 \n"
"vmov q11, q0 \n"
"vmov q12, q0 \n"
"vmov q13, q0 \n"
"vmov q14, q0 \n"
"vmov q15, q0 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q12, q4, d2[0] \n"
"vmla.f32 q13, q4, d2[1] \n"
"vmla.f32 q14, q4, d3[0] \n"
"vmla.f32 q15, q4, d3[1] \n"
"vmla.f32 q8, q5, d4[0] \n"
"vmla.f32 q9, q5, d4[1] \n"
"vmla.f32 q10, q5, d5[0] \n"
"vmla.f32 q11, q5, d5[1] \n"
"vmla.f32 q12, q5, d6[0] \n"
"vmla.f32 q13, q5, d6[1] \n"
"vmla.f32 q14, q5, d7[0] \n"
"vmla.f32 q15, q5, d7[1] \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"vmla.f32 q8, q6, d0[0] \n"
"vmla.f32 q9, q6, d0[1] \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q6, d1[1] \n"
"vmla.f32 q12, q6, d2[0] \n"
"vmla.f32 q13, q6, d2[1] \n"
"vmla.f32 q14, q6, d3[0] \n"
"vmla.f32 q15, q6, d3[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d4[0] \n"
"vmla.f32 q9, q7, d4[1] \n"
"vmla.f32 q10, q7, d5[0] \n"
"vmla.f32 q11, q7, d5[1] \n"
"vmla.f32 q12, q7, d6[0] \n"
"vmla.f32 q13, q7, d6[1] \n"
"vmla.f32 q14, q7, d7[0] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"vstm %1!, {d16-d23} \n"
"vstm %1!, {d24-d31} \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif
}
for (; i + 3 < size; i += 4)
{
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* kptr0 = (const float*)kernel.channel(p / 2 + p % 2);
#else
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const float* kptr0 = (const float*)kernel.channel(p);
#endif
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"mov v18.16b, v0.16b \n"
"mov v19.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v18.4s, v8.4s, v2.s[0] \n"
"fmla v19.4s, v8.4s, v3.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[1] \n"
"fmla v19.4s, v9.4s, v3.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"fmla v18.4s, v10.4s, v2.s[2] \n"
"fmla v19.4s, v10.4s, v3.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"fmla v18.4s, v11.4s, v2.s[3] \n"
"fmla v19.4s, v11.4s, v3.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19");
#else
asm volatile(
"vld1.f32 {d0-d1}, [%8] \n"
"vmov q8, q0 \n"
"vmov q9, q0 \n"
"vmov q10, q0 \n"
"vmov q11, q0 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q10, q4, d4[0] \n"
"vmla.f32 q11, q4, d6[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d4[1] \n"
"vmla.f32 q11, q5, d6[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d7[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"vmla.f32 q10, q7, d5[1] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"vstm %1!, {d16-d23} \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif
}
for (; i + 1 < size; i += 2)
{
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* kptr0 = (const float*)kernel.channel(p / 2 + p % 2);
#else
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2);
const float* kptr0 = (const float*)kernel.channel(p);
#endif
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%8] \n"
"mov v16.16b, v0.16b \n"
"mov v17.16b, v0.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4s, v1.4s}, [%2], #32 \n" // r0 r1
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v1.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"fmla v17.4s, v9.4s, v1.s[1] \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v17.4s, v10.4s, v1.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"fmla v17.4s, v11.4s, v1.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17");
#else
asm volatile(
"vld1.f32 {d0-d1}, [%8] \n"
"vmov q8, q0 \n"
"vmov q9, q0 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.f32 {d0-d3}, [%2 :128]! \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d2[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q9, q6, d3[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q7, d1[1] \n"
"vmla.f32 q9, q7, d3[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d19}, [%1 :128]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9");
#endif
}
for (; i < size; i++)
{
#if __aarch64__
float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* kptr0 = (const float*)kernel.channel(p / 2 + p % 2);
#else
float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
const float* kptr0 = (const float*)kernel.channel(p);
#endif
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v16.4s}, [%8] \n"
"0: \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n" // r0
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v16.4s, v9.4s, v0.s[1] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v10.4s, v0.s[2] \n"
"fmla v16.4s, v11.4s, v0.s[3] \n"
"bne 0b \n"
"st1 {v16.4s}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16");
#else
asm volatile(
"vld1.f32 {d16-d17}, [%8] \n"
"0: \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2 :128]! \n"
"pld [%3, #512] \n"
"vldm %3!, {d8-d15} \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d17}, [%1 :128]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8");
#endif
}
}
// // NOTE sgemm
// for (; p<outch; p++)
// {
// Mat out0 = top_blob.channel(p);
//
// const float bias0 = bias ? bias[p] : 0.f;
//
// float* outptr0 = out0;
//
// for (int i=0; i<size; i++)
// {
// float sum = bias0;
//
// const float* kptr = _kernel.channel(p);
//
// for (int q=0; q<inch; q++)
// {
// const float* img0 = bottom_blob.channel(q);
//
// sum += img0[i] * kptr[0];
// kptr ++;
// }
//
// outptr0[i] = sum;
// }
// }
}
static void conv1x1s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = (w - 2 * outw + w) * 4;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const float* r0 = bottom_blob.channel(p);
float* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
for (int j = 0; j < outw; j++)
{
float32x4_t _v = vld1q_f32(r0);
vst1q_f32(outptr, _v);
r0 += 8;
outptr += 4;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack4_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
GB_unaryop__minv_fp32_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp32_int16
// op(A') function: GB_tran__minv_fp32_int16
// C type: float
// A type: int16_t
// cast: float cij = (float) aij
// unaryop: cij = (1.0F)/aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = (1.0F)/x ;
// casting
#define GB_CASTING(z, aij) \
float z = (float) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the MINV unary operator with typecast.
// For each entry: Cx [p] = 1.0F / (float) Ax [p] (via GB_CAST_OP above).
GrB_Info GB_unop__minv_fp32_int16
(
    float *Cx,          // output array, size anz; Cx and Ax may be aliased
    int16_t *Ax,        // input array, size anz
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    // operator disabled at compile time; caller falls back to the generic worker
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;     // Cx [p] = 1.0F / (float) Ax [p]
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int16_t -> float, and apply
// the MINV unary operator to each entry.  The actual work is done by the
// GB_unaryop_transpose.c template, which uses the GB_* macros defined above.
GrB_Info GB_tran__minv_fp32_int16
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix A,                 // input matrix (transposed into C)
    int64_t *GB_RESTRICT *Rowcounts,    // workspace (semantics defined in GB_unaryop_transpose.c)
    GBI_single_iterator Iter,           // iterator over the vectors of A (see GB_iterator.h)
    const int64_t *GB_RESTRICT A_slice, // presumably the partition of A across slices — see template
    int naslice                         // number of slices
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
kgraph-data.h | #ifndef WDONG_KGRAPH_DATA
#define WDONG_KGRAPH_DATA
#include <cmath>
#include <cstring>
#include <malloc.h>
#include <vector>
#include <fstream>
#include <stdexcept>
#include <boost/assert.hpp>
#ifdef __GNUC__
#ifdef __AVX__
#define KGRAPH_MATRIX_ALIGN 32
#else
#ifdef __SSE2__
#define KGRAPH_MATRIX_ALIGN 16
#else
#define KGRAPH_MATRIX_ALIGN 4
#endif
#endif
#endif
namespace kgraph {
/// L2 square distance with AVX instructions.
/** AVX instructions have strong alignment requirement for t1 and t2.
*/
extern float float_l2sqr_avx (float const *t1, float const *t2, unsigned dim);
/// L2 square distance with SSE2 instructions.
extern float float_l2sqr_sse2 (float const *t1, float const *t2, unsigned dim);
extern float float_l2sqr_sse2 (float const *, unsigned dim);
extern float float_dot_sse2 (float const *, float const *, unsigned dim);
/// L2 square distance for uint8_t with SSE2 instructions (for SIFT).
extern float uint8_l2sqr_sse2 (uint8_t const *t1, uint8_t const *t2, unsigned dim);
extern float float_l2sqr (float const *, float const *, unsigned dim);
extern float float_l2sqr (float const *, unsigned dim);
extern float float_dot (float const *, float const *, unsigned dim);
using std::vector;
/// namespace for various distance metrics.
namespace metric {
    /// Squared Euclidean (L2) distance plus related primitives.
    struct l2sqr {
        /// Squared L2 distance between two dim-element vectors.
        template <typename T>
        static float apply (T const *t1, T const *t2, unsigned dim) {
            float acc = 0;
            for (unsigned k = 0; k < dim; ++k) {
                float d = float(t1[k]) - float(t2[k]);
                acc += d * d;
            }
            return acc;
        }
        /// Inner (dot) product of two dim-element vectors.
        template <typename T>
        static float dot (T const *t1, T const *t2, unsigned dim) {
            float acc = 0;
            for (unsigned k = 0; k < dim; ++k) {
                acc += float(t1[k]) * float(t2[k]);
            }
            return acc;
        }
        /// Squared L2 norm of a single dim-element vector (no square root).
        template <typename T>
        static float norm2 (T const *t1, unsigned dim) {
            float acc = 0;
            for (unsigned k = 0; k < dim; ++k) {
                float v = float(t1[k]);
                acc += v * v;
            }
            return acc;
        }
    };
    /// Euclidean (L2) distance: square root of the squared distance.
    struct l2 {
        template <typename T>
        static float apply (T const *t1, T const *t2, unsigned dim) {
            float d2 = l2sqr::apply<T>(t1, t2, dim);
            return sqrt(d2);
        }
    };
}
/// Dense row-major matrix whose rows are padded to an A-byte boundary.
/** Each row starts at an A-aligned address so SSE/AVX loads on row pointers
 *  are legal.  Storage is a raw memalign buffer, not a std::vector; no copy
 *  constructor is defined, so the compiler-generated memberwise copy would
 *  double-free — do not copy instances.
 *  NOTE(review): `runtime_error`, `io_error` and `invalid_argument` are used
 *  unqualified here — presumably brought into scope by a project header
 *  included before this one; confirm against kgraph.h. */
template <typename T, unsigned A = KGRAPH_MATRIX_ALIGN>
class Matrix {
    unsigned col;       // elements per row
    unsigned row;       // number of rows
    size_t stride;      // bytes per row, rounded up to a multiple of A
    char *data;         // aligned storage; row i begins at data + stride * i
    // (Re)allocate storage for r x c elements; previous contents are discarded.
    void reset (unsigned r, unsigned c) {
        row = r;
        col = c;
        // round the row size up to the next multiple of the alignment A
        stride = (sizeof(T) * c + A - 1) / A * A;
        if (data) free(data);
        data = (char *)memalign(A, row * stride); // SSE instruction needs data to be aligned
        if (!data) throw runtime_error("memalign");
    }
public:
    Matrix (): col(0), row(0), stride(0), data(0) {}
    Matrix (unsigned r, unsigned c): data(0) {
        reset(r, c);
    }
    ~Matrix () {
        if (data) free(data);
    }
    /// number of rows
    unsigned size () const {
        return row;
    }
    /// elements per row
    unsigned dim () const {
        return col;
    }
    /// bytes per row (>= dim() * sizeof(T))
    size_t step () const {
        return stride;
    }
    /// Reallocate to r x c; existing contents are discarded, not preserved.
    void resize (unsigned r, unsigned c) {
        reset(r, c);
    }
    T const *operator [] (unsigned i) const {
        return reinterpret_cast<T const *>(&data[stride * i]);
    }
    T *operator [] (unsigned i) {
        return reinterpret_cast<T *>(&data[stride * i]);
    }
    /// Zero the whole buffer, including alignment padding.
    void zero () {
        memset(data, 0, row * stride);
    }
    /// Scale every row to unit Euclidean length, in place.
    // NOTE(review): an all-zero row divides by 0 here and fills the row with
    // NaN/inf — confirm callers never normalize zero vectors.
    void normalize2 () {
#pragma omp parallel for
        for (unsigned i = 0; i < row; ++i) {
            T *p = operator[](i);
            double sum = metric::l2sqr::norm2(p, col);
            sum = std::sqrt(sum);
            for (unsigned j = 0; j < col; ++j) {
                p[j] /= sum;
            }
        }
    }
    /// Bulk-load raw binary vectors from a file.
    /** Reads N = (filesize - skip) / (sizeof(T) * dim + gap) rows, skipping
     *  `skip` header bytes and `gap` bytes between consecutive records. */
    void load (const std::string &path, unsigned dim, unsigned skip = 0, unsigned gap = 0) {
        std::ifstream is(path.c_str(), std::ios::binary);
        if (!is) throw io_error(path);
        is.seekg(0, std::ios::end);
        size_t size = is.tellg();
        size -= skip;
        unsigned line = sizeof(T) * dim + gap;  // bytes per on-disk record
        unsigned N = size / line;
        reset(N, dim);
        zero();     // so padding bytes between col and stride are deterministic
        is.seekg(skip, std::ios::beg);
        for (unsigned i = 0; i < N; ++i) {
            is.read(&data[stride * i], sizeof(T) * dim);
            is.seekg(gap, std::ios::cur);
        }
        if (!is) throw io_error(path);
    }
    /// Load a matrix in LSHKIT format: 3 x unsigned header, then row data.
    void load_lshkit (std::string const &path) {
        static const unsigned LSHKIT_HEADER = 3;
        std::ifstream is(path.c_str(), std::ios::binary);
        unsigned header[LSHKIT_HEADER]; /* entry size, row, col */
        is.read((char *)header, sizeof header);
        if (!is) throw io_error(path);
        if (header[0] != sizeof(T)) throw io_error(path);   // element size must match T
        is.close();
        unsigned D = header[2];
        unsigned skip = LSHKIT_HEADER * sizeof(unsigned);
        unsigned gap = 0;
        load(path, D, skip, gap);
    }
    /// Save the matrix in LSHKIT format.
    // NOTE(review): write errors are not checked here.
    void save_lshkit (std::string const &path) {
        std::ofstream os(path.c_str(), std::ios::binary);
        unsigned header[3];
        assert(sizeof header == 3*4);
        header[0] = sizeof(T);
        header[1] = row;
        header[2] = col;
        os.write((const char *)header, sizeof(header));
        for (unsigned i = 0; i < row; ++i) {
            os.write(&data[stride * i], sizeof(T) * col);
        }
    }
};
/// Matrix proxy to interface with 3rd party libraries (FLANN, OpenCV, NumPy).
/** Non-owning view: stores rows/cols/stride and a byte pointer into storage
 *  owned elsewhere; it must not outlive the wrapped matrix. */
template <typename DATA_TYPE, unsigned A = KGRAPH_MATRIX_ALIGN>
class MatrixProxy {
    unsigned rows;
    unsigned cols; // # elements, not bytes, in a row,
    size_t stride; // # bytes in a row, >= cols * sizeof(element)
    uint8_t const *data;
public:
    MatrixProxy (Matrix<DATA_TYPE> const &m)
        : rows(m.size()), cols(m.dim()), stride(m.step()), data(reinterpret_cast<uint8_t const *>(m[0])) {
    }
#ifndef __AVX__
    // The adapters below accept externally allocated buffers; they are only
    // compiled when AVX is off — presumably because external buffers cannot
    // guarantee the 32-byte alignment AVX requires (confirm).
#ifdef FLANN_DATASET_H_
    /// Construct from FLANN matrix.
    MatrixProxy (flann::Matrix<DATA_TYPE> const &m)
        : rows(m.rows), cols(m.cols), stride(m.stride), data(m.data) {
        if (stride % A) throw invalid_argument("bad alignment");
    }
#endif
#ifdef CV_MAJOR_VERSION
    /// Construct from OpenCV matrix.
    MatrixProxy (cv::Mat const &m)
        : rows(m.rows), cols(m.cols), stride(m.step), data(m.data) {
        if (stride % A) throw invalid_argument("bad alignment");
    }
#endif
#ifdef NPY_NDARRAYOBJECT_H
    /// Construct from NumPy matrix.
    // NOTE(review): uses the legacy direct PyArrayObject struct fields
    // (nd, dimensions, strides, descr, data) — deprecated since NumPy 1.7;
    // confirm the build targets the old C-API.
    MatrixProxy (PyArrayObject *obj) {
        if (!obj || (obj->nd != 2)) throw invalid_argument("bad array shape");
        rows = obj->dimensions[0];
        cols = obj->dimensions[1];
        stride = obj->strides[0];
        data = reinterpret_cast<uint8_t const *>(obj->data);
        if (obj->descr->elsize != sizeof(DATA_TYPE)) throw invalid_argument("bad data type size");
        if (stride % A) throw invalid_argument("bad alignment");
        if (!(stride >= cols * sizeof(DATA_TYPE))) throw invalid_argument("bad stride");
    }
#endif
#endif
    /// number of rows
    unsigned size () const {
        return rows;
    }
    /// elements per row
    unsigned dim () const {
        return cols;
    }
    DATA_TYPE const *operator [] (unsigned i) const {
        return reinterpret_cast<DATA_TYPE const *>(data + stride * i);
    }
    DATA_TYPE *operator [] (unsigned i) {
        return const_cast<DATA_TYPE *>(reinterpret_cast<DATA_TYPE const *>(data + stride * i));
    }
};
/// Oracle for Matrix or MatrixProxy.
/** DATA_TYPE can be Matrix or MatrixProxy,
 * DIST_TYPE should be one class within the namespace kgraph.metric.
 */
template <typename DATA_TYPE, typename DIST_TYPE>
class MatrixOracle: public kgraph::IndexOracle {
    MatrixProxy<DATA_TYPE> proxy;
public:
    /// Per-query oracle: distances from one fixed query vector.
    class SearchOracle: public kgraph::SearchOracle {
        MatrixProxy<DATA_TYPE> proxy;
        DATA_TYPE const *query;   // borrowed pointer; caller keeps it alive
    public:
        SearchOracle (MatrixProxy<DATA_TYPE> const &p, DATA_TYPE const *q): proxy(p), query(q) {
        }
        virtual unsigned size () const {
            return proxy.size();
        }
        /// Distance from the query to data point i.
        virtual float operator () (unsigned i) const {
            return DIST_TYPE::apply(proxy[i], query, proxy.dim());
        }
    };
    template <typename MATRIX_TYPE>
    MatrixOracle (MATRIX_TYPE const &m): proxy(m) {
    }
    virtual unsigned size () const {
        return proxy.size();
    }
    /// Distance between data points i and j.
    virtual float operator () (unsigned i, unsigned j) const {
        return DIST_TYPE::apply(proxy[i], proxy[j], proxy.dim());
    }
    /// Build a SearchOracle bound to the given query vector.
    SearchOracle query (DATA_TYPE const *query) const {
        return SearchOracle(proxy, query);
    }
};
/// Mean fraction of the true K nearest neighbors recovered by `result`.
/// `gs` holds the ground-truth neighbor distances per query (ascending),
/// `result` the distances produced by the index; K == 0 means result.dim().
/// Matching is by exact distance equality, so both matrices must come from
/// the same metric; a result distance strictly smaller than the ground
/// truth is impossible for a correct oracle and raises runtime_error.
inline float AverageRecall (Matrix<float> const &gs, Matrix<float> const &result, unsigned K = 0) {
    if (K == 0) {
        K = result.dim();
    }
    if (!(gs.dim() >= K)) throw invalid_argument("gs.dim() >= K");
    if (!(result.dim() >= K)) throw invalid_argument("result.dim() >= K");
    // fix: message used to read "gs.size() > result.size()", which
    // contradicted the >= condition actually tested
    if (!(gs.size() >= result.size())) throw invalid_argument("gs.size() >= result.size()");
    if (result.size() == 0) return 0;   // avoid 0/0 NaN on an empty result
    float sum = 0;
    for (unsigned i = 0; i < result.size(); ++i) {
        float const *gs_row = gs[i];
        float const *re_row = result[i];
        // merge-style scan over two sorted top-K distance lists
        unsigned found = 0;
        unsigned gs_n = 0;
        unsigned re_n = 0;
        while ((gs_n < K) && (re_n < K)) {
            if (gs_row[gs_n] < re_row[re_n]) {
                ++gs_n;
            }
            else if (gs_row[gs_n] == re_row[re_n]) {
                ++found;
                ++gs_n;
                ++re_n;
            }
            else {
                // result claims a distance better than ground truth
                throw runtime_error("distance is unstable");
            }
        }
        sum += float(found) / K;
    }
    return sum / result.size();
}
}
// Vectorized distance kernels: specialize the scalar metric templates with
// SSE2/AVX implementations when the compiler targets those ISAs.  Define
// KGRAPH_NO_VECTORIZE to force the portable scalar fallback.
#ifndef KGRAPH_NO_VECTORIZE
#ifdef __GNUC__
#ifdef __AVX__
#if 0
// AVX specialization intentionally disabled (#if 0); kept for reference.
namespace kgraph { namespace metric {
template <>
inline float l2sqr::apply<float> (float const *t1, float const *t2, unsigned dim) {
    return float_l2sqr_avx(t1, t2, dim);
}
}}
#endif
#else
#ifdef __SSE2__
namespace kgraph { namespace metric {
// squared L2 distance between two float vectors
template <>
inline float l2sqr::apply<float> (float const *t1, float const *t2, unsigned dim) {
    return float_l2sqr_sse2(t1, t2, dim);
}
// dot product
template <>
inline float l2sqr::dot<float> (float const *t1, float const *t2, unsigned dim) {
    return float_dot_sse2(t1, t2, dim);
}
// squared L2 norm (single-argument overload of float_l2sqr_sse2)
template <>
inline float l2sqr::norm2<float> (float const *t1, unsigned dim) {
    return float_l2sqr_sse2(t1, dim);
}
// squared L2 distance between two uint8 vectors
template <>
inline float l2sqr::apply<uint8_t> (uint8_t const *t1, uint8_t const *t2, unsigned dim) {
    return uint8_l2sqr_sse2(t1, t2, dim);
}
}}
#endif  // __SSE2__
#endif  // __AVX__ / else
#endif  // __GNUC__
#endif  // KGRAPH_NO_VECTORIZE
#endif  // presumably closes an include guard opened before this chunk — verify
|
GB_unop__identity_uint64_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint64_fc32)
// op(A') function: GB (_unop_tran__identity_uint64_fc32)
// C type: uint64_t
// A type: GxB_FC32_t
// cast: uint64_t cij = GB_cast_to_uint64_t ((double) crealf (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = GB_cast_to_uint64_t ((double) crealf (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = GB_cast_to_uint64_t ((double) crealf (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint64_fc32)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // all entries present: cast every value
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            GxB_FC32_t a_k = Ax [k] ;
            // drop the imaginary part, then round/clamp to uint64
            Cx [k] = GB_cast_to_uint64_t ((double) crealf (a_k)) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                GxB_FC32_t a_k = Ax [k] ;
                Cx [k] = GB_cast_to_uint64_t ((double) crealf (a_k)) ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint64_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template expands using the GB_* macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
StreamTriad_par4.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "timer.h"
/* Stream-triad benchmark offloaded with OpenMP target directives.
 * Reports the average kernel time over ntimes repetitions. */
int main(int argc, char *argv[]){
   int nsize = 20000000, ntimes=16;

   double* restrict a = malloc(nsize * sizeof(double));
   double* restrict b = malloc(nsize * sizeof(double));
   double* restrict c = malloc(nsize * sizeof(double));

   // fix: fail fast if any host allocation failed; the original mapped
   // potentially-NULL pointers onto the device
   if (a == NULL || b == NULL || c == NULL) {
      fprintf(stderr, "Error: failed to allocate three arrays of %d doubles\n", nsize);
      free(a);
      free(b);
      free(c);
      return 1;
   }

   // map arrays onto the device once, outside the timing loop
   // (a and b are (re)initialized on the device below, so the host
   // contents copied by map(to:) are never read)
#pragma omp target enter data map(to:a[0:nsize], b[0:nsize], c[0:nsize])

   struct timespec tstart;
   // initializing data and arrays
   double scalar = 3.0, time_sum = 0.0;
#pragma omp target teams distribute parallel for simd
   for (int i=0; i<nsize; i++) {
      a[i] = 1.0;
      b[i] = 2.0;
   }

   for (int k=0; k<ntimes; k++){
      cpu_timer_start(&tstart);
      // stream triad loop
#pragma omp target teams distribute parallel for simd
      for (int i=0; i<nsize; i++){
         c[i] = a[i] + scalar*b[i];
      }
      time_sum += cpu_timer_stop(tstart);
   }

   printf("Average runtime for stream triad loop is %lf msecs\n", time_sum/ntimes);

#pragma omp target exit data map(from:a[0:nsize], b[0:nsize], c[0:nsize])
   free(a);
   free(b);
   free(c);
   return(0);
}
|
velocity.c | #include "allvars.h"
#include "velocity.h"
/* Compute the bulk velocity of each void as the mean velocity of tracers
 * inside the radial shell [InnerShellVel, OuterShellVel] (in units of the
 * void radius) around the void center.  If the shell is empty it is widened
 * by 0.05 (PLUS) per pass until at least one tracer is found. */
void ComputeVelocity()
{
  int i,j,k,ic,jc,kc,in,m,NumGrid;
  int ii,jj,kk,l,next,Counter,NG[3];
  double xc[3],xt[3],dx[3],vt[3],GAP,GridSize[3];
  double dist,Radius,PLUS,MaxDist,MinDist;
  struct grid *GridList;
  int NumNeigh;
  struct neighbour Neigh;
  clock_t t;

  fprintf(logfile,"\n COMPUTING VOID BULK VELOCITIES \n");
  t = clock();

  // grid resolution: ~100 tracers per cell, but no coarser than 50^3
  NumGrid = (int)round(cbrt((double)NumTrac/100.0));
  if (NumGrid < 50) NumGrid = 50;
  GridList = (struct grid *) malloc(NumGrid*NumGrid*NumGrid*sizeof(struct grid));
  BuildGridList(GridList,NumGrid,GridSize,0,false);

  // GAP = diagonal of the largest cell; pads the neighbour-search radius so
  // no cell that could intersect the outer shell is missed
  GAP = 0.0;
  for (k=0; k<3; k++)
    if (GridSize[k] > GAP)
      GAP = GridSize[k];
  GAP *= sqrt(3.0);
  MinDist = 0.0;
  MaxDist = OuterShellVel*MaxRadiusSearch + GAP;
  SearchNeighbours(&Neigh,&NumNeigh,GridSize,MinDist,MaxDist);
  fprintf(logfile," | MinDist - MaxDist = %5.3f - %5.3f [Mpc/h], %d grids \n",MinDist,MaxDist,NumNeigh);
  fflush(logfile);

  // every loop-local scalar/array must appear in private(); keep the two
  // lists in sync when editing
  #pragma omp parallel for default(none) schedule(dynamic) \
   shared(NumVoid,Void,Tracer,NumNeigh,Neigh,LBox,InnerShellVel,OuterShellVel,\
   NumGrid,GridSize,GridList) \
   private(i,l,k,m,Radius,xc,ic,jc,kc,ii,jj,kk,next,dx,xt,dist,Counter,vt,PLUS,in)
  for (i=0; i<NumVoid; i++) {
    //if (omp_get_thread_num() == 0) Progress(i,NumVoid);
    if (!Void[i].ToF) continue;  // skip voids flagged as spurious

    Counter = 0;
    PLUS = 0.0;
    Radius = Void[i].Rad;
    for (k=0; k<3; k++) {
      xc[k] = (double)Void[i].Pos[k];
      Void[i].Vel[k] = 0.0;
    }

    // grid cell containing the void center
    ic = (int)(xc[0]/GridSize[0]);
    jc = (int)(xc[1]/GridSize[1]);
    kc = (int)(xc[2]/GridSize[2]);

    do {
      // scan the precomputed neighbour-cell offsets around (ic,jc,kc)
      for (in=0; in<NumNeigh; in++) {
        ii = PeriodicGrid(Neigh.i[in] + ic,NumGrid);
        jj = PeriodicGrid(Neigh.j[in] + jc,NumGrid);
        kk = PeriodicGrid(Neigh.k[in] + kc,NumGrid);
        l = Index1D(ii,jj,kk,NumGrid);
        if (GridList[l].NumMem == 0) continue;
        for (m=0; m<GridList[l].NumMem; m++) {
          next = GridList[l].Member[m];
          for (k=0; k<3; k++) {
            xt[k] = (double)Tracer[next].Pos[k];
            vt[k] = (double)Tracer[next].Vel[k];
            // minimum-image separation in a periodic box
            dx[k] = PeriodicDeltaPos(xc[k] - xt[k],LBox[k]);
          }
          dist = sqrt(dx[0]*dx[0] + dx[1]*dx[1] + dx[2]*dx[2]);
          dist /= Radius;  // normalize by void radius
          // shell test, widened by PLUS on each retry
          if (dist > InnerShellVel-PLUS && dist < OuterShellVel+PLUS) {
            Void[i].Vel[0] += vt[0];
            Void[i].Vel[1] += vt[1];
            Void[i].Vel[2] += vt[2];
            Counter++;
          }
        }
      }
      PLUS += 0.05;  // widen the shell and retry if nothing was found
    } while (Counter == 0);

    // mean tracer velocity = void bulk velocity
    Void[i].Vel[0] /= (double)Counter;
    Void[i].Vel[1] /= (double)Counter;
    Void[i].Vel[2] /= (double)Counter;
  }

  FreeGridList(GridList,NumGrid);
  FreeNeighbours(&Neigh);
  StepName.push_back("Computing velocities");
  StepTime.push_back(Time(t,OMPcores));
}
|
blockchain_fmt_plug.c | /* blockchain "My Wallet" cracker patch for JtR. Hacked together during June of
* 2013 by Dhiru Kholia <dhiru at openwall.com>.
*
* See https://blockchain.info/wallet/wallet-format
*
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
 * improved detection, added iteration count and handling of v2 hashes, Feb, 2015, JimF.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_blockchain;
#elif FMT_REGISTERS_H
john_register_one(&fmt_blockchain);
#else
#include <string.h>
#include <errno.h>
#include "arch.h"
#include "jumbo.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "pbkdf2_hmac_sha1.h"
#include "aes.h"
#ifdef _OPENMP
#include <omp.h>
//#define OMP_SCALE 1 // tuned on core i7
#ifndef OMP_SCALE
#define OMP_SCALE 64 // tuned on AMD K8 dual-HT (XOP)
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Blockchain"
#define FORMAT_NAME "My Wallet"
#define FORMAT_TAG "$blockchain$"
#define TAG_LENGTH (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 AES " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 AES 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT " (x10)"
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define BIG_ENOUGH (8192 * 32)
// increase me (in multiples of 16) to increase the decrypted and search area
#define SAFETY_FACTOR 160
static struct fmt_tests agile_keychain_tests[] = {
{"$blockchain$400$53741f25a90ef521c90bb2fd73673e64089ff2cca6ba3cbf6f34e0f80f960b2f60b9ac48df009dc30c288dcf1ade5f16c70a3536403fc11a68f242ba5ad3fcceae3ca5ecd23905997474260aa1357fc322b1434ffa026ba6ad33707c9ad5260e7230b87d8888a45ddc27513adb30af8755ec0737963ae6bb281318c48f224e9c748f6697f75f63f718bebb3401d6d5f02cf62b1701c205762c2f43119b68771ed10ddab79b5f74f56d611f61f77b8b65b5b5669756017429633118b8e5b8b638667e44154de4cc76468c4200eeebda2711a65333a7e3c423c8241e219cdca5ac47c0d4479444241fa27da20dba1a1d81e778a037d40d33ddea7c39e6d02461d97185f66a73deedff39bc53af0e9b04a3d7bf43648303c9f652d99630cd0789819376d68443c85f0eeb7af7c83eecddf25ea912f7721e3fb73ccaedf860f0f033ffc990ed73db441220d0cbe6e029676fef264dc2dc497f39bedf4041ba355d086134744d5a36e09515d230cd499eb20e0c574fb1bd9d994ce26f53f21d06dd58db4f8e0efbcaee7038df793bbb3daa96", "strongpassword"},
{"$blockchain$384$ece598c58b22a3b245a02039ce36bdf589a86b6344e802b4a3ac9b727cc0b6977e9509bc1ac4d1b7b9cbf9089ecdc89706f0a469325f7ee218b2212b6cd3e32677be20eee91e267fe13ebded02946d4ae1163ef22b3dca327d7390091247ac770288a0c7be181b21a48a8f945d9913cdfdc4cfd739ee3a41ced11cacde22e3233250e36f8b8fb4d81de5298a84374af75b88afda3438eed232e52aa0eb29e0d475456c86ae9d1aaadca14bc25f273c93fd4d7fd8316ed5306733bca77e8214277edd3155342abe0710985dc20b4f80e6620e386aa7658f92df25c7c932f0eb1beca25253662bd558647a3ba741f89450bfdba59a0c016477450fbcecd62226626e06ed2e3f5a4180e32d534c7769bcd1160aad840cfd3b7b13a90d34fedb3408fe74379a9e8a840fe3bfee8e0ee01f77ee389613fa750c3d2771b83eeb4e16598f76c15c311c325bd5d54543571aa20934060e332f451e58d67ad0f4635c0c021fa76821a68d64f1a5fb6fd70365eef4442cedcc91eb8696d52d078807edd89d", "qwertyuiop1"},
/* here is a v2 hash. NOTE, it uses 5000 pbkdf2 for the hash */
{"$blockchain$v2$5000$544$9a4d5157d4969636b2fe0738f77a376feda2fb979738c5cf0e712f5d4a2f001608824a865d25041bc85e0ad35985999fcfae7d218eb109a703781f57e7b5a03c29ffdfb756ec8ee38ed8941b056922cdd174c8e89feb40e1a0e1766792845f57992ae9d7667eff41a5e5580f3f289b050d76cc0f049cbd30c675efc3a553c0f19f30cb9589c7c3773dd095de92a991963789408351f543c1dc307751e5f781c278da77270035e3743df01ab4e41155b6437d9c7e64388a28f8331aca4822e6b89cdd5f45061b99768218d853a3575bbd029564826bcb188d55444273cda588d4e593fc5d29696713d747cfc8302a3e9c9dbb1bb3754c2e00f28b69d8faeb2e45c04085359c6a9b6bfecfd0a6a8f27ad647b6bfd498f2224a8c0442f7fe730656263ac2869923b296ad9955dbad515b4f88ad33619bdacc33ae7f14c65fce029e0f9e4a9c414716d9a23e4361aa264493bb6fc9a7fda82599b0232174b9fc92a1c717ca2cc6deb8bd6aaf3706b95fdfdc582316cb3d271178dafe3a6704a918e07be057bef676bb144840c7f26676f183f2744fc2fe22c9c3feb7461b4383981c00b6fff403fef578f6e5464dc2d0bcb7b8d0dc2e7add502b34c8fe9f9b638eebe7ede25e351b17ea8b8c1f5213b69780c0ba7ef3d5734c0635e9d2ee49524914f047d45536180be25e7610db809db694ceeb16a3bfd8abd5ab0cda4415203408387698fe707568566f7f567164707091a806ac2d11b9b9dd0c3c991ff037f457", "Openwall1234#"},
/* this is the 'raw' hash to the line above. We do not handle this yet, but probably should. It is also mime, and not base-16 */
//{"{\"pbkdf2_iterations\":5000,\"version\":2,\"payload\":\"mk1RV9SWljay/gc493o3b+2i+5eXOMXPDnEvXUovABYIgkqGXSUEG8heCtNZhZmfz659IY6xCacDeB9X57WgPCn/37dW7I7jjtiUGwVpIs3RdMjon+tA4aDhdmeShF9XmSrp12Z+/0Gl5VgPPyibBQ12zA8EnL0wxnXvw6VTwPGfMMuVicfDdz3Qld6SqZGWN4lAg1H1Q8HcMHdR5feBwnjadycANeN0PfAatOQRVbZDfZx+ZDiKKPgzGspIIua4nN1fRQYbmXaCGNhTo1dbvQKVZIJryxiNVURCc82liNTlk/xdKWlnE9dHz8gwKj6cnbsbs3VMLgDyi2nY+usuRcBAhTWcaptr/s/QpqjyetZHtr/UmPIiSowEQvf+cwZWJjrChpkjspatmVXbrVFbT4itM2Gb2swzrn8Uxl/OAp4PnkqcQUcW2aI+Q2GqJkSTu2/Jp/2oJZmwIyF0ufySoccXyizG3ri9aq83Brlf39xYIxbLPScReNr+OmcEqRjge+BXvvZ2uxRIQMfyZnbxg/J0T8L+IsnD/rdGG0ODmBwAtv/0A/71ePblRk3C0Ly3uNDcLnrdUCs0yP6fm2OO6+ft4l41Gxfqi4wfUhO2l4DAun7z1XNMBjXp0u5JUkkU8EfUVTYYC+JedhDbgJ22lM7rFqO/2KvVqwzaRBUgNAg4dpj+cHVoVm9/VnFkcHCRqAasLRG5ud0MPJkf8Df0Vw==\"}", "Openwall1234#"},
{"$blockchain$v2$5000$544$a48c0a5f37986945940bd374f2e473a8f7c04719c04f7e3843f9f58caef832a738f6e3eb48f78ee059495790b0db93d8e2a1bbe9b81cdf6ac278599a30be0a12fcfa341fc29705948b2d885b2e93627ab53f5b67c4294bf2ae59571c04fbedc5a0e65547d356fef8b8090ad8e5744d63224f160b00f898e2583b2abe818454d15878afc11d0aee31f12e0553a84dff23e8e1438a212ae9c51d2c203d6c3e4746cddc94182f83fb8b2f7de79d3493d991f3d8718a58b6af7c2d33d8ef77b76e20bb859b13fad787ea7ad9a057e3ac9697b051c6749e3d3dc9a7699e13b08c7254ad687cf09f005800ab190e13c7cf9b881582b52e6c154e562fe73a723b0b1c0b80be352873c1ab8456a4a0d57bb5185f5c4cb1e150359578344ea8321cc5a0a94807fe06a89742226b2c74e8b6f1653ea84bf79e525fc92ebb7aa9106774e1b9dc794f5280ab2a5df818aeae0e467aeac0083aaea0b1f9d4c754324938caa4e8594aa69f988a0c424ae1fe5e1b91c82bccf6f995ec28d3e300b2eb62daa6ba72b4df46a788d724ec0f1f102d262b6c129ee9cd0d5674d3bc71350091b23a6219ff900653cdb52143b549829330abd15eb1f2d8e742565ed5ede6285908b040b75ca0b1871bbfb8e3a8115afef2ff8c46f180765387fb55e896a9c3a3073f57509a4102dec52d77dbb88f97cf6d83f0834b1dc7c0343a1a6b2144f2d264a3f0c4d9eb014c07fde9f1c1b6cc02fdb2e87583277194332d90b3b491d1a441ed57ce", "johntheripper!"},
{NULL}
};
/* Per-candidate plaintexts and crack results (allocated in init()). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked;
/* Parsed ciphertext: the raw encrypted wallet blob plus PBKDF2 parameters. */
static struct custom_salt {
	unsigned char data[BIG_ENOUGH]; /* 16-byte IV followed by the AES-CBC ciphertext */
	int length;                     /* bytes used in data[] */
	int iter;                       /* PBKDF2 iterations (10 for v1, explicit for v2) */
} *cur_salt;
/* One-time setup: under OpenMP, scale the per-call key count by the thread
 * count (times OMP_SCALE) so each thread has work, then allocate the
 * zero-initialized candidate and result buffers. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	int omp_t = 1;
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_align(sizeof(*saved_key),
		self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	cracked = mem_calloc_align(sizeof(*cracked),
		self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
/* Release the per-candidate buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(cracked);
}
/* Accept only well-formed "$blockchain$[v2$iter$]len$hexblob" ciphertexts.
 * Returns 1 when the string parses cleanly, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int len, extra;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
		return 0;
	/* strtokm mutates its input, so parse a private copy */
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += TAG_LENGTH;
	if ((p = strtokm(ctcopy, "$")) == NULL)
		goto err;
	if (!strcmp(p, "v2")) {
		/* v2 hashes carry an explicit decimal iteration count */
		if ((p = strtokm(NULL, "$")) == NULL)
			goto err;
		if (!isdec(p))
			goto err;
		if ((p = strtokm(NULL, "$")) == NULL)
			goto err;
	}
	if (!isdec(p))
		goto err;
	len = atoi(p);
	/* len = byte length of the encrypted blob; must fit the salt buffer */
	if(len > BIG_ENOUGH || !len)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)
		goto err;
	/* hex field must encode exactly len bytes, with no trailing junk */
	if (hexlenl(p, &extra) != len * 2 || extra)
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/* Parse a (pre-validated) ciphertext into a custom_salt.  Returns a pointer
 * to a static buffer — the JtR core copies SALT_SIZE bytes out, so this is
 * the usual convention, though it is not reentrant. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static union {
		struct custom_salt _cs;
		ARCH_WORD_32 dummy; /* forces SALT_ALIGN-compatible alignment */
	} un;
	struct custom_salt *cs = &(un._cs);

	memset(&un, 0, sizeof(un));
	ctcopy += TAG_LENGTH;
	p = strtokm(ctcopy, "$");
	if (!strcmp(p, "v2")) {
		/* v2: explicit iteration count precedes the length field */
		p = strtokm(NULL, "$");
		cs->iter = atoi(p);
		p = strtokm(NULL, "$");
	} else
		cs->iter = 10; /* v1 format: fixed count of 10 */
	cs->length = atoi(p);
	p = strtokm(NULL, "$");
	/* decode the hex blob into raw bytes */
	for (i = 0; i < cs->length; i++)
		cs->data[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)cs;
}
/* Select the salt the next crypt_all() batch will run against. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}
/* Try one derived AES-256 key against the wallet blob.
 * data layout: bytes 0..15 are the CBC IV, ciphertext follows.
 * Returns 0 on a confirmed hit, -1 otherwise. */
static int blockchain_decrypt(unsigned char *derived_key, unsigned char *data)
{
	unsigned char out[SAFETY_FACTOR];
	AES_KEY akey;
	unsigned char iv[16];

	memcpy(iv, cur_salt->data, 16);
	if(AES_set_decrypt_key(derived_key, 256, &akey) < 0) {
		fprintf(stderr, "AES_set_decrypt_key failed in crypt!\n");
		/* fix: previously fell through and used an uninitialized
		 * key schedule; treat key-setup failure as a miss */
		return -1;
	}
	/* decrypt a single block first as a cheap reject test */
	AES_cbc_encrypt(data + 16, out, 16, &akey, iv, AES_DECRYPT);
	/* various tests */
	if (out[0] != '{') // fast test
		return -1;
	// "guid" will be found in the first block
	if (memmem(out, 16, "\"guid\"", 6)) {
		memcpy(iv, cur_salt->data, 16); //IV has to be reset.
		/* decrypt a larger window and confirm two more JSON keys */
		AES_cbc_encrypt(data + 16, out, SAFETY_FACTOR, &akey, iv, AES_DECRYPT);
		if (memmem(out, SAFETY_FACTOR, "\"sharedKey\"", 11) &&
		    memmem(out, SAFETY_FACTOR, "\"options\"", 9))
			// Note, we 'could' check that the guid and sharedKey values are
			// 'valid' GUID's, but there really is no point. We already have
			// 2^216 confidence in the simple text strings being found.
			return 0;
	}
	return -1;
}
/* Derive a 256-bit key per candidate with PBKDF2-SHA1 (salt = first 16
 * bytes of the blob, cur_salt->iter rounds) and test it with
 * blockchain_decrypt().  Results land in cracked[]. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
	{
#ifdef SIMD_COEF_32
		/* SIMD path: derive MAX_KEYS_PER_CRYPT keys per call */
		unsigned char master[MAX_KEYS_PER_CRYPT][32];
		int lens[MAX_KEYS_PER_CRYPT], i;
		unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			lens[i] = strlen(saved_key[i+index]);
			pin[i] = (unsigned char*)saved_key[i+index];
			pout[i] = master[i];
		}
		pbkdf2_sha1_sse((const unsigned char **)pin, lens,
			cur_salt->data, 16, cur_salt->iter, pout, 32, 0);
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			if(blockchain_decrypt(master[i], cur_salt->data) == 0)
				cracked[i+index] = 1;
			else
				cracked[i+index] = 0;
		}
#else
		/* scalar path: one key at a time */
		unsigned char master[32];
		pbkdf2_sha1((unsigned char *)saved_key[index],
			strlen(saved_key[index]),
			cur_salt->data, 16,
			cur_salt->iter, master, 32, 0);
		if(blockchain_decrypt(master, cur_salt->data) == 0)
			cracked[index] = 1;
		else
			cracked[index] = 0;
#endif
	}
	return count;
}
/* Batch check: report a hit if any candidate in this batch succeeded. */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (cracked[i])
			return 1;
	}
	return 0;
}
/* Did candidate `index` decrypt the wallet? */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* Decryption already proved the password; nothing further to verify. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static void agile_keychain_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored plaintext candidate for this index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Format descriptor registered with the JtR core; unused hooks use the
 * fmt_default_* stubs.  See formats.h for the meaning of each slot. */
struct fmt_main fmt_blockchain = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		agile_keychain_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		agile_keychain_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
GB_unop__identity_int16_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int16_uint32)
// op(A') function: GB (_unop_tran__identity_int16_uint32)
// C type: int16_t
// A type: uint32_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = (int16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = (int16_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int16_uint32)
(
    int16_t *Cx,                // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // all entries present: cast every value
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint32_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            // truncating cast from uint32 to int16
            Cx [k] = (int16_t) Ax [k] ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                Cx [k] = (int16_t) Ax [k] ;
            }
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int16_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template expands using the GB_* macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
wshfl.c | /* Copyright 2018-2019. Massachusetts Institute of Technology.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2018-2019 Siddharth Iyer <ssi@mit.edu>
*
* Tamir J, Uecker M, Chen W, Lai P, Alley MT, Vasanawala SS, Lustig M.
* T2 shuffling: Sharp, multicontrast, volumetric fast spin‐echo imaging.
* Magnetic resonance in medicine. 2017 Jan 1;77(1):180-95.
*
* B Bilgic, BA Gagoski, SF Cauley, AP Fan, JR Polimeni, PE Grant,
* LL Wald, and K Setsompop, Wave-CAIPI for highly accelerated 3D
* imaging. Magn Reson Med (2014) doi: 10.1002/mrm.25347
*
* Iyer S, Bilgic B, Setsompop K.
* Faster T2 shuffling with Wave.
* Presented in the session: "Signal Encoding and Decoding" at ISMRM 2018.
* https://www.ismrm.org/18/program_files/O67.htm
*/
#include <stdbool.h>
#include <complex.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "num/multind.h"
#include "num/flpmath.h"
#include "num/fft.h"
#include "num/init.h"
#include "num/iovec.h"
#include "num/ops.h"
#include "num/ops_p.h"
#ifdef USE_CUDA
#include "num/gpuops.h"
#endif
#include "iter/iter.h"
#include "iter/lsqr.h"
#include "iter/misc.h"
#include "linops/linop.h"
#include "linops/fmac.h"
#include "linops/someops.h"
#include "linops/decompose_complex.h"
#include "misc/debug.h"
#include "misc/mri.h"
#include "misc/utils.h"
#include "misc/mmio.h"
#include "misc/misc.h"
#include "misc/opts.h"
#include "wavelet/wavthresh.h"
#include "lowrank/lrthresh.h"
static const char usage_str[] = "<maps> <wave> <phi> <reorder> <table> <output>";
static const char help_str[] =
"Perform a wave-shuffling reconstruction.\n\n"
"Conventions:\n"
" * (sx, sy, sz) - Spatial dimensions.\n"
" * wx - Extended FOV in READ_DIM due to\n"
" wave's voxel spreading.\n"
" * (nc, md) - Number of channels and ESPIRiT's \n"
" extended-SENSE model operator\n"
" dimensions (or # of maps).\n"
" * (tf, tk) - Turbo-factor and the rank\n"
" of the temporal basis used in\n"
" shuffling.\n"
" * ntr - Number of TRs, or the number of\n"
" (ky, kz) points acquired of one\n"
" echo image.\n"
" * n - Total number of (ky, kz) points\n"
" acquired. This is equal to the\n"
" product of ntr and tf.\n\n"
"Descriptions:\n"
" * reorder is an (n by 3) index matrix such that\n"
" [ky, kz, t] = reorder(i, :) represents the\n"
" (ky, kz) kspace position of the readout line\n"
" acquired at echo number (t), and 0 <= ky < sy,\n"
" 0 <= kz < sz, 0 <= t < tf).\n"
" * table is a (wx by nc by n) matrix such that\n"
" table(:, :, k) represents the kth multichannel\n"
" kspace line.\n\n"
"Expected dimensions:\n"
" * maps - ( sx, sy, sz, nc, md, 1, 1)\n"
" * wave - ( wx, sy, sz, 1, 1, 1, 1)\n"
" * phi - ( 1, 1, 1, 1, 1, tf, tk)\n"
" * output - ( sx, sy, sz, 1, md, 1, tk)\n"
" * reorder - ( n, 3, 1, 1, 1, 1, 1)\n"
" * table - ( wx, nc, n, 1, 1, 1, 1)";
/* Helper function to print out operator dimensions. */
static void print_opdims(const struct linop_s* op)
{
	const struct iovec_s* dom = linop_domain(op);
	const struct iovec_s* cod = linop_codomain(op);

	debug_printf(DP_INFO, "\tDomain: [");

	for (long i = 0; i < dom->N; i++)
		debug_printf(DP_INFO, "%6ld", dom->dims[i]);

	debug_printf(DP_INFO, "]\n");

	debug_printf(DP_INFO, "\tCodomain: [");

	for (long i = 0; i < cod->N; i++)
		debug_printf(DP_INFO, "%6ld", cod->dims[i]);

	debug_printf(DP_INFO, "]\n");
}
/* Construct sampling mask array from reorder tables.
 * Each reorder row (ky, kz, t) marks one acquired (ky, kz) position of
 * echo t in the (sy, sz, tf) mask. */
static void construct_mask(
	long reorder_dims[DIMS], complex float* reorder,
	long mask_dims[DIMS], complex float* mask)
{
	long num_lines = reorder_dims[0];
	long sy = mask_dims[1];
	long sz = mask_dims[2];

	for (long idx = 0; idx < num_lines; idx++) {

		long ky = lround(creal(reorder[idx]));
		long kz = lround(creal(reorder[idx + num_lines]));
		long echo = lround(creal(reorder[idx + 2 * num_lines]));

		mask[ky + kz * sy + echo * sy * sz] = 1;
	}
}
/* State shared by the wave-shuffling table operator (apply/adjoint). */
struct kern_s {
	INTERFACE(linop_data_t);
	unsigned int N; /* number of array dimensions — presumably DIMS; verify */
	long* reorder_dims; // Dimension of the index table: ( n, 3, 1, 1, 1, 1, 1, 1)
	long* phi_dims; // Dimension of the temporal basis: ( 1, 1, 1, 1, 1, tf, tk, 1)
	long* table_dims; // Dimension of the data table: (wx, nc, n, 1, 1, 1, 1, 1)
	long* kernel_dims; // Dimension of the kernel: ( 1, sy, sz, 1, 1, 1, tk, tk)
	complex float* reorder; /* (ky, kz, echo) triples, stored column-wise */
	complex float* phi; /* temporal basis matrix */
	complex float* kernel;
	complex float* gpu_kernel;
};
static DEF_TYPEID(kern_s);
/* Go to table from coefficient-kspace with memory efficiency. */
/* Forward operator: expand coefficient images (wx, sy, sz, nc, tk) through
 * the temporal basis phi (tf x tk) and sample the (ky, kz, echo) locations
 * listed in reorder, producing the data table dst (wx, nc, n). */
static void kern_apply(const linop_data_t* _data, complex float* dst, const complex float* src)
{
	const struct kern_s* data = CAST_DOWN(kern_s, _data);

	long wx = data->table_dims[0];
	long sy = data->kernel_dims[1];
	long sz = data->kernel_dims[2];
	long nc = data->table_dims[1];
	long n = data->reorder_dims[0];
	long tf = data->phi_dims[5];
	long tk = data->phi_dims[6];

	long input_dims[] = { [0 ... DIMS - 1] = 1 };
	input_dims[0] = wx;
	input_dims[1] = sy;
	input_dims[2] = sz;
	input_dims[3] = nc;
	input_dims[6] = tk;

	/* Permute so each (ky, kz) position owns one contiguous (wx, nc, tk)
	 * chunk that can be expanded through phi independently. */
	long perm_dims[] = { [0 ... DIMS - 1] = 1 };
	perm_dims[0] = wx;
	perm_dims[1] = nc;
	perm_dims[3] = tk;
	perm_dims[4] = sy;
	perm_dims[5] = sz;

	complex float* perm = md_alloc_sameplace(DIMS, perm_dims, CFL_SIZE, src);
	unsigned int permute_order[DIMS] = {0, 3, 5, 6, 1, 2, 4, 7};
	for (unsigned int i = 8; i < DIMS; i++)
		permute_order[i] = i;
	md_permute(DIMS, permute_order, perm_dims, perm, input_dims, src, CFL_SIZE);

	/* 4D working shapes: (read, coil, echo, coeff) */
	long vec_dims[] = {wx, nc, tf, 1};
	long phi_mat_dims[] = { 1, 1, tf, tk};
	long phi_in_dims[] = {wx, nc, 1, tk};
	long fmac_dims[] = {wx, nc, tf, tk};
	long line_dims[] = {wx, nc, 1, 1};

	complex float* vec = md_alloc_sameplace(4, vec_dims, CFL_SIZE, src);

	long vec_str[4];
	md_calc_strides(4, vec_str, vec_dims, CFL_SIZE);
	long phi_mat_str[4];
	md_calc_strides(4, phi_mat_str, phi_mat_dims, CFL_SIZE);
	long phi_in_str[4];
	md_calc_strides(4, phi_in_str, phi_in_dims, CFL_SIZE);
	long fmac_str[4];
	md_calc_strides(4, fmac_str, fmac_dims, CFL_SIZE);

	int y = -1;
	int z = -1;
	int t = -1;
	for (int i = 0; i < n; i ++) {
		/* kspace position and echo index of readout line i */
		y = lround(creal(data->reorder[i]));
		z = lround(creal(data->reorder[i + n]));
		t = lround(creal(data->reorder[i + 2 * n]));
		md_clear(4, vec_dims, vec, CFL_SIZE);
		/* vec(:,:,e) = sum_k perm(:,:,k; y,z) * phi(e,k) over all echoes */
		md_zfmac2(4, fmac_dims, vec_str, vec, phi_in_str, (perm + ((wx * nc * tk) * (y + z * sy))), phi_mat_str, data->phi);
		/* keep only the echo actually acquired on this line */
		md_copy(4, line_dims, dst + (i * wx * nc), vec + (t * wx * nc), CFL_SIZE);
	}

	md_free(perm);
	md_free(vec);
}
/* Collapse data table into the temporal basis for memory efficiency. */
/* Adjoint shuffling kernel: collapse the data table back into the
 * temporal basis (tf echoes -> tk coefficients).
 *
 * Lines are grouped by phase-encode position (y, z): for the first
 * unvisited line at a position, all lines sharing that position are
 * gathered into a per-thread echo vector, which is then projected onto
 * the conjugate basis and accumulated into the permuted output. */
static void kern_adjoint(const linop_data_t* _data, complex float* dst, const complex float* src)
{
struct kern_s* data = CAST_DOWN(kern_s, _data);
long wx = data->table_dims[0];
long sy = data->kernel_dims[1];
long sz = data->kernel_dims[2];
long nc = data->table_dims[1];
long n = data->reorder_dims[0];
long tf = data->phi_dims[5];
long tk = data->phi_dims[6];
/* Accumulator layout: (wx, nc, 1, tk, sy, sz) — inverse of kern_apply's
 * input permutation. */
long perm_dims[] = { [0 ... DIMS - 1] = 1 };
perm_dims[0] = wx;
perm_dims[1] = nc;
perm_dims[3] = tk;
perm_dims[4] = sy;
perm_dims[5] = sz;
complex float* perm = md_alloc_sameplace(DIMS, perm_dims, CFL_SIZE, dst);
md_clear(DIMS, perm_dims, perm, CFL_SIZE);
#ifdef _OPENMP
long num_threads = omp_get_max_threads();
#else
long num_threads = 1;
#endif
long vec_dims[] = {wx, nc, tf, 1};
long phi_mat_dims[] = { 1, 1, tf, tk};
long phi_out_dims[] = {wx, nc, 1, tk};
long fmac_dims[] = {wx, nc, tf, tk};
long line_dims[] = {wx, nc, 1, 1};
/* One scratch echo vector per thread, indexed by thread id. */
long vthrd_dims[] = {wx, nc, tf, 1, num_threads};
complex float* vec = md_alloc_sameplace(5, vthrd_dims, CFL_SIZE, dst);
md_clear(DIMS, vthrd_dims, vec, CFL_SIZE);
long vec_str[4];
md_calc_strides(4, vec_str, vec_dims, CFL_SIZE);
long phi_mat_str[4];
md_calc_strides(4, phi_mat_str, phi_mat_dims, CFL_SIZE);
long phi_out_str[4];
md_calc_strides(4, phi_out_str, phi_out_dims, CFL_SIZE);
long fmac_str[4];
md_calc_strides(4, fmac_str, fmac_dims, CFL_SIZE);
/* flags[i] != 0 marks line i as already folded into its (y, z) group. */
long flag_dims[1] = { n };
complex float* flags = md_calloc(1, flag_dims, CFL_SIZE);
/* NOTE(review): flags[] is read and written by multiple threads without
 * synchronization. Two iterations k, k' sharing the same (y, z) can both
 * observe flags == 0 before either sets it and then both accumulate into
 * the same region of perm concurrently — verify whether this needs a
 * critical section or a per-(y, z) work partitioning. */
#pragma omp parallel for
for (int k = 0; k < n; k ++) {
#ifdef _OPENMP
int tid = omp_get_thread_num();
#else
int tid = 0;
#endif
int y = lround(creal(data->reorder[k]));
int z = lround(creal(data->reorder[k + n]));
int t = -1;
if (0 == flags[k]) {
md_clear(4, vec_dims, vec + (wx * nc * tf * tid), CFL_SIZE);
/* Gather every later line at the same (y, z) into echo slot t. */
for (int i = k; i < n; i ++) {
if ((y == lround(creal(data->reorder[i]))) && (z == lround(creal(data->reorder[i + n])))) {
flags[i] = 1;
t = lround(creal(data->reorder[i + 2 * n]));
md_copy(4, line_dims, (vec + (wx * nc * tf * tid) + t * wx * nc), (src + i * wx * nc), CFL_SIZE);
}
}
/* Project echoes onto the conjugated basis: coefficients += phi^H * vec. */
md_zfmacc2(4, fmac_dims, phi_out_str, perm + (y + z * sy) * (wx * nc * tk), vec_str, vec + (wx * nc * tf * tid), phi_mat_str, data->phi);
}
}
/* Un-permute back to the image layout (wx, sy, sz, nc, 1, 1, tk). */
long out_dims[] = { [0 ... DIMS - 1] = 1 };
out_dims[0] = wx;
out_dims[1] = sy;
out_dims[2] = sz;
out_dims[3] = nc;
out_dims[6] = tk;
unsigned int permute_order[DIMS] = {0, 4, 5, 1, 6, 2, 3, 7};
for (unsigned int i = 8; i < DIMS; i++)
permute_order[i] = i;
md_permute(DIMS, permute_order, out_dims, dst, perm_dims, perm, CFL_SIZE);
md_free(vec);
md_free(perm);
md_free(flags);
}
/* Normal operator of the shuffling kernel: pointwise multiplication with
 * the precomputed sampling-temporal kernel (adjoint-after-forward fused
 * into a single fmac over the tk x tk kernel).
 *
 * Input has coefficients in dim 6; the output places them in dim 7 so the
 * kernel's (tk, tk) block acts as a matrix multiply via broadcasting. */
static void kern_normal(const linop_data_t* _data, complex float* dst, const complex float* src)
{
const struct kern_s* data = CAST_DOWN(kern_s, _data);
long wx = data->table_dims[0];
long sy = data->kernel_dims[1];
long sz = data->kernel_dims[2];
long nc = data->table_dims[1];
long tk = data->phi_dims[6];
long input_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
input_dims[0] = wx;
input_dims[1] = sy;
input_dims[2] = sz;
input_dims[3] = nc;
input_dims[6] = tk;
long input_str[DIMS];
md_calc_strides(DIMS, input_str, input_dims, CFL_SIZE);
/* Output: same shape but coefficient index moved from dim 6 to dim 7. */
long output_dims[DIMS];
md_copy_dims(DIMS, output_dims, input_dims);
output_dims[6] = 1;
output_dims[7] = tk;
long output_str[DIMS];
md_calc_strides(DIMS, output_str, output_dims, CFL_SIZE);
/* The GPU kernel was replicated along readout/coil dims at create time,
 * so it has explicit (non-broadcast) strides there. */
long gpu_kernel_dims[DIMS] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, gpu_kernel_dims, data->kernel_dims);
gpu_kernel_dims[0] = wx;
gpu_kernel_dims[3] = nc;
long kernel_str[DIMS];
md_calc_strides(DIMS, kernel_str, data->kernel_dims, CFL_SIZE);
long gpu_kernel_str[DIMS];
md_calc_strides(DIMS, gpu_kernel_str, gpu_kernel_dims, CFL_SIZE);
long fmac_dims[DIMS];
md_merge_dims(DIMS, fmac_dims, input_dims, data->kernel_dims);
md_clear(DIMS, output_dims, dst, CFL_SIZE);
#ifdef USE_CUDA
if(cuda_ondevice(src))
md_zfmac2(DIMS, fmac_dims, output_str, dst, input_str, src, gpu_kernel_str, data->gpu_kernel);
else
#endif
md_zfmac2(DIMS, fmac_dims, output_str, dst, input_str, src, kernel_str, data->kernel);
}
/* Release the kernel operator's private data.
 * Only the dimension arrays, the optional GPU copy of the kernel, and the
 * struct itself are owned here; reorder/phi/kernel/table data belong to
 * the caller and are not freed. */
static void kern_free(const linop_data_t* _data)
{
const struct kern_s* data = CAST_DOWN(kern_s, _data);
xfree(data->reorder_dims);
xfree(data->phi_dims);
xfree(data->table_dims);
xfree(data->kernel_dims);
#ifdef USE_CUDA
if (data->gpu_kernel != NULL)
md_free(data->gpu_kernel);
#endif
xfree(data);
}
/* Create the shuffling-kernel linear operator K.
 *
 * Maps coefficient k-space (wx, sy, sz, nc, 1, 1, tk) to the data table
 * (wx, nc, n). Copies the dimension arrays; keeps (does not own) the
 * reorder/phi/kernel data. On GPU, the kernel is replicated along the
 * readout and coil dimensions and uploaded once, for use by kern_normal. */
static const struct linop_s* linop_kern_create(bool gpu_flag,
const long _reorder_dims[DIMS], complex float* reorder,
const long _phi_dims[DIMS], complex float* phi,
const long _kernel_dims[DIMS], complex float* kernel,
const long _table_dims[DIMS])
{
PTR_ALLOC(struct kern_s, data);
SET_TYPEID(kern_s, data);
PTR_ALLOC(long[DIMS], reorder_dims);
PTR_ALLOC(long[DIMS], phi_dims);
PTR_ALLOC(long[DIMS], table_dims);
PTR_ALLOC(long[DIMS], kernel_dims);
md_copy_dims(DIMS, *reorder_dims, _reorder_dims);
md_copy_dims(DIMS, *phi_dims, _phi_dims);
md_copy_dims(DIMS, *table_dims, _table_dims);
md_copy_dims(DIMS, *kernel_dims, _kernel_dims);
data->reorder_dims = *PTR_PASS(reorder_dims);
data->phi_dims = *PTR_PASS(phi_dims);
data->table_dims = *PTR_PASS(table_dims);
data->kernel_dims = *PTR_PASS(kernel_dims);
data->reorder = reorder;
data->phi = phi;
data->kernel = kernel;
data->gpu_kernel = NULL;
#ifdef USE_CUDA
if(gpu_flag) {
/* Broadcast the kernel along readout (dim 0) and coils (dim 3) and
 * move the replicated copy to the GPU. */
long repmat_kernel_dims[DIMS] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, repmat_kernel_dims, _kernel_dims);
repmat_kernel_dims[0] = _table_dims[0];
repmat_kernel_dims[3] = _table_dims[1];
long kernel_strs[DIMS];
long repmat_kernel_strs[DIMS];
md_calc_strides(DIMS, kernel_strs, _kernel_dims, CFL_SIZE);
md_calc_strides(DIMS, repmat_kernel_strs, repmat_kernel_dims, CFL_SIZE);
complex float* repmat_kernel = md_calloc(DIMS, repmat_kernel_dims, CFL_SIZE);
md_copy2(DIMS, repmat_kernel_dims, repmat_kernel_strs, repmat_kernel, kernel_strs, kernel, CFL_SIZE);
data->gpu_kernel = md_gpu_move(DIMS, repmat_kernel_dims, repmat_kernel, CFL_SIZE);
md_free(repmat_kernel);
}
#else
UNUSED(gpu_flag);
#endif
/* Domain: coefficient k-space. */
long input_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
input_dims[0] = _table_dims[0];
input_dims[1] = _kernel_dims[1];
input_dims[2] = _kernel_dims[2];
input_dims[3] = _table_dims[1];
input_dims[6] = _phi_dims[6];
/* Codomain: data table (readout x coils x lines). */
long output_dims[DIMS] = { [0 ... DIMS - 1] = 1 };
output_dims[0] = _table_dims[0];
output_dims[1] = _table_dims[1];
output_dims[2] = _reorder_dims[0];
const struct linop_s* K = linop_create(DIMS, output_dims, DIMS, input_dims, CAST_UP(PTR_PASS(data)), kern_apply, kern_adjoint, kern_normal, NULL, kern_free);
return K;
}
/* Private data for the multi-coil wrapper around a single-channel operator. */
struct multc_s {
INTERFACE(linop_data_t);
unsigned int nc; // Number of coils.
unsigned int md; // Number of map sets (MAPS_DIM extent).
const complex float* maps; // Coil sensitivities (not owned).
const struct linop_s* sc_op; // Single channel operator.
};
static DEF_TYPEID(multc_s);
/* Multi-coil forward: for each coil k, multiply the input coefficients by
 * the coil's sensitivity map (summing over map sets), push the result
 * through the single-channel forward operator, and collect the per-coil
 * tables; finally permute to interleave coils within each line. */
static void multc_apply(const linop_data_t* _data, complex float* dst, const complex float* src)
{
const struct multc_s* data = CAST_DOWN(multc_s, _data);
// Loading single channel operator.
const struct operator_s* fwd = data->sc_op->forward;
const long* sc_inp_dims = linop_domain(data->sc_op)->dims;
const long* sc_out_dims = linop_codomain(data->sc_op)->dims;
long sx = sc_inp_dims[0];
long sy = sc_inp_dims[1];
long sz = sc_inp_dims[2];
long wx = sc_out_dims[0];
long n = sc_out_dims[2];
long nc = data->nc;
long md = data->md;
long src_dims[] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, src_dims, sc_inp_dims);
src_dims[MAPS_DIM] = md;
long dst_dims[] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, dst_dims, sc_out_dims);
dst_dims[1] = nc;
long map_dims[] = { [0 ... DIMS - 1] = 1};
map_dims[0] = sx;
map_dims[1] = sy;
map_dims[2] = sz;
map_dims[3] = nc;
map_dims[4] = md;
long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, single_map_dims, map_dims);
single_map_dims[COIL_DIM] = 1;
complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
complex float* buffer = md_alloc_sameplace(DIMS, sc_inp_dims, CFL_SIZE, src);
/* Per-coil tables stacked along dim 2: (wx, n, nc). */
long tbl_dims[] = { [0 ... DIMS - 1] = 1};
tbl_dims[0] = wx;
tbl_dims[1] = n;
tbl_dims[2] = nc;
complex float* tbl = md_alloc_sameplace(DIMS, tbl_dims, CFL_SIZE, src);
md_clear(DIMS, tbl_dims, tbl, CFL_SIZE);
long pos[] = { [0 ... DIMS - 1] = 0 };
long zfmac_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, zfmac_dims, src_dims);
long strides_single_map[DIMS];
md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);
long strides_src[DIMS];
md_calc_strides(DIMS, strides_src, src_dims, CFL_SIZE);
long strides_sc_inp[DIMS];
md_calc_strides(DIMS, strides_sc_inp, sc_inp_dims, CFL_SIZE);
for (long k = 0; k < data->nc; k++) {
md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
md_clear(DIMS, sc_inp_dims, buffer, CFL_SIZE);
/* Extract the sensitivity of coil k (all map sets). */
pos[COIL_DIM] = k;
md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
pos[COIL_DIM] = 0;
/* buffer = sum over map sets of (map * src). */
md_zfmac2(DIMS, zfmac_dims, strides_sc_inp, buffer, strides_src, src, strides_single_map, single_map);
operator_apply(fwd, DIMS, sc_out_dims, tbl + (wx * n * k), DIMS, sc_inp_dims, buffer);
}
md_clear(DIMS, dst_dims, dst, CFL_SIZE);
/* (wx, n, nc) -> (wx, nc, n): coil becomes dim 1 of the output table. */
unsigned int permute_order[DIMS] = {0, 2, 1};
for (unsigned int i = 3; i < DIMS; i++)
permute_order[i] = i;
md_permute(DIMS, permute_order, dst_dims, dst, tbl_dims, tbl, CFL_SIZE);
md_free(single_map);
md_free(buffer);
md_free(tbl);
}
/* Multi-coil adjoint: for each coil k, slice its lines out of the table,
 * apply the single-channel adjoint, multiply by the conjugate sensitivity
 * map and accumulate into the coefficient output. */
static void multc_adjoint(const linop_data_t* _data, complex float* dst, const complex float* src)
{
const struct multc_s* data = CAST_DOWN(multc_s, _data);
// Loading single channel operator.
const struct operator_s* adj = data->sc_op->adjoint;
const long* sc_inp_dims = linop_codomain(data->sc_op)->dims;
const long* sc_out_dims = linop_domain(data->sc_op)->dims;
long sx = sc_out_dims[0];
long sy = sc_out_dims[1];
long sz = sc_out_dims[2];
long wx = sc_inp_dims[0];
long n = sc_inp_dims[2];
long nc = data->nc;
long md = data->md;
long src_dims[] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, src_dims, sc_inp_dims);
src_dims[1] = nc;
long dst_dims[] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, dst_dims, sc_out_dims);
dst_dims[MAPS_DIM] = md;
long map_dims[] = { [0 ... DIMS - 1] = 1};
map_dims[0] = sx;
map_dims[1] = sy;
map_dims[2] = sz;
map_dims[3] = nc;
map_dims[4] = md;
long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, single_map_dims, map_dims);
single_map_dims[COIL_DIM] = 1;
complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
complex float* buffer1 = md_alloc_sameplace(DIMS, sc_out_dims, CFL_SIZE, src);
complex float* buffer2 = md_alloc_sameplace(DIMS, dst_dims, CFL_SIZE, src);
/* Single-coil table slice: (wx, 1, n). */
long tbl_dims[] = { [0 ... DIMS - 1] = 1};
tbl_dims[0] = wx;
tbl_dims[2] = n;
complex float* tbl = md_alloc_sameplace(DIMS, tbl_dims, CFL_SIZE, src);
long pos[] = { [0 ... DIMS - 1] = 0 };
long strides_single_map[DIMS];
md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);
long strides_sc_out[DIMS];
md_calc_strides(DIMS, strides_sc_out, sc_out_dims, CFL_SIZE);
long strides_dst[DIMS];
md_calc_strides(DIMS, strides_dst, dst_dims, CFL_SIZE);
md_clear(DIMS, dst_dims, dst, CFL_SIZE);
for (long k = 0; k < data->nc; k++) {
md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
md_clear(DIMS, sc_out_dims, buffer1, CFL_SIZE);
md_clear(DIMS, dst_dims, buffer2, CFL_SIZE);
md_clear(DIMS, tbl_dims, tbl, CFL_SIZE);
/* Slice coil k along dim 1 of the table (flag 2 == 1u << 1). */
pos[1] = k;
md_slice(DIMS, 2, pos, src_dims, tbl, src, CFL_SIZE);
pos[1] = 0;
operator_apply(adj, DIMS, sc_out_dims, buffer1, DIMS, tbl_dims, tbl);
pos[COIL_DIM] = k;
md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
pos[COIL_DIM] = 0;
/* buffer2 += conj(map) * adjoint-image; then accumulate over coils. */
md_zfmacc2(DIMS, dst_dims, strides_dst, buffer2, strides_sc_out, buffer1, strides_single_map, single_map);
md_zadd(DIMS, dst_dims, dst, dst, buffer2);
}
md_free(single_map);
md_free(buffer1);
md_free(buffer2);
md_free(tbl);
}
/* Multi-coil normal operator: for each coil, multiply by the map, apply
 * the single-channel normal operator, multiply by the conjugate map and
 * sum over coils. Avoids going through the full-size data table. */
static void multc_normal(const linop_data_t* _data, complex float* dst, const complex float* src)
{
const struct multc_s* data = CAST_DOWN(multc_s, _data);
// Loading single channel operator.
const struct operator_s* nrm = data->sc_op->normal;
const long* sc_dims = linop_domain(data->sc_op)->dims;
long sx = sc_dims[0];
long sy = sc_dims[1];
long sz = sc_dims[2];
long nc = data->nc;
long md = data->md;
long dims[] = { [0 ... DIMS - 1] = 1};
md_copy_dims(DIMS, dims, sc_dims);
dims[MAPS_DIM] = md;
long map_dims[] = { [0 ... DIMS - 1] = 1};
map_dims[0] = sx;
map_dims[1] = sy;
map_dims[2] = sz;
map_dims[3] = nc;
map_dims[4] = md;
long single_map_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, single_map_dims, map_dims);
single_map_dims[COIL_DIM] = 1;
complex float* single_map = md_alloc_sameplace(DIMS, single_map_dims, CFL_SIZE, src);
complex float* buffer1 = md_alloc_sameplace(DIMS, sc_dims, CFL_SIZE, src);
complex float* buffer2 = md_alloc_sameplace(DIMS, sc_dims, CFL_SIZE, src);
complex float* buffer3 = md_alloc_sameplace(DIMS, dims, CFL_SIZE, src);
long pos[] = { [0 ... DIMS - 1] = 0 };
long strides_single_map[DIMS];
md_calc_strides(DIMS, strides_single_map, single_map_dims, CFL_SIZE);
long strides_sc[DIMS];
md_calc_strides(DIMS, strides_sc, sc_dims, CFL_SIZE);
long strides[DIMS];
md_calc_strides(DIMS, strides, dims, CFL_SIZE);
md_clear(DIMS, dims, dst, CFL_SIZE);
for (long k = 0; k < data->nc; k++) {
md_clear(DIMS, single_map_dims, single_map, CFL_SIZE);
md_clear(DIMS, sc_dims, buffer1, CFL_SIZE);
md_clear(DIMS, sc_dims, buffer2, CFL_SIZE);
md_clear(DIMS, dims, buffer3, CFL_SIZE);
pos[COIL_DIM] = k;
md_slice(DIMS, COIL_FLAG, pos, map_dims, single_map, data->maps, CFL_SIZE);
pos[COIL_DIM] = 0;
/* buffer1 = map_k * src (summed over map sets). */
md_zfmac2(DIMS, dims, strides_sc, buffer1, strides, src, strides_single_map, single_map);
operator_apply(nrm, DIMS, sc_dims, buffer2, DIMS, sc_dims, buffer1);
/* buffer3 = conj(map_k) * normal-image; accumulate over coils. */
md_zfmacc2(DIMS, dims, strides, buffer3, strides_sc, buffer2, strides_single_map, single_map);
md_zadd(DIMS, dims, dst, dst, buffer3);
}
md_free(single_map);
md_free(buffer1);
md_free(buffer2);
md_free(buffer3);
}
static void multc_free(const linop_data_t* _data)
{
const struct multc_s* data = CAST_DOWN(multc_s, _data);
xfree(data);
}
/* Wrap a single-channel operator into a multi-coil operator E.
 * Domain gains the map-set dimension (MAPS_DIM = md); codomain gains the
 * coil dimension (dim 1 = nc). The maps pointer is borrowed, not owned. */
static struct linop_s* linop_multc_create(long nc, long md, const complex float* maps, const struct linop_s* sc_op)
{
PTR_ALLOC(struct multc_s, data);
SET_TYPEID(multc_s, data);
data->nc = nc;
data->md = md;
data->maps = maps;
data->sc_op = sc_op;
long* op_inp_dims = (long*) linop_domain(sc_op)->dims;
long* op_out_dims = (long*) linop_codomain(sc_op)->dims;
long input_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, input_dims, op_inp_dims);
input_dims[MAPS_DIM] = md;
long output_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, output_dims, op_out_dims);
output_dims[1] = nc;
struct linop_s* E = linop_create(DIMS, output_dims, DIMS, input_dims, CAST_UP(PTR_PASS(data)), multc_apply, multc_adjoint, multc_normal, NULL, multc_free);
return E;
}
/* Resize operator: zero-pad the readout dimension from sx to wx,
 * leaving the remaining image dimensions unchanged. */
static const struct linop_s* linop_wavereshape_create(long wx, long sx, long sy, long sz, long nc, long tk)
{
	long input_dims[] = { [0 ... DIMS - 1] = 1 };
	long output_dims[] = { [0 ... DIMS - 1] = 1 };

	input_dims[0] = sx;
	output_dims[0] = wx;

	input_dims[1] = output_dims[1] = sy;
	input_dims[2] = output_dims[2] = sz;
	input_dims[3] = output_dims[3] = nc;
	input_dims[6] = output_dims[6] = tk;

	return linop_resize_create(DIMS, output_dims, input_dims);
}
/* Fx operator: FFT along the readout dimension, centered or not. */
static const struct linop_s* linop_fx_create(long wx, long sy, long sz, long nc, long tk, bool centered)
{
	long dims[] = { [0 ... DIMS - 1] = 1 };

	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	return centered ? linop_fftc_create(DIMS, dims, READ_FLAG)
			: linop_fft_create(DIMS, dims, READ_FLAG);
}
/* Wave operator: pointwise multiplication with the wave point-spread
 * function, broadcast over the spatial (FFT_FLAGS) dimensions. */
static const struct linop_s* linop_wave_create(long wx, long sy, long sz, long nc, long tk, complex float* psf)
{
	long dims[] = { [0 ... DIMS - 1] = 1 };

	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	return linop_cdiag_create(DIMS, dims, FFT_FLAGS, psf);
}
/* Fyz operator: FFT along both phase-encode dimensions, centered or not. */
static const struct linop_s* linop_fyz_create(long wx, long sy, long sz, long nc, long tk, bool centered)
{
	long dims[] = { [0 ... DIMS - 1] = 1 };

	dims[0] = wx;
	dims[1] = sy;
	dims[2] = sz;
	dims[3] = nc;
	dims[6] = tk;

	return centered ? linop_fftc_create(DIMS, dims, PHS1_FLAG|PHS2_FLAG)
			: linop_fft_create(DIMS, dims, PHS1_FLAG|PHS2_FLAG);
}
/* Construct the sampling-temporal kernel.
 *
 * For every phase-encode position (y, z), forms the tk x tk matrix
 * phi^H * diag(mask(y, z, :)) * phi by probing with unit coefficient
 * vectors, then permutes the result into the kernel layout
 * (1, sy, sz, 1, 1, 1, tk, tk) expected by kern_normal.
 *
 * NOTE(review): cvec/mvec/tvec1/tvec2 are stack VLAs of size tk or tf —
 * fine for small basis sizes, but could overflow the stack for very
 * large tf; verify expected input ranges. */
static void construct_kernel(
long mask_dims[DIMS], complex float* mask,
long phi_dims[DIMS], complex float* phi,
long kern_dims[DIMS], complex float* kern)
{
long sy = mask_dims[1];
long sz = mask_dims[2];
long tf = phi_dims[5];
long tk = phi_dims[6];
long cvec_dims[] = { [0 ... DIMS - 1] = 1 };
cvec_dims[6] = tk;
long cvec_str[DIMS];
md_calc_strides(DIMS, cvec_str, cvec_dims, CFL_SIZE);
complex float cvec[tk];
long tvec_dims[] = { [0 ... DIMS - 1] = 1 };
tvec_dims[5] = tf;
long tvec_str[DIMS];
md_calc_strides(DIMS, tvec_str, tvec_dims, CFL_SIZE);
complex float mvec[tf];
complex float tvec1[tf];
complex float tvec2[tf];
long phi_str[DIMS];
md_calc_strides(DIMS, phi_str, phi_dims, CFL_SIZE);
/* Intermediate layout: (tk, sy, sz, tk) — column index fastest. */
long out_dims[] = { [0 ... DIMS - 1] = 1 };
out_dims[0] = tk;
out_dims[1] = sy;
out_dims[2] = sz;
out_dims[3] = tk;
complex float* out = md_calloc(DIMS, out_dims, CFL_SIZE);
for (int y = 0; y < sy; y ++) {
for (int z = 0; z < sz; z ++) {
/* Echo sampling mask at this (y, z). */
for (int t = 0; t < tf; t ++)
mvec[t] = mask[(y + sy * z) + (sy * sz) * t];
for (int t = 0; t < tk; t ++) {
/* Probe column t: e_t -> phi e_t -> mask .* (phi e_t) -> phi^H (...). */
cvec[t] = 1;
md_clear(DIMS, tvec_dims, tvec1, CFL_SIZE);
md_zfmac2(DIMS, phi_dims, tvec_str, tvec1, cvec_str, cvec, phi_str, phi);
md_clear(DIMS, tvec_dims, tvec2, CFL_SIZE);
md_zfmac2(DIMS, tvec_dims, tvec_str, tvec2, tvec_str, tvec1, tvec_str, mvec);
md_clear(DIMS, cvec_dims, out + y * tk + z * sy * tk + t * sy * sz * tk, CFL_SIZE);
md_zfmacc2(DIMS, phi_dims, cvec_str, out + y * tk + z * sy * tk + t * sy * sz * tk,
tvec_str, tvec2, phi_str, phi);
cvec[t] = 0;
}
}
}
/* Reorder into the kernel's storage layout. */
unsigned int permute_order[DIMS] = {4, 1, 2, 5, 6, 7, 3, 0};
for (unsigned int i = 8; i < DIMS; i++)
permute_order[i] = i;
md_permute(DIMS, permute_order, kern_dims, kern, out_dims, out, CFL_SIZE);
md_free(out);
}
/* Apply fftmod phase corrections so centered FFTs can be replaced by
 * uncentered ones in the operator chain.
 *
 * The readout dimension of the table and the spatial dimensions of the
 * maps receive the standard fftmod weighting; additionally, each table
 * line gets the linear phase corresponding to its phase-encode position
 * (y, z) taken from the reorder table. Modifies table and maps in place. */
static void fftmod_apply(long sy, long sz,
	long reorder_dims[DIMS], complex float* reorder,
	long table_dims[DIMS], complex float* table,
	long maps_dims[DIMS], complex float* maps)
{
	long wx = table_dims[0];
	long nc = table_dims[1];

	fftmod(DIMS, table_dims, READ_FLAG, table, table);
	fftmod(DIMS, maps_dims, FFT_FLAGS, maps, maps);

	double dy = ((double) sy/2)/((double) sy);
	double dz = ((double) sz/2)/((double) sz);

	/* One (wx x nc) line of the table at a time.
	 * Fixed: the range initializer previously read [0 ... DIMS], which
	 * declared an array of DIMS + 1 elements instead of DIMS. */
	long dims[] = { [0 ... DIMS - 1] = 1 };
	dims[0] = wx;
	dims[1] = nc;

	long n = reorder_dims[0];

	for (long k = 0; k < n; k++) {

		long y = lround(creal(reorder[k]));
		long z = lround(creal(reorder[k + n]));

		/* Linear phase ramp for this line's phase-encode position. */
		complex float py = cexp(2.i * M_PI * dy * y);
		complex float pz = cexp(2.i * M_PI * dz * z);

		md_zsmul(DIMS, dims, table + k * wx * nc, table + k * wx * nc, py * pz);
	}
}
enum algo_t { CG, IST, FISTA };
/* Main driver for the wave-shuffling (wshfl) reconstruction.
 *
 * Arguments (6): sensitivity maps, wave PSF, temporal basis phi, reorder
 * table, data table, output coefficients. Builds the forward model
 * A = Multicoil( K . Fyz . W . Fx . R ) and solves the regularized
 * least-squares problem with CG, IST or FISTA; with -F it instead applies
 * the (centered-FFT) forward model to given coefficients. */
int main_wshfl(int argc, char* argv[])
{
double start_time = timestamp();
float lambda = 1E-5;
int maxiter = 300;
int blksize = 8;
float step = 0.5;
float tol = 1.E-3;
bool llr = false;
bool wav = false;
bool fista = false;
bool hgwld = false;
float cont = 1;
float eval = -1;
const char* fwd = NULL;
const char* x0 = NULL;
int gpun = -1;
bool dcx = false;
bool pf = false;
const struct opt_s opts[] = {
OPT_FLOAT( 'r', &lambda, "lambda", "Soft threshold lambda for wavelet or locally low rank."),
OPT_INT( 'b', &blksize, "blkdim", "Block size for locally low rank."),
OPT_INT( 'i', &maxiter, "mxiter", "Maximum number of iterations."),
OPT_FLOAT( 's', &step, "stepsz", "Step size for iterative method."),
OPT_FLOAT( 'c', &cont, "cntnu", "Continuation value for IST/FISTA."),
OPT_FLOAT( 't', &tol, "toler", "Tolerance convergence condition for iterative method."),
OPT_FLOAT( 'e', &eval, "eigvl", "Maximum eigenvalue of normal operator, if known."),
OPT_STRING('F', &fwd, "frwrd", "Go from shfl-coeffs to data-table. Pass in coeffs path."),
OPT_STRING('O', &x0, "initl", "Initialize reconstruction with guess."),
OPT_INT( 'g', &gpun, "gpunm", "GPU device number."),
OPT_SET( 'f', &fista, "Reconstruct using FISTA instead of IST."),
OPT_SET( 'H', &hgwld, "Use hogwild in IST/FISTA."),
OPT_SET( 'v', &dcx, "Split coefficients to real and imaginary components."),
OPT_SET( 'w', &wav, "Use wavelet."),
OPT_SET( 'l', &llr, "Use locally low rank across temporal coefficients."),
OPT_SET( 'p', &pf, "Use locally low rank and real-imaginary components for partial fourier."),
};
cmdline(&argc, argv, 6, 6, usage_str, help_str, ARRAY_SIZE(opts), opts);
/* Partial-Fourier mode implies the real/imaginary split. */
if (pf)
dcx = true;
debug_printf(DP_INFO, "Loading data... ");
long maps_dims[DIMS];
complex float* maps = load_cfl(argv[1], DIMS, maps_dims);
long wave_dims[DIMS];
complex float* wave = load_cfl(argv[2], DIMS, wave_dims);
long phi_dims[DIMS];
complex float* phi = load_cfl(argv[3], DIMS, phi_dims);
long reorder_dims[DIMS];
complex float* reorder = load_cfl(argv[4], DIMS, reorder_dims);
long table_dims[DIMS];
complex float* table = load_cfl(argv[5], DIMS, table_dims);
debug_printf(DP_INFO, "Done.\n");
if (gpun >= 0)
num_init_gpu_device(gpun);
else
num_init();
/* Problem sizes, read off the loaded arrays. */
int wx = wave_dims[0];
int sx = maps_dims[0];
int sy = maps_dims[1];
int sz = maps_dims[2];
int nc = maps_dims[3];
int md = maps_dims[4];
int tf = phi_dims[5];
int tk = phi_dims[6];
debug_printf(DP_INFO, "Constructing sampling mask from reorder table... ");
long mask_dims[] = { [0 ... DIMS - 1] = 1 };
mask_dims[1] = sy;
mask_dims[2] = sz;
mask_dims[5] = tf;
complex float* mask = md_calloc(DIMS, mask_dims, CFL_SIZE);
construct_mask(reorder_dims, reorder, mask_dims, mask);
debug_printf(DP_INFO, "Done.\n");
debug_printf(DP_INFO, "Constructing sampling-temporal kernel... ");
long kernel_dims[] = { [0 ... DIMS - 1] = 1 };
kernel_dims[1] = sy;
kernel_dims[2] = sz;
kernel_dims[6] = tk;
kernel_dims[7] = tk;
complex float* kernel = md_calloc(DIMS, kernel_dims, CFL_SIZE);
construct_kernel(mask_dims, mask, phi_dims, phi, kernel_dims, kernel);
md_free(mask);
debug_printf(DP_INFO, "Done.\n");
/* Output coefficient image; dim 8 doubles when splitting real/imag. */
long coeff_dims[] = { [0 ... DIMS - 1] = 1 };
coeff_dims[0] = sx;
coeff_dims[1] = sy;
coeff_dims[2] = sz;
coeff_dims[4] = md;
coeff_dims[6] = tk;
coeff_dims[8] = dcx ? 2 : 1;
debug_printf(DP_INFO, "Creating single channel linear operators:\n");
double t1;
double t2;
t1 = timestamp();
const struct linop_s* R = linop_wavereshape_create(wx, sx, sy, sz, 1, tk);
t2 = timestamp();
debug_printf(DP_INFO, "\tR: %f seconds.\n", t2 - t1);
t1 = timestamp();
const struct linop_s* Fx = linop_fx_create(wx, sy, sz, 1, tk, false);
t2 = timestamp();
debug_printf(DP_INFO, "\tFx: %f seconds.\n", t2 - t1);
t1 = timestamp();
const struct linop_s* W = linop_wave_create(wx, sy, sz, 1, tk, wave);
t2 = timestamp();
debug_printf(DP_INFO, "\tW: %f seconds.\n", t2 - t1);
t1 = timestamp();
const struct linop_s* Fyz = linop_fyz_create(wx, sy, sz, 1, tk, false);
t2 = timestamp();
debug_printf(DP_INFO, "\tFyz: %f seconds.\n", t2 - t1);
t1 = timestamp();
long single_channel_table_dims[] = { [0 ... DIMS - 1] = 1 };
md_copy_dims(DIMS, single_channel_table_dims, table_dims);
single_channel_table_dims[1] = 1;
const struct linop_s* K = linop_kern_create(gpun >= 0, reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, single_channel_table_dims);
t2 = timestamp();
debug_printf(DP_INFO, "\tK: %f seconds.\n", t2 - t1);
/* Single-channel forward model: table = K Fyz W Fx R coeffs. */
struct linop_s* A_sc = linop_chain_FF(linop_chain_FF(linop_chain_FF(linop_chain_FF(
R, Fx), W), Fyz), K);
debug_printf(DP_INFO, "Single channel forward operator information:\n");
print_opdims(A_sc);
if (eval < 0)
#ifdef USE_CUDA
eval = (gpun >= 0) ? estimate_maxeigenval_gpu(A_sc->normal) : estimate_maxeigenval(A_sc->normal);
#else
eval = estimate_maxeigenval(A_sc->normal);
#endif
debug_printf(DP_INFO, "\tMax eval: %.2e\n", eval);
/* Normalize the step size by the largest eigenvalue. */
step /= eval;
struct linop_s* A = linop_multc_create(nc, md, maps, A_sc);
debug_printf(DP_INFO, "Overall forward linear operator information:\n");
print_opdims(A);
/* -F: forward-simulate a data table from given coefficients and exit. */
if (fwd != NULL) {
debug_printf(DP_INFO, "Going from coefficients to data table... ");
complex float* coeffs_to_fwd = load_cfl(fwd, DIMS, coeff_dims);
complex float* table_forward = create_cfl(argv[6], DIMS, table_dims);
const struct linop_s* R = linop_wavereshape_create(wx, sx, sy, sz, 1, tk);
const struct linop_s* CFx = linop_fx_create( wx, sy, sz, 1, tk, true);
const struct linop_s* W = linop_wave_create(wx, sy, sz, 1, tk, wave);
const struct linop_s* CFyz = linop_fyz_create(wx, sy, sz, 1, tk, true);
const struct linop_s* K = linop_kern_create(gpun >= 0, reorder_dims, reorder, phi_dims, phi, kernel_dims, kernel, single_channel_table_dims);
struct linop_s* AC_sc = linop_chain_FF(linop_chain_FF(linop_chain_FF(linop_chain_FF(
R, CFx), W), CFyz), K);
struct linop_s* AC = linop_multc_create(nc, md, maps, AC_sc);
operator_apply(AC->forward, DIMS, table_dims, table_forward, DIMS, coeff_dims, coeffs_to_fwd);
debug_printf(DP_INFO, "Done.\n");
debug_printf(DP_INFO, "Cleaning up... ");
linop_free(AC);
linop_free(AC_sc);
md_free(kernel);
unmap_cfl(DIMS, maps_dims, maps);
unmap_cfl(DIMS, wave_dims, wave);
unmap_cfl(DIMS, phi_dims, phi);
unmap_cfl(DIMS, reorder_dims, reorder);
unmap_cfl(DIMS, table_dims, table);
unmap_cfl(DIMS, table_dims, table_forward);
debug_printf(DP_INFO, "Done.\n");
return 0;
}
/* -v: chain a complex-decomposition operator in front of A. */
if (dcx) {
debug_printf(DP_INFO, "\tSplitting result into real and imaginary components.\n");
struct linop_s* tmp = A;
struct linop_s* dcxop = linop_decompose_complex_create(DIMS, ITER_DIM, linop_domain(A)->dims);
A = linop_chain(dcxop, tmp);
linop_free(dcxop);
linop_free(tmp);
}
debug_printf(DP_INFO, "Normalizing data table and applying fftmod to table and maps... ");
float norm = md_znorm(DIMS, table_dims, table);
md_zsmul(DIMS, table_dims, table, table, 1. / norm);
fftmod_apply(sy, sz, reorder_dims, reorder, table_dims, table, maps_dims, maps);
debug_printf(DP_INFO, "Done.\n");
/* Regularization (threshold) operator selection. */
const struct operator_p_s* T = NULL;
long blkdims[MAX_LEV][DIMS];
long minsize[] = { [0 ... DIMS - 1] = 1 };
minsize[0] = MIN(sx, 16);
minsize[1] = MIN(sy, 16);
minsize[2] = MIN(sz, 16);
unsigned int WAVFLAG = (sx > 1) * READ_FLAG | (sy > 1) * PHS1_FLAG | (sz > 2) * PHS2_FLAG;
enum algo_t algo = CG;
if ((wav) || (llr) || (pf)) {
algo = (fista) ? FISTA : IST;
if (wav) {
debug_printf(DP_INFO, "Creating wavelet threshold operator... ");
T = prox_wavelet_thresh_create(DIMS, coeff_dims, WAVFLAG, 0u, minsize, lambda, true);
} else if (llr) {
debug_printf(DP_INFO, "Creating locally low rank threshold operator across coeff and real-imag... ");
llr_blkdims(blkdims, ~(COEFF_FLAG | ITER_FLAG), coeff_dims, blksize);
T = lrthresh_create(coeff_dims, true, ~(COEFF_FLAG | ITER_FLAG), (const long (*)[])blkdims, lambda, false, false, false);
} else {
assert(dcx);
debug_printf(DP_INFO, "Creating locally low rank threshold operator across real-imag... ");
llr_blkdims(blkdims, ~ITER_FLAG, coeff_dims, blksize);
T = lrthresh_create(coeff_dims, true, ~ITER_FLAG, (const long (*)[])blkdims, lambda, false, false, false);
}
debug_printf(DP_INFO, "Done.\n");
}
/* Iterative-algorithm configuration. */
italgo_fun2_t italgo = iter2_call_iter;
struct iter_call_s iter2_data;
SET_TYPEID(iter_call_s, &iter2_data);
iter_conf* iconf = CAST_UP(&iter2_data);
struct iter_conjgrad_conf cgconf = iter_conjgrad_defaults;
struct iter_fista_conf fsconf = iter_fista_defaults;
struct iter_ist_conf isconf = iter_ist_defaults;
switch(algo) {
case IST:
debug_printf(DP_INFO, "Using IST.\n");
debug_printf(DP_INFO, "\tLambda: %0.2e\n", lambda);
debug_printf(DP_INFO, "\tMaximum iterations: %d\n", maxiter);
debug_printf(DP_INFO, "\tStep size: %0.2e\n", step);
debug_printf(DP_INFO, "\tHogwild: %d\n", (int) hgwld);
debug_printf(DP_INFO, "\tTolerance: %0.2e\n", tol);
debug_printf(DP_INFO, "\tContinuation: %0.2e\n", cont);
isconf = iter_ist_defaults;
isconf.step = step;
isconf.maxiter = maxiter;
isconf.tol = tol;
isconf.continuation = cont;
isconf.hogwild = hgwld;
iter2_data.fun = iter_ist;
iter2_data._conf = CAST_UP(&isconf);
break;
case FISTA:
debug_printf(DP_INFO, "Using FISTA.\n");
debug_printf(DP_INFO, "\tLambda: %0.2e\n", lambda);
debug_printf(DP_INFO, "\tMaximum iterations: %d\n", maxiter);
debug_printf(DP_INFO, "\tStep size: %0.2e\n", step);
debug_printf(DP_INFO, "\tHogwild: %d\n", (int) hgwld);
debug_printf(DP_INFO, "\tTolerance: %0.2e\n", tol);
debug_printf(DP_INFO, "\tContinuation: %0.2e\n", cont);
fsconf = iter_fista_defaults;
fsconf.maxiter = maxiter;
fsconf.step = step;
fsconf.hogwild = hgwld;
fsconf.tol = tol;
fsconf.continuation = cont;
iter2_data.fun = iter_fista;
iter2_data._conf = CAST_UP(&fsconf);
break;
default:
case CG:
debug_printf(DP_INFO, "Using CG.\n");
debug_printf(DP_INFO, "\tMaximum iterations: %d\n", maxiter);
debug_printf(DP_INFO, "\tTolerance: %0.2e\n", tol);
cgconf = iter_conjgrad_defaults;
cgconf.maxiter = maxiter;
cgconf.l2lambda = 0;
cgconf.tol = tol;
iter2_data.fun = iter_conjgrad;
iter2_data._conf = CAST_UP(&cgconf);
break;
}
complex float* init = NULL;
if (x0 != NULL) {
debug_printf(DP_INFO, "Loading in initial guess... ");
init = load_cfl(x0, DIMS, coeff_dims);
debug_printf(DP_INFO, "Done.\n");
}
debug_printf(DP_INFO, "Reconstruction... ");
complex float* recon = create_cfl(argv[6], DIMS, coeff_dims);
struct lsqr_conf lsqr_conf = { 0., gpun >= 0 };
double recon_start = timestamp();
/* NOTE(review): init is complex float* cast to const float* — presumably
 * the lsqr2_create interface expects raw float data; verify. */
const struct operator_p_s* J = lsqr2_create(&lsqr_conf, italgo, iconf, (const float*) init, A, NULL, 1, &T, NULL, NULL);
operator_p_apply(J, 1., DIMS, coeff_dims, recon, DIMS, table_dims, table);
double recon_end = timestamp();
debug_printf(DP_INFO, "Done.\nReconstruction time: %f seconds.\n", recon_end - recon_start);
debug_printf(DP_INFO, "Cleaning up and saving result... ");
operator_p_free(J);
linop_free(A);
linop_free(A_sc);
md_free(kernel);
unmap_cfl(DIMS, maps_dims, maps);
unmap_cfl(DIMS, wave_dims, wave);
unmap_cfl(DIMS, phi_dims, phi);
unmap_cfl(DIMS, reorder_dims, reorder);
unmap_cfl(DIMS, table_dims, table);
unmap_cfl(DIMS, coeff_dims, recon);
if (x0 != NULL)
unmap_cfl(DIMS, coeff_dims, init);
debug_printf(DP_INFO, "Done.\n");
double end_time = timestamp();
debug_printf(DP_INFO, "Total time: %f seconds.\n", end_time - start_time);
return 0;
}
|
9377.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#define EXTRALARGE_DATASET
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "correlation.h"
/* Array initialization. */
/* Initialize the m x n data matrix with deterministic values
 * (data[i][j] = i*j / M) and set the fixed normalization constant. */
static
void init_array (int m,
int n,
DATA_TYPE *float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
int i, j;
*float_n = 1.2;
for (i = 0; i < m; i++)
for (j = 0; j < n; j++)
data[i][j] = ((DATA_TYPE) i*j) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Print the m x m correlation matrix to stderr (20 values per line).
 * Scanning all live-out data also prevents dead-code elimination. */
static
void print_array(int m,
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
int i, j;
for (i = 0; i < m; i++)
for (j = 0; j < m; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]);
if ((i * m + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Compute the m x m correlation matrix of the n x m data matrix:
 * per-column means and standard deviations, center-and-reduce the
 * columns, then accumulate pairwise column products.
 *
 * OpenMP notes: each worksharing loop's index (j, i, j1) is implicitly
 * private per the OpenMP spec; i, j, j2 are additionally privatized via
 * the clause. Implicit barriers between the `omp for` loops order the
 * four phases correctly. */
static
void kernel_correlation(int m, int n,
DATA_TYPE float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
DATA_TYPE POLYBENCH_1D(mean,M,m),
DATA_TYPE POLYBENCH_1D(stddev,M,m))
{
int i, j, j1, j2;
DATA_TYPE eps = 0.1f;
#define sqrt_of_array_cell(x,j) sqrt(x[j])
#pragma scop
/* Determine mean of column vectors of input data matrix */
#pragma omp parallel private(i, j, j2) num_threads(4)
{
#pragma omp for schedule(static, 16)
for (j = 0; j < _PB_M; j++)
{
mean[j] = 0.0;
for (i = 0; i < _PB_N; i++)
mean[j] += data[i][j];
mean[j] /= float_n;
}
/* Determine standard deviations of column vectors of data matrix. */
#pragma omp for schedule(static, 16)
for (j = 0; j < _PB_M; j++)
{
stddev[j] = 0.0;
for (i = 0; i < _PB_N; i++)
stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]);
stddev[j] /= float_n;
stddev[j] = sqrt_of_array_cell(stddev, j);
/* The following in an inelegant but usual way to handle
near-zero std. dev. values, which below would cause a zero-
divide. */
stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j];
}
/* Center and reduce the column vectors. */
#pragma omp for schedule(static, 16)
for (i = 0; i < _PB_N; i++)
for (j = 0; j < _PB_M; j++)
{
data[i][j] -= mean[j];
data[i][j] /= sqrt(float_n) * stddev[j];
}
/* Calculate the m * m correlation matrix. */
#pragma omp for schedule(static, 16)
for (j1 = 0; j1 < _PB_M-1; j1++)
{
symmat[j1][j1] = 1.0;
for (j2 = j1+1; j2 < _PB_M; j2++)
{
symmat[j1][j2] = 0.0;
for (i = 0; i < _PB_N; i++)
symmat[j1][j2] += (data[i][j1] * data[i][j2]);
symmat[j2][j1] = symmat[j1][j2];
}
}
}
#pragma endscop
/* Last diagonal entry is not touched by the j1 loop above. */
symmat[_PB_M-1][_PB_M-1] = 1.0;
}
/* Benchmark driver: allocate arrays, initialize, time the kernel, and print
   the live-out correlation matrix to defeat dead-code elimination. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_correlation (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(mean),
POLYBENCH_ARRAY(stddev));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(mean);
POLYBENCH_FREE_ARRAY(stddev);
return 0;
}
|
588f87_so4_icc_advfsg.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"
#include <stdio.h>
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))
struct dataobj
{
void *restrict data;
int *size;
int *npsize;
int *dsize;
int *hsize;
int *hofs;
int *oofs;
};
struct profiler
{
double section0;
double section1;
double section2;
};
void bf0(float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw);
void bf1(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int t1, const int t2, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw);
/*
 * ForwardTTI: driver for the time-tiled TTI forward propagation.
 *
 * Section 0 precomputes the time-invariant per-point factors (cos/sin of
 * phi/theta into r18..r21, sqrt(2*delta+1) into r17).  The main loop nest
 * then sweeps time-skewed (xb, yb) tiles, calling bf0 (rotated first
 * differences into r34/r35) and bf1 (wavefield update + source injection)
 * per time step.  Returns 0 on success, 1 on allocation failure (the
 * original ignored posix_memalign's return value and would have used an
 * indeterminate pointer on OOM).
 */
int ForwardTTI(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict phi_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_size, const int y_size, const int z_size, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine)
{
  /* Typed views for the dataobjs this function actually reads.  The other
   * dataobjs (u, v, sources, masks, ...) are only forwarded to bf0/bf1,
   * which derive their own views, so their unused local casts were removed. */
  int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
  float(*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[delta_vec->size[1]][delta_vec->size[2]])delta_vec->data;
  float(*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__((aligned(64))) = (float(*)[phi_vec->size[1]][phi_vec->size[2]])phi_vec->data;
  float(*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[theta_vec->size[1]][theta_vec->size[2]])theta_vec->data;
  /* Scratch volumes (one-cell halo) for the precomputed factors and the
   * rotated first differences. */
  float(*r17)[y_size + 1][z_size + 1] = NULL;
  float(*r18)[y_size + 1][z_size + 1] = NULL;
  float(*r19)[y_size + 1][z_size + 1] = NULL;
  float(*r20)[y_size + 1][z_size + 1] = NULL;
  float(*r21)[y_size + 1][z_size + 1] = NULL;
  float(*r34)[y_size + 1][z_size + 1] = NULL;
  float(*r35)[y_size + 1][z_size + 1] = NULL;
  int err = 0;
  /* posix_memalign leaves the pointer indeterminate on failure, so null it
   * and jump to the shared cleanup (free(NULL) is a no-op). */
#define FWD_ALLOC(p) \
  do { \
    if (posix_memalign((void **)&(p), 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1])) != 0) \
    { (p) = NULL; err = 1; goto cleanup; } \
  } while (0)
  FWD_ALLOC(r21);
  FWD_ALLOC(r20);
  FWD_ALLOC(r19);
  FWD_ALLOC(r18);
  FWD_ALLOC(r17);
  FWD_ALLOC(r34);
  FWD_ALLOC(r35);
#undef FWD_ALLOC
  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
  struct timeval start_section0, end_section0;
  gettimeofday(&start_section0, NULL);
  /* Begin section0: time-invariant trig/anisotropy factors, one pass. */
#pragma omp parallel num_threads(nthreads)
  {
#pragma omp for collapse(2) schedule(static, 1)
    for (int x = x_m - 1; x <= x_M; x += 1)
    {
      for (int y = y_m - 1; y <= y_M; y += 1)
      {
#pragma omp simd aligned(delta, phi, theta : 32)
        for (int z = z_m - 1; z <= z_M; z += 1)
        {
          r21[x + 1][y + 1][z + 1] = cos(phi[x + 4][y + 4][z + 4]);
          r20[x + 1][y + 1][z + 1] = sin(theta[x + 4][y + 4][z + 4]);
          r19[x + 1][y + 1][z + 1] = sin(phi[x + 4][y + 4][z + 4]);
          r18[x + 1][y + 1][z + 1] = cos(theta[x + 4][y + 4][z + 4]);
          r17[x + 1][y + 1][z + 1] = sqrt(2 * delta[x + 4][y + 4][z + 4] + 1);
        }
      }
    }
  }
  /* End section0 */
  gettimeofday(&end_section0, NULL);
  timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
  /* Tile/block geometry packed by the caller into block_sizes[0..3]. */
  int y0_blk0_size = block_sizes[3];
  int x0_blk0_size = block_sizes[2];
  int yb_size = block_sizes[1];
  int xb_size = block_sizes[0];
  int sf = 2;
  int t_blk_size = 2 * sf * (time_M - time_m);
  printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);
  /* Time-tiled wavefront sweep; t0/t1/t2 rotate over the 3 time slices. */
  for (int t_blk = time_m; t_blk <= 1 + sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
  {
    for (int xb = x_m - 1; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size)
    {
      for (int yb = y_m - 1; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size)
      {
        for (int time = t_blk, t0 = (time) % (3), t1 = (time + 1) % (3), t2 = (time + 2) % (3); time <= 2 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1))) % (3), t1 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3))
        {
          int tw = ((time / sf) % (time_M - time_m + 1));
          struct timeval start_section1, end_section1;
          gettimeofday(&start_section1, NULL);
          /* Begin section1 */
          bf0((float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, x_size, y_size, z_size, time, t0, x0_blk0_size, x_M, x_m - 1, y0_blk0_size, y_M, y_m - 1, z_M, z_m, nthreads, xb, yb, xb_size, yb_size, tw);
          bf1(damp_vec, dt, epsilon_vec, (float *)r17, (float *)r18, (float *)r19, (float *)r20, (float *)r21, (float *)r34, (float *)r35, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x_size, y_size, z_size, time, t0, t1, t2, x0_blk0_size, x_M, x_m, y0_blk0_size, y_M, y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, tw);
          /* End section1 */
          gettimeofday(&end_section1, NULL);
          timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000;
        }
      }
    }
  }
cleanup:
  free(r21);
  free(r20);
  free(r19);
  free(r18);
  free(r17);
  free(r34);
  free(r35);
  return err;
}
/*
 * bf0: first compute phase of one time step.
 *
 * For every point of the current (xb, yb) space-time tile it fills
 * r35 (from v) and r34 (from u): each is a 1.0e-1F-scaled combination of
 * forward differences of the wavefield along z, y and x, weighted by the
 * trig factors r18..r21 (cos/sin of theta/phi, precomputed in ForwardTTI's
 * section0).  The tiling is time-skewed: loop bounds are shifted by `time`
 * and array accesses use (x - time), (y - time) to undo the skew; t0
 * selects which time slice of u/v is read.
 * NOTE(review): assumes the r*_vec buffers hold
 * (x_size+1)*(y_size+1)*(z_size+1) floats, as allocated by ForwardTTI.
 */
void bf0(float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw)
{
/* Re-derive typed 3D views over the flat scratch buffers. */
float(*restrict r18)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r18_vec;
float(*restrict r19)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r19_vec;
float(*restrict r20)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r20_vec;
float(*restrict r21)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r21_vec;
float(*restrict r34)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r34_vec;
float(*restrict r35)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r35_vec;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
#pragma omp parallel num_threads(nthreads)
{
/* Distribute (x, y) sub-blocks; bounds are clipped against both the tile
   [xb, xb+xb_size] and the skewed domain [x_m+time, x_M+time]. */
#pragma omp for collapse(2) schedule(dynamic, 1)
for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
{
for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++)
{
for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++)
{
#pragma omp simd aligned(u, v : 32)
for (int z = z_m - 1; z <= z_M; z += 1)
{
/* r39 holds -v at the center point, so (r39 + v[... + 1 offset]) below is
   a forward difference of v along z, y and x respectively. */
float r39 = -v[t0][x - time + 4][y - time + 4][z + 4];
r35[x - time + 1][y - time + 1][z + 1] = 1.0e-1F * (-(r39 + v[t0][x - time + 4][y - time + 4][z + 5]) * r18[x - time + 1][y - time + 1][z + 1] - (r39 + v[t0][x - time + 4][y - time + 5][z + 4]) * r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] - (r39 + v[t0][x - time + 5][y - time + 4][z + 4]) * r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1]);
/* Same combination for u. */
float r40 = -u[t0][x - time + 4][y - time + 4][z + 4];
r34[x - time + 1][y - time + 1][z + 1] = 1.0e-1F * (-(r40 + u[t0][x - time + 4][y - time + 4][z + 5]) * r18[x - time + 1][y - time + 1][z + 1] - (r40 + u[t0][x - time + 4][y - time + 5][z + 4]) * r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] - (r40 + u[t0][x - time + 5][y - time + 4][z + 4]) * r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1]);
}
}
}
}
}
}
}
/*
 * bf1: second compute phase — advances u and v by one time step.
 *
 * Combines backward differences of the bf0 outputs r34/r35 (weighted by the
 * trig factors r18..r21) with a wider finite-difference stencil of u,
 * damping, epsilon (anisotropy) and velocity vp to write the t1 slices of
 * u and v from slices t0 and t2.  After the z loop, sources are injected at
 * the sparse z positions listed in sp_source_mask (counts from
 * nnz_sp_source_mask), scaling save_src_u/save_src_v by source_mask.
 * Same time-skewed tiling and (x - time), (y - time) indexing as bf0.
 */
void bf1(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, float *restrict r34_vec, float *restrict r35_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x_size, const int y_size, const int z_size, const int time, const int t0, const int t1, const int t2, const int x1_blk0_size, const int x_M, const int x_m, const int y1_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int tw)
{
/* Typed views over the dataobjs and scratch buffers. */
float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
float(*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__((aligned(64))) = (float(*)[epsilon_vec->size[1]][epsilon_vec->size[2]])epsilon_vec->data;
float(*restrict r17)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r17_vec;
float(*restrict r18)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r18_vec;
float(*restrict r19)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r19_vec;
float(*restrict r20)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r20_vec;
float(*restrict r21)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r21_vec;
float(*restrict r34)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r34_vec;
float(*restrict r35)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r35_vec;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
#pragma omp parallel num_threads(nthreads)
{
/* Same tile/domain clipping scheme as bf0. */
#pragma omp for collapse(2) schedule(dynamic, 1)
for (int x1_blk0 = max((x_m + time), xb - 0); x1_blk0 <= +min((x_M + time), (xb - 0 + xb_size)); x1_blk0 += x1_blk0_size)
{
for (int y1_blk0 = max((y_m + time), yb - 0); y1_blk0 <= +min((y_M + time), (yb - 0 + yb_size)); y1_blk0 += y1_blk0_size)
{
for (int x = x1_blk0; x <= min(min((x_M + time), (xb - 0 + xb_size - 1)), (x1_blk0 + x1_blk0_size - 1)); x++)
{
for (int y = y1_blk0; y <= min(min((y_M + time), (yb - 0 + yb_size - 1)), (y1_blk0 + y1_blk0_size - 1)); y++)
{
#pragma omp simd aligned(damp, epsilon, u, v, vp : 32)
for (int z = z_m; z <= z_M; z += 1)
{
/* r46/r45: 1/dt and 1/dt^2 time-discretization factors. */
float r46 = 1.0 / dt;
float r45 = 1.0 / (dt * dt);
/* r44: backward differences of r35 (v-based) along z, y, x, rotated. */
float r44 = r18[x - time + 1][y - time + 1][z] * r35[x - time + 1][y - time + 1][z] - r18[x - time + 1][y - time + 1][z + 1] * r35[x - time + 1][y - time + 1][z + 1] + r19[x - time + 1][y - time][z + 1] * r20[x - time + 1][y - time][z + 1] * r35[x - time + 1][y - time][z + 1] - r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] * r35[x - time + 1][y - time + 1][z + 1] + r20[x - time][y - time + 1][z + 1] * r21[x - time][y - time + 1][z + 1] * r35[x - time][y - time + 1][z + 1] - r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1] * r35[x - time + 1][y - time + 1][z + 1];
/* r43: slowness squared, 1/vp^2. */
float r43 = pow(vp[x - time + 4][y - time + 4][z + 4], -2);
/* r42: rotated differences of r34 (u-based) plus a 2-points-per-side
   finite-difference stencil of u in all three axes. */
float r42 = 1.0e-1F * (-r18[x - time + 1][y - time + 1][z] * r34[x - time + 1][y - time + 1][z] + r18[x - time + 1][y - time + 1][z + 1] * r34[x - time + 1][y - time + 1][z + 1] - r19[x - time + 1][y - time][z + 1] * r20[x - time + 1][y - time][z + 1] * r34[x - time + 1][y - time][z + 1] + r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] * r34[x - time + 1][y - time + 1][z + 1] - r20[x - time][y - time + 1][z + 1] * r21[x - time][y - time + 1][z + 1] * r34[x - time][y - time + 1][z + 1] + r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1] * r34[x - time + 1][y - time + 1][z + 1]) - 8.33333315e-4F * (u[t0][x - time + 2][y - time + 4][z + 4] + u[t0][x - time + 4][y - time + 2][z + 4] + u[t0][x - time + 4][y - time + 4][z + 2] + u[t0][x - time + 4][y - time + 4][z + 6] + u[t0][x - time + 4][y - time + 6][z + 4] + u[t0][x - time + 6][y - time + 4][z + 4]) + 1.3333333e-2F * (u[t0][x - time + 3][y - time + 4][z + 4] + u[t0][x - time + 4][y - time + 3][z + 4] + u[t0][x - time + 4][y - time + 4][z + 3] + u[t0][x - time + 4][y - time + 4][z + 5] + u[t0][x - time + 4][y - time + 5][z + 4] + u[t0][x - time + 5][y - time + 4][z + 4]) - 7.49999983e-2F * u[t0][x - time + 4][y - time + 4][z + 4];
/* r41: inverse of the implicit update denominator (mass + damping). */
float r41 = 1.0 / (r43 * r45 + r46 * damp[x - time + 1][y - time + 1][z + 1]);
/* r32/r33: second time derivatives of u and v (t2 and t0 slices). */
float r32 = r45 * (-2.0F * u[t0][x - time + 4][y - time + 4][z + 4] + u[t2][x - time + 4][y - time + 4][z + 4]);
float r33 = r45 * (-2.0F * v[t0][x - time + 4][y - time + 4][z + 4] + v[t2][x - time + 4][y - time + 4][z + 4]);
u[t1][x - time + 4][y - time + 4][z + 4] = r41 * ((-r32) * r43 + r42 * (2 * epsilon[x - time + 4][y - time + 4][z + 4] + 1) + 1.0e-1F * r44 * r17[x - time + 1][y - time + 1][z + 1] + r46 * (damp[x - time + 1][y - time + 1][z + 1] * u[t0][x - time + 4][y - time + 4][z + 4]));
v[t1][x - time + 4][y - time + 4][z + 4] = r41 * ((-r33) * r43 + r42 * r17[x - time + 1][y - time + 1][z + 1] + 1.0e-1F * r44 + r46 * (damp[x - time + 1][y - time + 1][z + 1] * v[t0][x - time + 4][y - time + 4][z + 4]));
}
/* Sparse source injection: nnz_sp_source_mask gives the number of active
   z positions at this (x, y); sp_source_mask lists them. */
for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
{
int zind = sp_source_mask[x - time][y - time][sp_zi];
float r22 = save_src_u[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
u[t1][x - time + 4][y - time + 4][zind + 4] += r22;
float r23 = save_src_v[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
v[t1][x - time + 4][y - time + 4][zind + 4] += r23;
}
}
}
}
}
}
}
|
ej3.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <unistd.h>
#define TAM 4096
/* Fill the first TAM entries of V with the constant 100.0f. */
void rellenarArray(float *V){
    for (int i = 0; i < TAM; ++i)
        V[i] = 100.0f;
}
/* Run the timed parallel vector addition c = a + b once with `number`
 * threads and print the elapsed wall-clock time.  Extracted to remove four
 * identical copies of the timing/printf boilerplate from main(). */
static void medirSuma(int number, const float *a, const float *b, float *c)
{
    double start = omp_get_wtime();
    //PROGRAMA
    #pragma omp parallel for num_threads(number)
    for(int i=0; i<TAM; ++i)
        c[i] = a[i] + b[i];
    //FIN PROGRAMA
    printf("\n-------------------------------------------\nTiempo de ejecucion del programa con %i hilos: %lfs\n-------------------------------------------\n",number,omp_get_wtime()-start);
}
/* Driver: fill a and b in parallel sections, then time the addition with
 * 2, 4, 6 and 8 threads.  The original never checked malloc and leaked all
 * three buffers. */
int main() {
    float *a = malloc(sizeof(float)*TAM);
    float *b = malloc(sizeof(float)*TAM);
    float *c = malloc(sizeof(float)*TAM);
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "malloc failed\n");
        free(a);
        free(b);
        free(c);
        return 1;
    }
    /* Initialize a and b concurrently, one omp section per array. */
    #pragma omp parallel num_threads(2)
    {
        #pragma omp sections
        {
            #pragma omp section
            rellenarArray(a);
            #pragma omp section
            rellenarArray(b);
        }
    }
    //2 Hilos
    medirSuma(2, a, b, c);
    //4 Hilos
    medirSuma(4, a, b, c);
    //6 Hilos
    medirSuma(6, a, b, c);
    //8 Hilos
    medirSuma(8, a, b, c);
    free(a);
    free(b);
    free(c);
    return 0;
}
|
parallel-firstprivate.c | /*
* parallel-firstprivate.c -- Archer testcase
*/
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
//
// See tools/archer/LICENSE.txt for details.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// RUN: %libarcher-compile-and-run | FileCheck %s
// REQUIRES: tsan
#include <omp.h>
#include <stdio.h>
/* Archer/TSan regression: firstprivate(var) gives each thread its own copy
 * of var (initialized from the original), so the concurrent writes below
 * touch distinct storage — no data race — and the original var stays 0. */
int main(int argc, char *argv[]) {
int var = 0;
#pragma omp parallel num_threads(2) firstprivate(var)
{ var = 1; } // writes the per-thread copy only
fprintf(stderr, "DONE\n");
// var should still be 0!
return var;
}
// CHECK-NOT: ThreadSanitizer: data race
// CHECK-NOT: ThreadSanitizer: reported
// CHECK: DONE
|
ocp_nlp_sqp.c | /*
* Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
* Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
* Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
* Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
*
* This file is part of acados.
*
* The 2-Clause BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.;
*/
#include "acados/ocp_nlp/ocp_nlp_sqp.h"
// external
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif
// blasfeo
#include "blasfeo/include/blasfeo_d_aux.h"
#include "blasfeo/include/blasfeo_d_aux_ext_dep.h"
#include "blasfeo/include/blasfeo_d_blas.h"
// acados
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h"
#include "acados/ocp_nlp/ocp_nlp_reg_common.h"
#include "acados/ocp_qp/ocp_qp_common.h"
#include "acados/utils/mem.h"
#include "acados/utils/print.h"
#include "acados/utils/timing.h"
#include "acados/utils/types.h"
#include "acados_c/ocp_qp_interface.h"
/************************************************
* options
************************************************/
/* Number of bytes needed for an ocp_nlp_sqp_opts: the SQP options struct
 * itself followed by the nested generic NLP options. */
int ocp_nlp_sqp_opts_calculate_size(void *config_, void *dims_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    return sizeof(ocp_nlp_sqp_opts) + ocp_nlp_opts_calculate_size(config, dims);
}
/* Place an ocp_nlp_sqp_opts into raw_memory: the SQP struct first, then the
 * nested NLP options region.  Asserts the caller provided enough memory. */
void *ocp_nlp_sqp_opts_assign(void *config_, void *dims_, void *raw_memory)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    char *ptr = (char *) raw_memory;

    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) ptr;
    ptr += sizeof(ocp_nlp_sqp_opts);

    opts->nlp_opts = ocp_nlp_opts_assign(config, dims, ptr);
    ptr += ocp_nlp_opts_calculate_size(config, dims);

    assert((char *) raw_memory + ocp_nlp_sqp_opts_calculate_size(config, dims) >= ptr);

    return opts;
}
/* Fill an ocp_nlp_sqp_opts with defaults: first the nested NLP options,
 * then the SQP-level fields, then push the SQP tolerances down into the QP
 * solver options so the subproblems are solved at matching accuracy. */
void ocp_nlp_sqp_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
// int ii;
// this first !!! (SQP defaults below must overwrite the generic ones)
ocp_nlp_opts_initialize_default(config, dims, nlp_opts);
// SQP opts
opts->max_iter = 20;
opts->tol_stat = 1e-8;
opts->tol_eq = 1e-8;
opts->tol_ineq = 1e-8;
opts->tol_comp = 1e-8;
opts->ext_qp_res = 0;
opts->qp_warm_start = 0;
opts->warm_start_first_qp = false;
opts->rti_phase = 0;
opts->print_level = 0;
opts->initialize_t_slacks = 0;
// overwrite default submodules opts
// qp tolerance
qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_stat", &opts->tol_stat);
qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_eq", &opts->tol_eq);
qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_ineq", &opts->tol_ineq);
qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_comp", &opts->tol_comp);
return;
}
void ocp_nlp_sqp_opts_update(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
ocp_nlp_opts_update(config, dims, nlp_opts);
return;
}
/* Set a single SQP option by name.
 *
 * If `field` starts with the module prefix "qp_", the option is forwarded
 * to the generic NLP options (and qp_warm_start is mirrored locally).
 * Otherwise the SQP-level fields are matched first; the four tolerances are
 * additionally pushed down to the QP solver, and unknown fields fall
 * through to ocp_nlp_opts_set.  Invalid values for rti_phase, print_level
 * and initialize_t_slacks terminate via exit(1), per module convention. */
void ocp_nlp_sqp_opts_set(void *config_, void *opts_, const char *field, void* value)
{
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
int ii;
char module[MAX_STR_LEN];
char *ptr_module = NULL;
int module_length = 0;
// extract module name (text before the first '_')
char *char_ = strchr(field, '_');
if (char_!=NULL)
{
module_length = char_-field;
// bound the copy: the original wrote past module[MAX_STR_LEN-1] when the
// first '_' occurred at position >= MAX_STR_LEN (out-of-bounds write);
// truncating is safe since no real module prefix is that long
if (module_length > MAX_STR_LEN-1)
module_length = MAX_STR_LEN-1;
for (ii=0; ii<module_length; ii++)
module[ii] = field[ii];
module[module_length] = '\0'; // add end of string
ptr_module = module;
}
// pass options to QP module
if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
{
ocp_nlp_opts_set(config, nlp_opts, field, value);
// keep the local copy of the warm-start flag in sync
if (!strcmp(field, "qp_warm_start"))
{
int* i_ptr = (int *) value;
opts->qp_warm_start = *i_ptr;
}
}
else // nlp opts
{
if (!strcmp(field, "max_iter"))
{
int* max_iter = (int *) value;
opts->max_iter = *max_iter;
}
else if (!strcmp(field, "tol_stat"))
{
double* tol_stat = (double *) value;
opts->tol_stat = *tol_stat;
// TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_stat", value);
}
else if (!strcmp(field, "tol_eq"))
{
double* tol_eq = (double *) value;
opts->tol_eq = *tol_eq;
// TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_eq", value);
}
else if (!strcmp(field, "tol_ineq"))
{
double* tol_ineq = (double *) value;
opts->tol_ineq = *tol_ineq;
// TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_ineq", value);
}
else if (!strcmp(field, "tol_comp"))
{
double* tol_comp = (double *) value;
opts->tol_comp = *tol_comp;
// TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified.
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_comp", value);
}
else if (!strcmp(field, "ext_qp_res"))
{
int* ext_qp_res = (int *) value;
opts->ext_qp_res = *ext_qp_res;
}
else if (!strcmp(field, "warm_start_first_qp"))
{
bool* warm_start_first_qp = (bool *) value;
opts->warm_start_first_qp = *warm_start_first_qp;
}
else if (!strcmp(field, "rti_phase"))
{
int* rti_phase = (int *) value;
// only phase 0 is supported (was `< 0 || > 0`, i.e. the same check)
if (*rti_phase != 0) {
printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for rti_phase field.");
printf("possible values are: 0\n");
exit(1);
} else opts->rti_phase = *rti_phase;
}
else if (!strcmp(field, "print_level"))
{
int* print_level = (int *) value;
if (*print_level < 0)
{
printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for print_level field, need int >=0, got %d.", *print_level);
exit(1);
}
opts->print_level = *print_level;
}
else if (!strcmp(field, "initialize_t_slacks"))
{
int* initialize_t_slacks = (int *) value;
if (*initialize_t_slacks != 0 && *initialize_t_slacks != 1)
{
printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for initialize_t_slacks field, need int 0 or 1, got %d.", *initialize_t_slacks);
exit(1);
}
opts->initialize_t_slacks = *initialize_t_slacks;
}
else
{
// anything unrecognized goes to the generic NLP options setter
ocp_nlp_opts_set(config, nlp_opts, field, value);
}
}
return;
}
void ocp_nlp_sqp_opts_set_at_stage(void *config_, void *opts_, int stage, const char *field, void* value)
{
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
ocp_nlp_opts_set_at_stage(config, nlp_opts, stage, field, value);
return;
}
/************************************************
* memory
************************************************/
// Compute the number of bytes required for ocp_nlp_sqp_memory: the struct
// itself, the nested generic nlp memory, and the per-iteration statistics
// table ((max_iter+1) rows; 6 columns, plus 4 extra columns when external
// QP residuals are requested). Must stay consistent with
// ocp_nlp_sqp_memory_assign.
int ocp_nlp_sqp_memory_calculate_size(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
// int N = dims->N;
// int *nx = dims->nx;
// int *nu = dims->nu;
// int *nz = dims->nz;
int size = 0;
size += sizeof(ocp_nlp_sqp_memory);
// nlp mem
size += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);
// stat table: one row per SQP iteration (+1 for the final QP stats)
int stat_m = opts->max_iter+1;
int stat_n = 6;
if (opts->ext_qp_res)
stat_n += 4; // 4 extra columns for the externally computed QP residuals
size += stat_n*stat_m*sizeof(double);
size += 3*8; // align
make_int_multiple_of(8, &size);
return size;
}
// Lay out ocp_nlp_sqp_memory inside the caller-provided raw_memory.
// The pointer-bump sequence here must match ocp_nlp_sqp_memory_calculate_size
// exactly; the final assert checks we did not overrun the computed size.
void *ocp_nlp_sqp_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
// ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
// ocp_nlp_dynamics_config **dynamics = config->dynamics;
// ocp_nlp_cost_config **cost = config->cost;
// ocp_nlp_constraints_config **constraints = config->constraints;
char *c_ptr = (char *) raw_memory;
// int N = dims->N;
// int *nx = dims->nx;
// int *nu = dims->nu;
// int *nz = dims->nz;
// initial align
align_char_to(8, &c_ptr);
ocp_nlp_sqp_memory *mem = (ocp_nlp_sqp_memory *) c_ptr;
c_ptr += sizeof(ocp_nlp_sqp_memory);
align_char_to(8, &c_ptr);
// nlp mem
mem->nlp_mem = ocp_nlp_memory_assign(config, dims, nlp_opts, c_ptr);
c_ptr += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);
// stat: (max_iter+1) x stat_n table of per-iteration statistics
mem->stat = (double *) c_ptr;
mem->stat_m = opts->max_iter+1;
mem->stat_n = 6;
if (opts->ext_qp_res)
mem->stat_n += 4;
c_ptr += mem->stat_m*mem->stat_n*sizeof(double);
mem->status = ACADOS_READY;
align_char_to(8, &c_ptr);
// verify the assigned layout fits within the computed size
assert((char *) raw_memory + ocp_nlp_sqp_memory_calculate_size(config, dims, opts) >= c_ptr);
return mem;
}
/************************************************
* workspace
************************************************/
// Compute the number of bytes required for ocp_nlp_sqp_workspace: the struct,
// the nested nlp workspace, temporary QP in/out structures (used e.g. for
// parametric sensitivities), and optionally QP residual storage. Must stay
// consistent with ocp_nlp_sqp_cast_workspace.
int ocp_nlp_sqp_workspace_calculate_size(void *config_, void *dims_, void *opts_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
int size = 0;
// sqp
size += sizeof(ocp_nlp_sqp_workspace);
// nlp
size += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);
// tmp qp in
size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
// tmp qp out
size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
if (opts->ext_qp_res)
{
// qp res
size += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
// qp res ws
size += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
}
return size;
}
// Lay out the pointers of ocp_nlp_sqp_workspace inside the raw workspace
// buffer. Called at the start of every solver entry point, since the
// workspace buffer may be reallocated between calls. The pointer-bump
// sequence must match ocp_nlp_sqp_workspace_calculate_size exactly.
static void ocp_nlp_sqp_cast_workspace(ocp_nlp_config *config, ocp_nlp_dims *dims,
ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_workspace *work)
{
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
ocp_nlp_memory *nlp_mem = mem->nlp_mem;
// sqp
char *c_ptr = (char *) work;
c_ptr += sizeof(ocp_nlp_sqp_workspace);
// nlp
work->nlp_work = ocp_nlp_workspace_assign(config, dims, nlp_opts, nlp_mem, c_ptr);
c_ptr += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);
// tmp qp in
work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
// tmp qp out
work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
if (opts->ext_qp_res)
{
// qp res
work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
// qp res ws
work->qp_res_ws = ocp_qp_res_workspace_assign(dims->qp_solver->orig_dims, c_ptr);
c_ptr += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
}
// verify the assigned layout fits within the computed size
assert((char *) work + ocp_nlp_sqp_workspace_calculate_size(config, dims, opts) >= c_ptr);
return;
}
/************************************************
* functions
************************************************/
// Main SQP loop: repeatedly linearize the NLP, solve the resulting QP
// subproblem, globalize via line search and update the iterate, until the
// residual tolerances are met, the QP solver fails, or max_iter is reached.
// Returns an ACADOS_* status code (also stored in mem->status).
int ocp_nlp_sqp(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
void *opts_, void *mem_, void *work_)
{
acados_timer timer0, timer1;
acados_tic(&timer0);
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_opts *nlp_opts = opts->nlp_opts;
ocp_nlp_sqp_memory *mem = mem_;
ocp_nlp_in *nlp_in = nlp_in_;
ocp_nlp_out *nlp_out = nlp_out_;
ocp_nlp_memory *nlp_mem = mem->nlp_mem;
ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_sqp_workspace *work = work_;
ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
ocp_nlp_workspace *nlp_work = work->nlp_work;
// zero timers
double total_time = 0.0;
double tmp_time;
mem->time_qp_sol = 0.0;
mem->time_qp_solver_call = 0.0;
mem->time_qp_xcond = 0.0;
mem->time_lin = 0.0;
mem->time_reg = 0.0;
mem->time_tot = 0.0;
mem->time_glob = 0.0;
int N = dims->N;
int ii;
int qp_iter = 0;
int qp_status = 0;
#if defined(ACADOS_WITH_OPENMP)
// backup number of threads
int num_threads_bkp = omp_get_num_threads();
// set number of threads
omp_set_num_threads(opts->nlp_opts->num_threads);
#pragma omp parallel
{ // beginning of parallel region
#endif
// The three loops below only wire pointers: the per-stage submodule
// memories are pointed at the current iterate (nlp_out) and the QP data
// (nlp_mem->qp_in), so no copies are needed during the iterations.
// alias to dynamics_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for
#endif
for (ii = 0; ii < N; ii++)
{
config->dynamics[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_ux1_ptr(nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_tmp_ux1_ptr(nlp_work->tmp_nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_pi_ptr(nlp_out->pi+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_tmp_pi_ptr(nlp_work->tmp_nlp_out->pi+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_BAbt_ptr(nlp_mem->qp_in->BAbt+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_dzduxt_ptr(nlp_mem->dzduxt+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_sim_guess_ptr(nlp_mem->sim_guess+ii, nlp_mem->set_sim_guess+ii, nlp_mem->dynamics[ii]);
config->dynamics[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->dynamics[ii]);
}
// alias to cost_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for
#endif
for (ii = 0; ii <= N; ii++)
{
config->cost[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->cost[ii]);
config->cost[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->cost[ii]);
config->cost[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->cost[ii]);
config->cost[ii]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+ii, nlp_mem->cost[ii]);
config->cost[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->cost[ii]);
config->cost[ii]->memory_set_Z_ptr(nlp_mem->qp_in->Z+ii, nlp_mem->cost[ii]);
}
// alias to constraints_memory
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for
#endif
for (ii = 0; ii <= N; ii++)
{
config->constraints[ii]->memory_set_ux_ptr(nlp_out->ux+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_tmp_ux_ptr(nlp_work->tmp_nlp_out->ux+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_lam_ptr(nlp_out->lam+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_tmp_lam_ptr(nlp_work->tmp_nlp_out->lam+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_z_alg_ptr(nlp_mem->z_alg+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_dzdux_tran_ptr(nlp_mem->dzduxt+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_DCt_ptr(nlp_mem->qp_in->DCt+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_RSQrq_ptr(nlp_mem->qp_in->RSQrq+ii, nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_idxb_ptr(nlp_mem->qp_in->idxb[ii], nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_idxs_rev_ptr(nlp_mem->qp_in->idxs_rev[ii], nlp_mem->constraints[ii]);
config->constraints[ii]->memory_set_idxe_ptr(nlp_mem->qp_in->idxe[ii], nlp_mem->constraints[ii]);
}
// alias to regularize memory
config->regularize->memory_set_RSQrq_ptr(dims->regularize, nlp_mem->qp_in->RSQrq, nlp_mem->regularize_mem);
config->regularize->memory_set_rq_ptr(dims->regularize, nlp_mem->qp_in->rqz, nlp_mem->regularize_mem);
config->regularize->memory_set_BAbt_ptr(dims->regularize, nlp_mem->qp_in->BAbt, nlp_mem->regularize_mem);
config->regularize->memory_set_b_ptr(dims->regularize, nlp_mem->qp_in->b, nlp_mem->regularize_mem);
config->regularize->memory_set_idxb_ptr(dims->regularize, nlp_mem->qp_in->idxb, nlp_mem->regularize_mem);
config->regularize->memory_set_DCt_ptr(dims->regularize, nlp_mem->qp_in->DCt, nlp_mem->regularize_mem);
config->regularize->memory_set_ux_ptr(dims->regularize, nlp_mem->qp_out->ux, nlp_mem->regularize_mem);
config->regularize->memory_set_pi_ptr(dims->regularize, nlp_mem->qp_out->pi, nlp_mem->regularize_mem);
config->regularize->memory_set_lam_ptr(dims->regularize, nlp_mem->qp_out->lam, nlp_mem->regularize_mem);
// copy sampling times into dynamics model
#if defined(ACADOS_WITH_OPENMP)
#pragma omp for
#endif
// NOTE(oj): this will lead in an error for irk_gnsf, T must be set in precompute;
// -> remove here and make sure precompute is called everywhere.
for (ii = 0; ii < N; ii++)
{
config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
}
#if defined(ACADOS_WITH_OPENMP)
} // end of parallel region
#endif
//
if (opts->initialize_t_slacks > 0)
ocp_nlp_initialize_t_slacks(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
// initialize QP
ocp_nlp_initialize_qp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
// main sqp loop
int sqp_iter = 0;
nlp_mem->sqp_iter = &sqp_iter;
for (; sqp_iter < opts->max_iter; sqp_iter++)
{
// linearizate NLP and update QP matrices
acados_tic(&timer1);
ocp_nlp_approximate_qp_matrices(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
mem->time_lin += acados_toc(&timer1);
// update QP rhs for SQP (step prim var, abs dual var)
ocp_nlp_approximate_qp_vectors_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
// compute nlp residuals
ocp_nlp_res_compute(dims, nlp_in, nlp_out, nlp_mem->nlp_res, nlp_mem);
// nlp_out->inf_norm_res = max over the four residual components
nlp_out->inf_norm_res = nlp_mem->nlp_res->inf_norm_res_stat;
nlp_out->inf_norm_res = (nlp_mem->nlp_res->inf_norm_res_eq > nlp_out->inf_norm_res) ?
nlp_mem->nlp_res->inf_norm_res_eq :
nlp_out->inf_norm_res;
nlp_out->inf_norm_res = (nlp_mem->nlp_res->inf_norm_res_ineq > nlp_out->inf_norm_res) ?
nlp_mem->nlp_res->inf_norm_res_ineq :
nlp_out->inf_norm_res;
nlp_out->inf_norm_res = (nlp_mem->nlp_res->inf_norm_res_comp > nlp_out->inf_norm_res) ?
nlp_mem->nlp_res->inf_norm_res_comp :
nlp_out->inf_norm_res;
if (opts->print_level > sqp_iter + 1)
print_ocp_qp_in(nlp_mem->qp_in);
// save statistics (columns 0..3: residuals of this iteration)
if (sqp_iter < mem->stat_m)
{
mem->stat[mem->stat_n*sqp_iter+0] = nlp_mem->nlp_res->inf_norm_res_stat;
mem->stat[mem->stat_n*sqp_iter+1] = nlp_mem->nlp_res->inf_norm_res_eq;
mem->stat[mem->stat_n*sqp_iter+2] = nlp_mem->nlp_res->inf_norm_res_ineq;
mem->stat[mem->stat_n*sqp_iter+3] = nlp_mem->nlp_res->inf_norm_res_comp;
}
// exit conditions on residuals
if ((nlp_mem->nlp_res->inf_norm_res_stat < opts->tol_stat) &
(nlp_mem->nlp_res->inf_norm_res_eq < opts->tol_eq) &
(nlp_mem->nlp_res->inf_norm_res_ineq < opts->tol_ineq) &
(nlp_mem->nlp_res->inf_norm_res_comp < opts->tol_comp))
{
// save sqp iterations number
mem->sqp_iter = sqp_iter;
nlp_out->sqp_iter = sqp_iter;
// stop timer
total_time += acados_toc(&timer0);
// save time
nlp_out->total_time = total_time;
mem->time_tot = total_time;
#if defined(ACADOS_WITH_OPENMP)
// restore number of threads
omp_set_num_threads(num_threads_bkp);
#endif
mem->status = ACADOS_SUCCESS;
if (opts->print_level > 0)
{
printf("%i\t%e\t%e\t%e\t%e.\n", sqp_iter, nlp_mem->nlp_res->inf_norm_res_stat,
nlp_mem->nlp_res->inf_norm_res_eq, nlp_mem->nlp_res->inf_norm_res_ineq,
nlp_mem->nlp_res->inf_norm_res_comp );
printf("\n\n");
}
return mem->status;
}
// regularize Hessian
acados_tic(&timer1);
config->regularize->regularize_hessian(config->regularize, dims->regularize,
opts->nlp_opts->regularize, nlp_mem->regularize_mem);
mem->time_reg += acados_toc(&timer1);
// (typically) no warm start at first iteration
if (sqp_iter == 0 && !opts->warm_start_first_qp)
{
int tmp_int = 0;
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts,
"warm_start", &tmp_int);
}
// solve qp
acados_tic(&timer1);
qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver, nlp_mem->qp_in, nlp_mem->qp_out,
opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work);
mem->time_qp_sol += acados_toc(&timer1);
qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_solver_call", &tmp_time);
mem->time_qp_solver_call += tmp_time;
qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_xcond", &tmp_time);
mem->time_qp_xcond += tmp_time;
// compute correct dual solution in case of Hessian regularization
acados_tic(&timer1);
config->regularize->correct_dual_sol(config->regularize, dims->regularize,
opts->nlp_opts->regularize, nlp_mem->regularize_mem);
mem->time_reg += acados_toc(&timer1);
// restore default warm start
if (sqp_iter==0)
{
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts,
"warm_start", &opts->qp_warm_start);
}
// TODO move into QP solver memory ???
qp_info *qp_info_;
ocp_qp_out_get(nlp_mem->qp_out, "qp_info", &qp_info_);
nlp_out->qp_iter = qp_info_->num_iter;
// printf("\nqp_iter = %d, sqp_iter = %d, max_sqp_iter = %d\n", nlp_out->qp_iter, sqp_iter, opts->max_iter);
qp_iter = qp_info_->num_iter;
// save statistics of last qp solver call (columns 4..5: QP status/iters)
if (sqp_iter+1 < mem->stat_m)
{
mem->stat[mem->stat_n*(sqp_iter+1)+4] = qp_status;
mem->stat[mem->stat_n*(sqp_iter+1)+5] = qp_iter;
}
// compute external QP residuals (for debugging)
if (opts->ext_qp_res)
{
ocp_qp_res_compute(nlp_mem->qp_in, nlp_mem->qp_out, work->qp_res, work->qp_res_ws);
if (sqp_iter+1 < mem->stat_m)
ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*(sqp_iter+1)+6));
}
// QP failure (anything but success or max-iter) aborts the SQP loop
if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER))
{
// print_ocp_qp_in(nlp_mem->qp_in);
if (opts->print_level > 0)
{
printf("%i\t%e\t%e\t%e\t%e.\n", sqp_iter, nlp_mem->nlp_res->inf_norm_res_stat,
nlp_mem->nlp_res->inf_norm_res_eq, nlp_mem->nlp_res->inf_norm_res_ineq,
nlp_mem->nlp_res->inf_norm_res_comp );
printf("\n\n");
}
// save sqp iterations number
mem->sqp_iter = sqp_iter;
nlp_out->sqp_iter = sqp_iter;
// stop timer
total_time += acados_toc(&timer0);
// save time
mem->time_tot = total_time;
nlp_out->total_time = total_time;
#ifndef ACADOS_SILENT
printf("QP solver returned error status %d in iteration %d\n", qp_status, sqp_iter);
#endif
#if defined(ACADOS_WITH_OPENMP)
// restore number of threads
omp_set_num_threads(num_threads_bkp);
#endif
if (opts->print_level > 1)
{
printf("\n Failed to solve the following QP:\n");
if (opts->print_level > sqp_iter + 1)
print_ocp_qp_in(nlp_mem->qp_in);
}
mem->status = ACADOS_QP_FAILURE;
return mem->status;
}
// globalization: line search returns the step size alpha
acados_tic(&timer1);
double alpha = ocp_nlp_line_search(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
mem->time_glob += acados_toc(&timer1);
// update variables
ocp_nlp_update_variables_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work, alpha);
// ocp_nlp_dims_print(nlp_out->dims);
// ocp_nlp_out_print(nlp_out);
// exit(1);
// ??? @rien
// for (int_t i = 0; i < N; i++)
// {
// ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
// sim_opts *opts = dynamics_opts->sim_solver;
// if (opts->scheme == NULL)
// continue;
// opts->sens_adj = (opts->scheme->type != exact);
// if (nlp_in->freezeSens) {
// // freeze inexact sensitivities after first SQP iteration !!
// opts->scheme->freeze = true;
// }
// }
if (opts->print_level > 0)
{
if (sqp_iter%10 == 0)
{
printf("# it\tstat\t\teq\t\tineq\t\tcomp\n");
}
printf("%i\t%e\t%e\t%e\t%e.\n", sqp_iter, nlp_mem->nlp_res->inf_norm_res_stat,
nlp_mem->nlp_res->inf_norm_res_eq, nlp_mem->nlp_res->inf_norm_res_ineq, nlp_mem->nlp_res->inf_norm_res_comp );
}
}
// loop exhausted without meeting the tolerances
// stop timer
total_time += acados_toc(&timer0);
if (opts->print_level > 0)
printf("\n\n");
// ocp_nlp_out_print(nlp_out);
// save sqp iterations number
mem->sqp_iter = sqp_iter;
nlp_out->sqp_iter = sqp_iter;
// save time
mem->time_tot = total_time;
nlp_out->total_time = total_time;
// maximum number of iterations reached
#if defined(ACADOS_WITH_OPENMP)
// restore number of threads
omp_set_num_threads(num_threads_bkp);
#endif
mem->status = ACADOS_MAXITER;
#ifndef ACADOS_SILENT
printf("\n ocp_nlp_sqp: maximum iterations reached\n");
#endif
return mem->status;
}
// One-time precomputation before solving: sanity-check slack dimensions
// against the constraint modules and run the dynamics precompute (e.g.
// integrator setup). Returns the first non-success dynamics status.
int ocp_nlp_sqp_precompute(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
void *opts_, void *mem_, void *work_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_sqp_memory *mem = mem_;
ocp_nlp_in *nlp_in = nlp_in_;
// ocp_nlp_out *nlp_out = nlp_out_;
ocp_nlp_memory *nlp_mem = mem->nlp_mem;
ocp_nlp_sqp_workspace *work = work_;
ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
ocp_nlp_workspace *nlp_work = work->nlp_work;
int N = dims->N;
int status = ACADOS_SUCCESS;
int ii;
// TODO(all) add flag to enable/disable checks
for (ii = 0; ii <= N; ii++)
{
int module_val;
config->constraints[ii]->dims_get(config->constraints[ii], dims->constraints[ii], "ns", &module_val);
if (dims->ns[ii] != module_val)
{
printf("ocp_nlp_sqp_precompute: inconsistent dimension ns for stage %d with constraint module, got %d, module: %d.",
ii, dims->ns[ii], module_val);
exit(1);
}
}
// precompute
for (ii = 0; ii < N; ii++)
{
// set T (sampling time) before the integrator precompute uses it
config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
// dynamics precompute
status = config->dynamics[ii]->precompute(config->dynamics[ii], dims->dynamics[ii],
nlp_in->dynamics[ii], opts->nlp_opts->dynamics[ii],
nlp_mem->dynamics[ii], nlp_work->dynamics[ii]);
if (status != ACADOS_SUCCESS)
return status;
}
return status;
}
// Evaluate parametric solution sensitivities by solving the last QP with a
// unit right-hand side. Currently only supports field "ex" (initial state)
// at stage 0; any other request is an error. The result is written into
// sens_nlp_out.
void ocp_nlp_sqp_eval_param_sens(void *config_, void *dims_, void *opts_, void *mem_, void *work_,
char *field, int stage, int index, void *sens_nlp_out_)
{
ocp_nlp_dims *dims = dims_;
ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
ocp_nlp_sqp_memory *mem = mem_;
ocp_nlp_memory *nlp_mem = mem->nlp_mem;
ocp_nlp_out *sens_nlp_out = sens_nlp_out_;
ocp_nlp_sqp_workspace *work = work_;
ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work);
ocp_nlp_workspace *nlp_work = work->nlp_work;
// copy the last QP and zero its rhs; only the seed entry is set below
d_ocp_qp_copy_all(nlp_mem->qp_in, work->tmp_qp_in);
d_ocp_qp_set_rhs_zero(work->tmp_qp_in);
double one = 1.0;
if ((!strcmp("ex", field)) & (stage==0))
{
// unit seed on the initial-state bound (lbx == ubx at stage 0)
d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in);
d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in);
// d_ocp_qp_print(work->tmp_qp_in->dim, work->tmp_qp_in);
config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver, work->tmp_qp_in, work->tmp_qp_out,
opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work);
// d_ocp_qp_sol_print(work->tmp_qp_out->dim, work->tmp_qp_out);
// exit(1);
/* copy tmp_qp_out into sens_nlp_out */
int i;
int N = dims->N;
int *nv = dims->nv;
int *nx = dims->nx;
// int *nu = dims->nu;
int *ni = dims->ni;
// int *nz = dims->nz;
for (i = 0; i <= N; i++)
{
blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0, sens_nlp_out->ux + i, 0);
if (i < N)
blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0, sens_nlp_out->pi + i, 0);
blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0, sens_nlp_out->lam + i, 0);
blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0, sens_nlp_out->t + i, 0);
}
}
else
{
printf("\nerror: field %s at stage %d not available in ocp_nlp_sqp_eval_param_sens\n", field, stage);
exit(1);
}
return;
}
// TODO rename memory_get ???
/* String-keyed getter for results, timings and statistics stored in the SQP
 * memory. `return_value_` must point to caller-provided storage of the
 * type matching the requested field (int, double, double*, void*, ...).
 * Unknown fields are a fatal error. */
void ocp_nlp_sqp_get(void *config_, void *dims_, void *mem_, const char *field, void *return_value_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_memory *mem = mem_;

    if (!strcmp("sqp_iter", field))
    {
        int *value = return_value_;
        *value = mem->sqp_iter;
    }
    else if (!strcmp("status", field))
    {
        int *value = return_value_;
        *value = mem->status;
    }
    else if (!strcmp("time_tot", field) || !strcmp("tot_time", field))
    {
        double *value = return_value_;
        *value = mem->time_tot;
    }
    else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_sol;
    }
    else if (!strcmp("time_qp_solver", field) || !strcmp("time_qp_solver_call", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_solver_call;
    }
    else if (!strcmp("time_qp_xcond", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_xcond;
    }
    else if (!strcmp("time_lin", field))
    {
        double *value = return_value_;
        *value = mem->time_lin;
    }
    else if (!strcmp("time_reg", field))
    {
        double *value = return_value_;
        *value = mem->time_reg;
    }
    else if (!strcmp("time_glob", field))
    {
        double *value = return_value_;
        *value = mem->time_glob;
    }
    else if (!strcmp("time_sim", field) || !strcmp("time_sim_ad", field) || !strcmp("time_sim_la", field))
    {
        // sum the requested simulation timer over all shooting intervals
        double tmp = 0.0;
        double *ptr = return_value_;
        // BUG FIX: initialize the accumulator; return_value_ points to
        // caller-provided memory that is not guaranteed to be zeroed,
        // so `+=` alone would add to garbage.
        *ptr = 0.0;
        int N = dims->N;
        int ii;
        for (ii=0; ii<N; ii++)
        {
            config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii], mem->nlp_mem->dynamics[ii], field, &tmp);
            *ptr += tmp;
        }
    }
    else if (!strcmp("stat", field))
    {
        double **value = return_value_;
        *value = mem->stat;
    }
    else if (!strcmp("statistics", field))
    {
        // export the stat table column-major with a leading iteration-index
        // column; `value` must hold n_row * (stat_n + 1) doubles
        int n_row = mem->stat_m<mem->sqp_iter+1 ? mem->stat_m : mem->sqp_iter+1;
        double *value = return_value_;
        for (int ii=0; ii<n_row; ii++)
        {
            value[ii+0] = ii;
            for (int jj=0; jj<mem->stat_n; jj++)
                value[ii+(jj+1)*n_row] = mem->stat[jj+ii*mem->stat_n];
        }
    }
    else if (!strcmp("stat_m", field))
    {
        int *value = return_value_;
        *value = mem->stat_m;
    }
    else if (!strcmp("stat_n", field))
    {
        int *value = return_value_;
        *value = mem->stat_n;
    }
    else if (!strcmp("nlp_mem", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem;
    }
    else if (!strcmp("qp_xcond_dims", field))
    {
        void **value = return_value_;
        *value = dims->qp_solver->xcond_dims;
    }
    else if (!strcmp("nlp_res", field))
    {
        ocp_nlp_res **value = return_value_;
        *value = mem->nlp_mem->nlp_res;
    }
    else if (!strcmp("qp_xcond_in", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_solver_mem->xcond_qp_in;
    }
    else if (!strcmp("qp_xcond_out", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_solver_mem->xcond_qp_out;
    }
    else if (!strcmp("qp_in", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_in;
    }
    else if (!strcmp("qp_out", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_out;
    }
    else if (!strcmp("qp_iter", field))
    {
        config->qp_solver->memory_get(config->qp_solver,
            mem->nlp_mem->qp_solver_mem, "iter", return_value_);
    }
    else if (!strcmp("res_stat", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->nlp_res->inf_norm_res_stat;
    }
    else if (!strcmp("res_eq", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->nlp_res->inf_norm_res_eq;
    }
    else if (!strcmp("res_ineq", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->nlp_res->inf_norm_res_ineq;
    }
    else if (!strcmp("res_comp", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->nlp_res->inf_norm_res_comp;
    }
    else if (!strcmp("cost_value", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->cost_value;
    }
    else
    {
        printf("\nerror: field %s not available in ocp_nlp_sqp_get\n", field);
        exit(1);
    }
}
void ocp_nlp_sqp_opts_get(void *config_, void *dims_, void *opts_,
const char *field, void *return_value_)
{
// ocp_nlp_config *config = config_;
ocp_nlp_sqp_opts *opts = opts_;
if (!strcmp("nlp_opts", field))
{
void **value = return_value_;
*value = opts->nlp_opts;
}
else
{
printf("\nerror: field %s not available in ocp_nlp_sqp_opts_get\n", field);
exit(1);
}
}
void ocp_nlp_sqp_work_get(void *config_, void *dims_, void *work_,
const char *field, void *return_value_)
{
// ocp_nlp_config *config = config_;
ocp_nlp_sqp_workspace *work = work_;
if (!strcmp("nlp_work", field))
{
void **value = return_value_;
*value = work->nlp_work;
}
else
{
printf("\nerror: field %s not available in ocp_nlp_sqp_work_get\n", field);
exit(1);
}
}
/* Populate the generic ocp_nlp_config vtable with the SQP implementations. */
void ocp_nlp_sqp_config_initialize_default(void *config_)
{
    ocp_nlp_config *config = (ocp_nlp_config *) config_;

    // options
    config->opts_calculate_size = ocp_nlp_sqp_opts_calculate_size;
    config->opts_assign = ocp_nlp_sqp_opts_assign;
    config->opts_initialize_default = ocp_nlp_sqp_opts_initialize_default;
    config->opts_update = ocp_nlp_sqp_opts_update;
    config->opts_set = ocp_nlp_sqp_opts_set;
    config->opts_set_at_stage = ocp_nlp_sqp_opts_set_at_stage;
    config->opts_get = ocp_nlp_sqp_opts_get;
    // memory and workspace
    config->memory_calculate_size = ocp_nlp_sqp_memory_calculate_size;
    config->memory_assign = ocp_nlp_sqp_memory_assign;
    config->workspace_calculate_size = ocp_nlp_sqp_workspace_calculate_size;
    config->work_get = ocp_nlp_sqp_work_get;
    // solver functions
    config->evaluate = ocp_nlp_sqp;
    config->precompute = ocp_nlp_sqp_precompute;
    config->eval_param_sens = ocp_nlp_sqp_eval_param_sens;
    config->config_initialize_default = ocp_nlp_sqp_config_initialize_default;
    config->get = ocp_nlp_sqp_get;
}
|
mkldnn_rnn_impl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifndef MXNET_OPERATOR_NN_MKLDNN_MKLDNN_RNN_IMPL_H_
#define MXNET_OPERATOR_NN_MKLDNN_MKLDNN_RNN_IMPL_H_
#if MXNET_USE_MKLDNN == 1
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <mxnet/storage.h>
#include <algorithm>
#include <map>
#include <vector>
#include <utility>
#include <string>
#include "../../math_functions-inl.h"
#include "../../operator_common.h"
#include "../../rnn_impl.h"
#include "../../rnn-inl.h"
#include "mkldnn.hpp"
#include "./mkldnn_base-inl.h"
namespace mxnet {
namespace op {
// Map an MXNet RNN mode to the corresponding mkldnn algorithm, and report
// the number of gates and recurrent states that mode uses.
static algorithm GetMKLDNNRNNAlgo(int mode,
                                  int* ngates,
                                  int* nstates) {
  switch (mode) {
    case rnn_enum::kLstm:
      *ngates = 4;
      *nstates = 2;
      return algorithm::vanilla_lstm;
    case rnn_enum::kGru:
      *ngates = 3;
      *nstates = 1;
      return algorithm::vanilla_gru;
    case rnn_enum::kRnnRelu:
    case rnn_enum::kRnnTanh:
      *ngates = 1;
      *nstates = 1;
      return algorithm::vanilla_rnn;
    default:
      LOG(FATAL) << "unsupported RNN mode:" << mode;
      return algorithm::vanilla_rnn;  // unreachable: LOG(FATAL) aborts
  }
}
// Concatenate several source buffers into `dst` along `concat_dimension`
// using the mkldnn concat primitive. srcs_cds/srcs_data give the dims and
// raw pointers of the sources; dst_cds the dims of the destination. The
// primitive is registered on the MKLDNN stream and submitted immediately.
static void ConcatData(mkldnn::memory::format src_format,
mkldnn::memory::format dst_format,
std::vector<mkldnn::memory::dims> srcs_cds,
mkldnn::memory::dims dst_cds,
mkldnn::memory::data_type mkldnn_dtype,
int concat_dimension,
std::vector<void*> srcs_data,
const mkldnn::memory &dst) {
auto cpu_engine = CpuEngine::Get()->get_engine();
std::vector<mkldnn::memory::primitive_desc> srcs_pd;
std::vector<mkldnn::memory> srcs;
// wrap each raw source pointer in an mkldnn memory of the given format
for (size_t i = 0; i < srcs_cds.size(); i++) {
auto desc = mkldnn::memory::desc(srcs_cds[i], mkldnn_dtype, src_format);
auto mpd = mkldnn::memory::primitive_desc(desc, cpu_engine);
auto src_memory = mkldnn::memory(mpd, srcs_data[i]);
srcs_pd.push_back(mpd);
srcs.push_back(src_memory);
}
std::vector<primitive::at> inputs;
for (size_t i = 0; i < srcs_cds.size(); i++) {
inputs.push_back(srcs[i]);
}
auto dst_desc = mkldnn::memory::desc(dst_cds, mkldnn_dtype, dst_format);
auto concat_pd = concat::primitive_desc(dst_desc, concat_dimension, srcs_pd);
// execute eagerly so dst is ready for the caller
MKLDNNStream::Get()->RegisterPrim(concat(concat_pd, inputs, dst));
MKLDNNStream::Get()->Submit();
}
// Size of the cached mkldnn memory:
// first-layer wx, wh plus the next L - 1 layers' wx and wh,
// plus hx and cx for all L layers, src/dst data and iter buffers, etc.
// It reserves memory for both the pre- and post-reorder/concat copies.
// For unidirectional RNNs, layers are fused as 1 + (L - 1) when I != H.
// For bidirectional RNNs, each layer fuses data + back_data (weight, bias,
// iter, etc.), so the first layer and the following layers must be
// distinguished.
// Number of DType elements to reserve for the cached mkldnn RNN memory
// (reordered/concatenated weights, biases, iter states, workspace) for an
// L-layer, D-directional RNN with T steps, batch N, input size I and hidden
// size H. The three RNN modes share one formula that only differs in the
// gate count (LSTM: 4, GRU: 3, vanilla ReLU/Tanh: 1), so the per-mode
// duplication of the original is folded into a single expression.
static size_t GetMKLDNNRNNCacheMemorySize(int L,
                                          int D,
                                          int T,
                                          int N,
                                          int I,
                                          int H,
                                          int mode) {
  int ngates = 0;
  switch (mode) {
    case rnn_enum::kLstm:
      ngates = 4;
      break;
    case rnn_enum::kGru:
      ngates = 3;
      break;
    case rnn_enum::kRnnRelu:
    case rnn_enum::kRnnTanh:
      ngates = 1;
      break;
    default:
      LOG(FATAL) << "unknown RNN mode " << mode;
      return 0;  // unreachable: LOG(FATAL) aborts
  }
  size_t size = 2 * (D * (I + H) * ngates * H + (L - 1) * D * (D * H + H) * ngates * H +
         L * D * 2 * N * H) + T * N * D * H + L * 2 * D * ngates * H + (L + 2) * D * 2 * N * H +
         6 * D * (I + H + 2) * ngates * H + T * N * I * 2;
  return size;
}
template <typename DType>
static void AdjustGruWeightGateOrder(DType* weight,
                                     const int I,
                                     const int H) {
  // mxnet orders the GRU gates (reset, update, new) while mkldnn expects
  // (update, reset, new): swap the reset and update weight blocks in place.
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  DType* gate_reset = weight;
  DType* gate_update = weight + I * H;
  #pragma omp parallel for num_threads(omp_threads)
  for (int idx = 0; idx < I * H; idx++) {
    std::swap(gate_reset[idx], gate_update[idx]);
  }
}
template <typename DType>
static void AdjustGruBiasGateOrder(DType* bias,
                                   const int H) {
  // mxnet orders the GRU gates (reset, update, new) while mkldnn expects
  // (update, reset, new): swap the reset and update bias blocks in place.
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  DType* gate_reset = bias;
  DType* gate_update = bias + H;
  #pragma omp parallel for num_threads(omp_threads)
  for (int idx = 0; idx < H; idx++) {
    std::swap(gate_reset[idx], gate_update[idx]);
  }
}
// Since MKL-DNN's fused RNN and MXNet's FusedRNN have different semantics,
// a bidirectional network is fused layer by layer, while a unidirectional
// network is computed either as fused 1 + fused (L - 1) layers, or as L
// fused layers (when I == H).
// Runs one bidirectional fused RNN layer through the MKL-DNN rnn_forward
// primitive (both directions computed together, outputs concatenated).
//
// On the first call (*has_cache == false) the MXNet-layout weights/biases
// are adjusted (GRU gate order), concatenated forward+backward, reordered
// into MKL-DNN's ldigo/ldgo layouts and cached in wx_memory / wh_memory /
// bias_memory; later calls reuse the cached copies. `lvalue` > 0 selects
// the cache slot for the lvalue-th stacked layer; `layer_index` selects
// which primitive/memory set to run. If x_ptr is null (layers > 0), input
// comes from *user_src_layer_memory instead.
// NOTE(review): despite the is_train parameter, the primitive is always
// built with prop_kind::forward_inference — confirm training is handled
// elsewhere.
template <typename DType>
static void MKLDNNRNNForwardSingleLayerBi(bool state_outputs,
                                          const int T,
                                          const int N,
                                          const int I,
                                          const int H,
                                          DType* x_ptr,
                                          mkldnn::memory *user_src_layer_memory,
                                          DType* hx_ptr,
                                          DType* cx_ptr,
                                          DType* w_ptr,
                                          DType* b_ptr,
                                          DType* y_ptr,
                                          DType* hy_ptr,
                                          DType* cy_ptr,
                                          std::vector<mkldnn::memory> *concat_weight_memory,
                                          std::vector<mkldnn::memory> *concat_iter_memory,
                                          std::vector<mkldnn::memory> *x_memory,
                                          std::vector<mkldnn::memory> *hcx_memory,
                                          std::vector<mkldnn::memory> *wx_memory,
                                          std::vector<mkldnn::memory> *wh_memory,
                                          std::vector<mkldnn::memory> *bias_memory,
                                          std::vector<mkldnn::memory> *y_memory,
                                          std::vector<mkldnn::memory> *hcy_memory,
                                          std::vector<primitive> *rnn_forward_prim,
                                          int layer_index,
                                          bool *has_cache,
                                          int lvalue,
                                          int dtype,
                                          bool is_train,
                                          int mode) {
  int ngates = 0, nstates = 0;
  algorithm nalgorithm = GetMKLDNNRNNAlgo(mode, &ngates, &nstates);
  mkldnn::memory::data_type mkldnn_dtype = get_mkldnn_type(dtype);
  const int single_cell_size = N * H;
  const int single_b_size = ngates * H;
  // Forward-direction weights/biases, then the backward-direction copies
  // laid out immediately after them in the flat MXNet parameter blob.
  DType* wx = w_ptr;                   //  ngates * H, I
  DType* wh = w_ptr + I * H * ngates;  //  ngates * H, H
  DType* back_wx = w_ptr + ngates * H * (I + H);
  DType* back_wh = back_wx + I * H * ngates;
  DType* bx = b_ptr;
  DType* bh = b_ptr + H * ngates;
  DType* back_bx = b_ptr + single_b_size * 2;
  DType* back_bh = back_bx + H * ngates;
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  auto cpu_engine = CpuEngine::Get()->get_engine();
  auto null_memory_ = null_memory(cpu_engine);
  int offset1 = 0, offset2 = 0;
  bool initialized = *has_cache;
  // Tensor dims: leading "2" is the direction axis (forward + backward).
  mkldnn::memory::dims src_layer_tz = {T, N, I};
  mkldnn::memory::dims dst_layer_tz = {T, N, 2 * H};
  mkldnn::memory::dims weights_layer_tz = {1, 2, I, ngates, H};    //  ldigo
  mkldnn::memory::dims weights_layer_r_tz = {1, 1, I, ngates, H};  //  ldigo for reorder
  mkldnn::memory::dims weights_iter_tz = {1, 2, H, ngates, H};     //  ldigo
  mkldnn::memory::dims weights_iter_r_tz = {1, 1, H, ngates, H};   //  ldigo for reorder
  mkldnn::memory::dims bias_tz = {1, 2, ngates, H};
  mkldnn::memory::dims src_iter_tz = {1, 2, nstates, N, H};  //  ldsnc
  mkldnn::memory::dims dst_iter_tz = {1, 2, nstates, N, H};  //  ldsnc
  if (!initialized) {
    // One-time weight preparation: fix GRU gate order in place, then
    // concat forward+backward weights along the direction axis.
    if (mode == rnn_enum::kGru) {
      AdjustGruWeightGateOrder(wx, I, H);
      AdjustGruWeightGateOrder(back_wx, I, H);
      AdjustGruWeightGateOrder(wh, H, H);
      AdjustGruWeightGateOrder(back_wh, H, H);
      AdjustGruBiasGateOrder(bx, H);
      AdjustGruBiasGateOrder(back_bx, H);
      AdjustGruBiasGateOrder(bh, H);
      AdjustGruBiasGateOrder(back_bh, H);
    }
    auto src_wx = (*concat_weight_memory)[2 * layer_index];
    auto src_wh = (*concat_weight_memory)[2 * layer_index + 1];
    std::vector<void*> srcs_data1;
    srcs_data1.push_back(wx);
    srcs_data1.push_back(back_wx);
    ConcatData(mkldnn::memory::format::ldgoi, mkldnn::memory::format::ldgoi,
               {weights_layer_r_tz, weights_layer_r_tz}, weights_layer_tz,
               mkldnn_dtype, 1, srcs_data1, src_wx);
    srcs_data1.clear();
    srcs_data1.push_back(wh);
    srcs_data1.push_back(back_wh);
    ConcatData(mkldnn::memory::format::ldgoi, mkldnn::memory::format::ldgoi,
               {weights_iter_r_tz, weights_iter_r_tz}, weights_iter_tz,
               mkldnn_dtype, 1, srcs_data1, src_wh);
    // Stacked layers (lvalue > 0) cache into slot lvalue + 1; the first
    // layer uses slot 0.
    int tmpvalue = 0;
    if (lvalue > 0) {
      tmpvalue = lvalue + 1;
    }
    MKLDNNStream::Get()->RegisterPrim(reorder(src_wx, (*wx_memory)[tmpvalue]));
    MKLDNNStream::Get()->RegisterPrim(reorder(src_wh, (*wh_memory)[tmpvalue]));
    // MKL-DNN uses a single bias per gate, so sum MXNet's input and hidden
    // biases; second half of the buffer holds the backward direction.
    DType* user_bias = reinterpret_cast<DType *>
        ((*bias_memory)[tmpvalue].get_data_handle());
    #pragma omp parallel for num_threads(omp_threads)
    for (int j = 0; j < single_b_size; j++) {
      user_bias[j] = bx[j] + bh[j];
      user_bias[single_b_size + j] = back_bx[j] + back_bh[j];
    }
  }
  // Point the execution-slot memories at this layer's cached weights.
  if (lvalue > 0) {
    (*wx_memory)[layer_index].set_data_handle((*wx_memory)[lvalue + 1].get_data_handle());
    (*wh_memory)[layer_index].set_data_handle((*wh_memory)[lvalue + 1].get_data_handle());
    (*bias_memory)[layer_index].set_data_handle((*bias_memory)[lvalue + 1].get_data_handle());
  }
  auto src_layer_md = mkldnn::memory::desc(
      { src_layer_tz }, mkldnn_dtype, mkldnn::memory::format::tnc);
  auto weight_layer_md = mkldnn::memory::desc(
      { weights_layer_tz }, mkldnn_dtype, mkldnn::memory::format::ldigo);
  auto weight_iter_md = mkldnn::memory::desc(
      { weights_iter_tz }, mkldnn_dtype, mkldnn::memory::format::ldigo);
  auto dst_layer_md = mkldnn::memory::desc(
      { dst_layer_tz }, mkldnn_dtype, mkldnn::memory::format::tnc);
  auto dst_iter_md = mkldnn::memory::desc(
      { dst_iter_tz }, mkldnn_dtype, mkldnn::memory::format::ldsnc);
  auto src_iter_md = mkldnn::memory::desc(
      {src_iter_tz}, mkldnn_dtype, mkldnn::memory::format::ldsnc);
  auto bias_md = mkldnn::memory::desc({bias_tz},
      mkldnn_dtype, mkldnn::memory::format::ldgo);
  auto user_src_iter_memory = (*concat_iter_memory)[2];
  if (mode == rnn_enum::kLstm) {
    // LSTM carries (h, c) per direction: concat h+c for each direction,
    // then concat the two directions into one ldsnc iter buffer.
    std::vector<void*> srcs_data1;
    srcs_data1.push_back(hx_ptr);
    srcs_data1.push_back(cx_ptr);
    auto tmp1_src_iter_memory = (*concat_iter_memory)[0];
    ConcatData(mkldnn::memory::format::ldsnc, mkldnn::memory::format::ldsnc,
               {{1, 1, 1, N, H}, {1, 1, 1, N, H}}, {1, 1, nstates, N, H}, mkldnn_dtype, 2,
               srcs_data1, tmp1_src_iter_memory);
    std::vector<void*> srcs_data2;
    srcs_data2.push_back(hx_ptr + single_cell_size);
    srcs_data2.push_back(cx_ptr + single_cell_size);
    auto tmp2_src_iter_memory = (*concat_iter_memory)[1];
    ConcatData(mkldnn::memory::format::ldsnc, mkldnn::memory::format::ldsnc,
               {{1, 1, 1, N, H}, {1, 1, 1, N, H}}, {1, 1, nstates, N, H}, mkldnn_dtype, 2,
               srcs_data2, tmp2_src_iter_memory);
    std::vector<void*> srcs_data3;
    srcs_data3.push_back(reinterpret_cast<DType *>(tmp1_src_iter_memory.get_data_handle()));
    srcs_data3.push_back(reinterpret_cast<DType *>(tmp2_src_iter_memory.get_data_handle()));
    ConcatData(mkldnn::memory::format::ldsnc, mkldnn::memory::format::ldsnc,
               {{1, 1, nstates, N, H}, {1, 1, nstates, N, H}}, {1, 2, nstates, N, H},
               mkldnn_dtype, 1, srcs_data3, user_src_iter_memory);
  } else {
    // GRU / vanilla RNN: the hidden state is already contiguous.
    user_src_iter_memory.set_data_handle(hx_ptr);
  }
  (*hcx_memory)[layer_index].set_data_handle(user_src_iter_memory.get_data_handle());
  rnn_cell::desc rnn_cell(nalgorithm,
      mode == rnn_enum::kRnnRelu ? algorithm::eltwise_relu : algorithm::eltwise_tanh);
  rnn_forward::desc layer_desc(prop_kind::forward_inference, rnn_cell,
                               rnn_direction::bidirectional_concat, src_layer_md,
                               src_iter_md, weight_layer_md, weight_iter_md,
                               bias_md, dst_layer_md, dst_iter_md);
  auto prim_desc
      = rnn_forward::primitive_desc(layer_desc, cpu_engine);
  // First layer reads raw input; deeper layers chain from the previous
  // layer's output memory.
  if (x_ptr && layer_index == 0) {
    (*x_memory)[layer_index].set_data_handle(x_ptr);
  } else {
    (*x_memory)[layer_index].set_data_handle((*user_src_layer_memory).get_data_handle());
  }
  (*y_memory)[layer_index].set_data_handle(y_ptr);
  // Build the primitive once per layer slot; reuse it on later calls.
  if (rnn_forward_prim->size() <= (size_t)layer_index) {
    primitive rnn_prim = rnn_forward(prim_desc, (*x_memory)[layer_index],
                                     (*hcx_memory)[layer_index], (*wx_memory)[layer_index],
                                     (*wh_memory)[layer_index], (*bias_memory)[layer_index],
                                     (*y_memory)[layer_index],
                                     (*hcy_memory)[layer_index], null_memory_);
    rnn_forward_prim->push_back(rnn_prim);
  }
  MKLDNNStream::Get()->RegisterPrim((*rnn_forward_prim)[layer_index]);
  MKLDNNStream::Get()->Submit();
  if (state_outputs) {
    // Scatter the packed dst iter buffer back into MXNet's hy (and for
    // LSTM also cy), forward direction first then backward.
    DType* dst_hcy = reinterpret_cast<DType *> ((*hcy_memory)[layer_index].get_data_handle());
    if (mode == rnn_enum::kLstm) {
      offset1 = nstates * single_cell_size;
      offset2 = (nstates + 1) * single_cell_size;
      #pragma omp parallel for num_threads(omp_threads)
      for (int n = 0; n < single_cell_size; n++) {
        hy_ptr[n] = dst_hcy[n];
        hy_ptr[n + single_cell_size] = dst_hcy[n + offset1];
        cy_ptr[n] = dst_hcy[n + single_cell_size];
        cy_ptr[n + single_cell_size] = dst_hcy[n + offset2];
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (int n = 0; n < 2 * single_cell_size; n++) {
        hy_ptr[n] = dst_hcy[n];
      }
    }
  }
}
// Runs L stacked unidirectional RNN layers as ONE fused MKL-DNN
// rnn_forward primitive (possible only when every layer has the same
// input size, i.e. I == H for layers after the first).
//
// On the first call (*has_cache == false) weights of all L layers are
// concatenated along the layer axis, reordered to ldigo and cached; MXNet's
// two bias vectors per layer are summed into MKL-DNN's single ldgo bias.
// If x_ptr is null (stacked on top of another fused block) the input is
// taken from *user_src_layer_memory.
// NOTE(review): the primitive is always built with
// prop_kind::forward_inference regardless of is_train.
template <typename DType>
static void MKLDNNRNNForwardUnidi(bool state_outputs,
                                  const int L,
                                  const int T,
                                  const int N,
                                  const int I,
                                  const int H,
                                  DType* x_ptr,
                                  mkldnn::memory *user_src_layer_memory,
                                  DType* hx_ptr,
                                  DType* cx_ptr,
                                  DType* w_ptr,
                                  DType* b_ptr,
                                  DType* y_ptr,
                                  DType* hy_ptr,
                                  DType* cy_ptr,
                                  std::vector<mkldnn::memory> *concat_weight_memory,
                                  std::vector<mkldnn::memory> *concat_iter_memory,
                                  std::vector<mkldnn::memory> *x_memory,
                                  std::vector<mkldnn::memory> *hcx_memory,
                                  std::vector<mkldnn::memory> *wx_memory,
                                  std::vector<mkldnn::memory> *wh_memory,
                                  std::vector<mkldnn::memory> *bias_memory,
                                  std::vector<mkldnn::memory> *y_memory,
                                  std::vector<mkldnn::memory> *hcy_memory,
                                  std::vector<primitive> *rnn_forward_prim,
                                  int layer_index,
                                  bool *has_cache,
                                  int dtype,
                                  bool is_train,
                                  int mode) {
  int ngates = 0, nstates = 0;
  algorithm nalgorithm = GetMKLDNNRNNAlgo(mode, &ngates, &nstates);
  mkldnn::memory::data_type mkldnn_dtype = get_mkldnn_type(dtype);
  const int cell_size = N * H;
  const int single_cell_size = N * H;
  const int single_b_size = ngates * H;
  // Per-layer parameter stride in the flat MXNet weight blob.
  int w_size = (I + H) * H * ngates;
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  auto cpu_engine = CpuEngine::Get()->get_engine();
  auto null_memory_ = null_memory(cpu_engine);
  int offset1 = 0, offset2 = 0;
  bool initialized = *has_cache;
  // Tensor dims: leading L is the fused layer axis, direction axis is 1.
  mkldnn::memory::dims src_layer_tz = {T, N, I};
  mkldnn::memory::dims dst_layer_tz = {T, N, H};
  mkldnn::memory::dims weights_layer_tz = {L, 1, I, ngates, H};  //  ldigo
  mkldnn::memory::dims weights_iter_tz = {L, 1, H, ngates, H};   //  ldigo
  mkldnn::memory::dims bias_tz = {L, 1, ngates, H};
  mkldnn::memory::dims src_iter_tz = {L, 1, nstates, N, H};  //  ldsnc
  mkldnn::memory::dims dst_iter_tz = {L, 1, nstates, N, H};  //  ldsnc
  mkldnn::memory::dims weights_layer_r_tz = {1, 1, I, ngates, H};  //  ldigo for reorder
  mkldnn::memory::dims weights_iter_r_tz = {1, 1, H, ngates, H};   //  ldigo for reorder
  auto weight_layer_md = mkldnn::memory::desc(
      { weights_layer_tz }, mkldnn_dtype, mkldnn::memory::format::ldigo);
  auto weight_iter_md = mkldnn::memory::desc(
      { weights_iter_tz }, mkldnn_dtype, mkldnn::memory::format::ldigo);
  auto src_layer_md = mkldnn::memory::desc(
      { src_layer_tz }, mkldnn_dtype, mkldnn::memory::format::tnc);
  auto dst_layer_md = mkldnn::memory::desc(
      {dst_layer_tz}, mkldnn_dtype, mkldnn::memory::format::tnc);
  auto src_iter_md = mkldnn::memory::desc(
      {src_iter_tz}, mkldnn_dtype, mkldnn::memory::format::ldsnc);
  auto bias_md = mkldnn::memory::desc({bias_tz},
      mkldnn_dtype, mkldnn::memory::format::ldgo);
  auto dst_iter_md = mkldnn::memory::desc(
      {dst_iter_tz}, mkldnn_dtype, mkldnn::memory::format::ldsnc);
  // Stage per-layer initial states: LSTM needs h and c concatenated into
  // one ldsnc buffer per layer; other modes can alias hx directly.
  for (int l = 0; l < L; l++) {
    if (mode == rnn_enum::kLstm) {
      std::vector<void*> srcs_data;
      srcs_data.push_back(hx_ptr);
      srcs_data.push_back(cx_ptr);
      auto tmp_src_iter_memory = (*concat_iter_memory)[l + layer_index];
      ConcatData(mkldnn::memory::format::ldsnc, mkldnn::memory::format::ldsnc,
                 {{1, 1, 1, N, H}, {1, 1, 1, N, H}}, {1, 1, nstates, N, H}, mkldnn_dtype,
                 2, srcs_data, tmp_src_iter_memory);
    } else {
      (*concat_iter_memory)[l + layer_index].set_data_handle(hx_ptr);
    }
    hx_ptr += cell_size;
    if (mode == rnn_enum::kLstm) {
      cx_ptr += cell_size;
    }
  }
  // Merge the per-layer states into one {L,1,nstates,N,H} buffer.
  auto user_src_iter_memory = null_memory_;
  if (L == 1) {
    user_src_iter_memory = (*concat_iter_memory)[layer_index];
  } else {
    user_src_iter_memory = (*concat_iter_memory)[L + layer_index];
    std::vector<void*> src_l_data;
    std::vector<mkldnn::memory::dims> src_l_dim;
    for (int l = 0; l < L; l++) {
      src_l_data.push_back(reinterpret_cast<DType *>
          ((*concat_iter_memory)[l + layer_index].get_data_handle()));
      src_l_dim.push_back({1, 1, nstates, N, H});
    }
    ConcatData(mkldnn::memory::format::ldsnc, mkldnn::memory::format::ldsnc, src_l_dim,
               {L, 1, nstates, N, H}, mkldnn_dtype, 0, src_l_data, user_src_iter_memory);
  }
  (*hcx_memory)[layer_index].set_data_handle(user_src_iter_memory.get_data_handle());
  auto src_wx_f = (*concat_weight_memory)[2 * layer_index];
  auto src_wh_f = (*concat_weight_memory)[2 * layer_index + 1];
  std::vector<void*> srcs_data_x;
  std::vector<void*> srcs_data_h;
  std::vector<mkldnn::memory::dims> src_l_dim_x;
  std::vector<mkldnn::memory::dims> src_l_dim_h;
  if (!initialized) {
    // One-time weight preparation (GRU gate reorder + layer concat).
    if (L == 1) {
      DType* wx = w_ptr;
      DType* wh = w_ptr + I * H * ngates;
      if (mode == rnn_enum::kGru) {
        AdjustGruWeightGateOrder(wx, I, H);
        AdjustGruWeightGateOrder(wh, H, H);
        AdjustGruBiasGateOrder(b_ptr, H);
        AdjustGruBiasGateOrder(b_ptr + H * ngates, H);
      }
      src_wx_f.set_data_handle(wx);
      src_wh_f.set_data_handle(wh);
    } else {
      for (int l = 0; l < L; l++) {
        DType* wx = w_ptr;
        DType* wh = w_ptr + I * H * ngates;
        DType* bx = b_ptr + l * ngates * H * 2;
        DType* bh = b_ptr + l * ngates * H * 2 + H * ngates;
        if (mode == rnn_enum::kGru) {
          AdjustGruWeightGateOrder(wx, I, H);
          AdjustGruWeightGateOrder(wh, H, H);
          AdjustGruBiasGateOrder(bx, H);
          AdjustGruBiasGateOrder(bh, H);
        }
        srcs_data_x.push_back(wx);
        srcs_data_h.push_back(wh);
        src_l_dim_x.push_back(weights_layer_r_tz);
        src_l_dim_h.push_back(weights_iter_r_tz);
        w_ptr = w_ptr + w_size;
      }
      ConcatData(mkldnn::memory::format::ldgoi, mkldnn::memory::format::ldgoi,
                 src_l_dim_x, weights_layer_tz, mkldnn_dtype, 0, srcs_data_x, src_wx_f);
      ConcatData(mkldnn::memory::format::ldgoi, mkldnn::memory::format::ldgoi,
                 src_l_dim_h, weights_iter_tz, mkldnn_dtype, 0, srcs_data_h, src_wh_f);
    }
    MKLDNNStream::Get()->RegisterPrim(reorder(src_wx_f, (*wx_memory)[layer_index]));
    MKLDNNStream::Get()->RegisterPrim(reorder(src_wh_f, (*wh_memory)[layer_index]));
    // Sum MXNet's paired biases (bx + bh) per layer into MKL-DNN's single
    // bias buffer; k re-derives the layer from the flat index j.
    DType* user_bias_f = reinterpret_cast<DType *> ((*bias_memory)[layer_index].get_data_handle());
    #pragma omp parallel for num_threads(omp_threads)
    for (int j = 0; j < L * single_b_size; j++) {
      int k = j / single_b_size;
      user_bias_f[j] = b_ptr[j + k * single_b_size] + b_ptr[j + k * single_b_size + single_b_size];
    }
  }
  rnn_cell::desc rnn_cell(nalgorithm,
      mode == rnn_enum::kRnnRelu ? algorithm::eltwise_relu : algorithm::eltwise_tanh);
  rnn_forward::desc layer_desc(prop_kind::forward_inference, rnn_cell,
                               rnn_direction::unidirectional, src_layer_md,
                               src_iter_md, weight_layer_md, weight_iter_md,
                               bias_md, dst_layer_md, dst_iter_md);
  auto prim_desc
      = rnn_forward::primitive_desc(layer_desc, cpu_engine);
  // First block reads raw input; stacked blocks chain from the previous
  // block's output memory.
  if (x_ptr && layer_index == 0) {
    (*x_memory)[layer_index].set_data_handle(x_ptr);
  } else {
    (*x_memory)[layer_index].set_data_handle((*user_src_layer_memory).get_data_handle());
  }
  (*y_memory)[layer_index].set_data_handle(y_ptr);
  // Build the primitive once per layer slot; reuse it on later calls.
  if (rnn_forward_prim->size() <= (size_t)layer_index) {
    primitive rnn_prim = rnn_forward(prim_desc, (*x_memory)[layer_index],
                                     (*hcx_memory)[layer_index], (*wx_memory)[layer_index],
                                     (*wh_memory)[layer_index], (*bias_memory)[layer_index],
                                     (*y_memory)[layer_index],
                                     (*hcy_memory)[layer_index], null_memory_);
    rnn_forward_prim->push_back(rnn_prim);
  }
  MKLDNNStream::Get()->RegisterPrim((*rnn_forward_prim)[layer_index]);
  MKLDNNStream::Get()->Submit();
  if (state_outputs) {
    // Unpack the fused dst iter back into per-layer hy (and cy for LSTM).
    DType* dst_hcy = reinterpret_cast<DType *> ((*hcy_memory)[layer_index].get_data_handle());
    if (mode == rnn_enum::kLstm) {
      for (int l = 0; l < L; l++) {
        offset1 = l * single_cell_size;
        offset2 = l * nstates * single_cell_size;
        #pragma omp parallel for num_threads(omp_threads)
        for (int n = 0; n < single_cell_size; n++) {
          hy_ptr[offset1 + n] = dst_hcy[offset2 + n];
          cy_ptr[offset1 + n] = dst_hcy[offset2 + n + single_cell_size];
        }
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (int n = 0; n < L * single_cell_size; n++) {
        hy_ptr[n] = dst_hcy[n];
      }
    }
  }
}
// Top-level MKL-DNN RNN forward driver: picks a fusion strategy based on
// directionality (D) and layer sizes, then invokes the fused kernels.
//
//   - D == 1 and I == H : all L layers run as one fused unidirectional
//     primitive.
//   - otherwise         : layer 0 runs alone (bidirectional or
//     unidirectional), then the remaining L - 1 layers run either layer
//     by layer (D == 2) or as one fused block (D == 1), chaining each
//     block's output memory into the next block's input.
//
// Sets *has_cache = true on exit so subsequent calls reuse the reordered
// weights cached inside the helpers.
template <typename DType>
static void MKLDNNRNNForward(bool state_outputs,
                             const int L,
                             const int D,
                             const int T,
                             const int N,
                             const int I,
                             const int H,
                             DType* x_ptr,
                             DType* hx_ptr,
                             DType* cx_ptr,
                             DType* w_ptr,
                             DType* b_ptr,
                             DType* y_ptr,
                             DType* hy_ptr,
                             DType* cy_ptr,
                             std::vector<mkldnn::memory> *concat_weight_memory,
                             std::vector<mkldnn::memory> *concat_iter_memory,
                             std::vector<mkldnn::memory> *x_memory,
                             std::vector<mkldnn::memory> *hcx_memory,
                             std::vector<mkldnn::memory> *wx_memory,
                             std::vector<mkldnn::memory> *wh_memory,
                             std::vector<mkldnn::memory> *bias_memory,
                             std::vector<mkldnn::memory> *y_memory,
                             std::vector<mkldnn::memory> *hcy_memory,
                             std::vector<primitive> *rnn_forward_prim,
                             bool *has_cache,
                             int dtype,
                             bool is_train,
                             int mode) {
  int ngates = 0, nstates = 0;
  GetMKLDNNRNNAlgo(mode, &ngates, &nstates);
  // Per-layer strides in the flat parameter/state blobs.
  const int b_size = 2 * H * ngates * D;
  const int cell_size = N * H * D;
  //  First layer
  int w_size = (I + H) * H * ngates * D;
  auto cpu_engine = CpuEngine::Get()->get_engine();
  auto null_memory_ = null_memory(cpu_engine);
  // Null input pointer for stacked layers: they read the previous layer's
  // output memory instead of x_ptr.
  DType* tmpNull = NULL;
  //  when D = 1 and I == H, L layers can be fused together
  if (D == 1 && I == H) {
    MKLDNNRNNForwardUnidi(state_outputs, L, T, N, I, H, x_ptr, &null_memory_,
                          hx_ptr, cx_ptr, w_ptr, b_ptr, y_ptr, hy_ptr, cy_ptr, concat_weight_memory,
                          concat_iter_memory, x_memory, hcx_memory, wx_memory, wh_memory,
                          bias_memory, y_memory, hcy_memory, rnn_forward_prim,
                          0, has_cache, dtype, is_train, mode);
  } else {
    auto user_src_layer_memory_l = null_memory_;
    // Layer 0 (slot/index 0 in all cached memory vectors).
    if (D == 2) {
      MKLDNNRNNForwardSingleLayerBi(state_outputs, T, N, I, H, x_ptr, &user_src_layer_memory_l,
                                    hx_ptr, cx_ptr, w_ptr, b_ptr, y_ptr, hy_ptr, cy_ptr, concat_weight_memory,
                                    concat_iter_memory, x_memory, hcx_memory, wx_memory, wh_memory,
                                    bias_memory, y_memory, hcy_memory, rnn_forward_prim,
                                    0, has_cache, 0, dtype, is_train, mode);
    } else {
      MKLDNNRNNForwardUnidi(state_outputs, 1, T, N, I, H, x_ptr, &user_src_layer_memory_l,
                            hx_ptr, cx_ptr, w_ptr, b_ptr, y_ptr, hy_ptr, cy_ptr, concat_weight_memory,
                            concat_iter_memory, x_memory, hcx_memory, wx_memory, wh_memory,
                            bias_memory, y_memory, hcy_memory, rnn_forward_prim,
                            0, has_cache, dtype, is_train, mode);
    }
    if (L > 1) {
      user_src_layer_memory_l = (*y_memory)[0];
      //  go to next L - 1 layers.
      //  If D = 2, do it layer by layer. If D = 1, fused L - 1 layers
      w_ptr += w_size;
      b_ptr += b_size;
      if (D == 2) {
        // Deeper bidirectional layers see the previous layer's 2*H-wide
        // output, hence the D * H input size below.
        w_size = (H * D + H) * H * ngates * D;
        for (int l = 0; l < L - 1; l++) {
          if (state_outputs) {
            hy_ptr += cell_size;
            if (mode == rnn_enum::kLstm) {
              cy_ptr += cell_size;
            }
          }
          hx_ptr += cell_size;
          if (mode == rnn_enum::kLstm) {
            cx_ptr += cell_size;
          }
          MKLDNNRNNForwardSingleLayerBi(state_outputs, T, N, D * H, H, tmpNull,
                                        &user_src_layer_memory_l, hx_ptr, cx_ptr, w_ptr, b_ptr, y_ptr, hy_ptr,
                                        cy_ptr, concat_weight_memory, concat_iter_memory, x_memory,
                                        hcx_memory, wx_memory, wh_memory, bias_memory,
                                        y_memory, hcy_memory, rnn_forward_prim,
                                        1, has_cache, l + 1, dtype, is_train, mode);
          user_src_layer_memory_l = (*y_memory)[1];
          w_ptr += w_size;
          b_ptr += b_size;
        }
      }
      if (D == 1) {
        if (state_outputs) {
          hy_ptr += cell_size;
          if (mode == rnn_enum::kLstm) {
            cy_ptr += cell_size;
          }
        }
        // Layers after the first have H-wide input, so they fuse together.
        w_size = (H + H) * H * ngates;
        MKLDNNRNNForwardUnidi(state_outputs, L - 1, T, N, H, H, tmpNull, &user_src_layer_memory_l,
                              hx_ptr, cx_ptr, w_ptr, b_ptr, y_ptr, hy_ptr, cy_ptr, concat_weight_memory,
                              concat_iter_memory, x_memory, hcx_memory, wx_memory,
                              wh_memory, bias_memory, y_memory, hcy_memory,
                              rnn_forward_prim, 1, has_cache, dtype, is_train, mode);
      }
    }
  }
  *has_cache = true;
}
// Dispatches an MKL-DNN RNN inference pass for all supported RNN variants
// (LSTM, GRU, vanilla tanh/ReLU RNN). Aborts via LOG(FATAL) on any other
// mode. All parameters are forwarded to MKLDNNRNNForward unchanged.
template <typename DType>
static void MKLDNNRNNForwardInference(bool state_outputs,
                                      const int num_layers,
                                      const int direction,
                                      const int seq_length,
                                      const int batch_size,
                                      const int input_size,
                                      const int state_size,
                                      DType* x_ptr,
                                      DType* hx_ptr,
                                      DType* cx_ptr,
                                      DType* w_ptr,
                                      DType* b_ptr,
                                      DType* y_ptr,
                                      DType* hy_ptr,
                                      DType* cy_ptr,
                                      std::vector<mkldnn::memory>* concat_weight_memory,
                                      std::vector<mkldnn::memory>* concat_iter_memory,
                                      std::vector<mkldnn::memory> *x_memory,
                                      std::vector<mkldnn::memory> *hcx_memory,
                                      std::vector<mkldnn::memory> *wx_memory,
                                      std::vector<mkldnn::memory> *wh_memory,
                                      std::vector<mkldnn::memory> *bias_memory,
                                      std::vector<mkldnn::memory> *y_memory,
                                      std::vector<mkldnn::memory> *hcy_memory,
                                      std::vector<primitive> *rnn_forward_prim,
                                      bool *has_cache,
                                      int dtype,
                                      bool is_train,
                                      int mode) {
  switch (mode) {
    case rnn_enum::kLstm:
    case rnn_enum::kGru:
    case rnn_enum::kRnnTanh:
    case rnn_enum::kRnnRelu:
      MKLDNNRNNForward<DType>(state_outputs, num_layers, direction, seq_length,
                              batch_size, input_size, state_size, x_ptr, hx_ptr,
                              cx_ptr, w_ptr, b_ptr, y_ptr, hy_ptr, cy_ptr,
                              concat_weight_memory, concat_iter_memory, x_memory,
                              hcx_memory, wx_memory, wh_memory,
                              bias_memory, y_memory, hcy_memory, rnn_forward_prim,
                              has_cache, dtype, is_train, mode);
      break;
    default:
      // Message fixed to include the separating space, matching the other
      // "unknown RNN mode " fatal logs in this file.
      LOG(FATAL) << "unknown RNN mode " << mode;
      break;
  }
}
} // namespace op
} // namespace mxnet
#endif // MXNET_USE_MKLDNN == 1
#endif // MXNET_OPERATOR_NN_MKLDNN_MKLDNN_RNN_IMPL_H_
|
openmp.h | #ifndef _OPENMP_H
#define _OPENMP_H
#if defined(_OPENMP)
#include <omp.h>
#else
/* Serial fallbacks used when this translation unit is compiled without
 * OpenMP: behave as a single "team" of one thread with id 0. */
typedef int omp_int_t;
inline omp_int_t omp_get_thread_num() { return 0;}
inline omp_int_t omp_get_num_threads() { return 1;}
inline omp_int_t omp_get_max_threads() { return 1;}
#endif
#include <complex>
// #include "complex_ops.h"
namespace basis_general
{
// Atomically accumulates a complex<double> value into a complex<T> target.
// The target is viewed as two adjacent T's (real, imag) — guaranteed by
// the std::complex layout — so each component can be updated with its own
// atomic read-modify-write.
template<class T>
inline void atomic_add(const std::complex<double> m,std::complex<T> *M){
	T *parts = reinterpret_cast<T*>(M);
	const T re = m.real();
	const T im = m.imag();
	#pragma omp atomic
	parts[0] += re;
	#pragma omp atomic
	parts[1] += im;
}
// Atomically accumulates the REAL part of a complex<double> value into a
// real-valued target. The imaginary part of m is intentionally discarded
// here (callers are expected to pass effectively-real values).
template<class T>
inline void atomic_add(const std::complex<double> m,T *M){
	const T re = m.real();
	#pragma omp atomic
	M[0] += re;
}
}
/*
namespace basis_general_addition
{
int inline atomic_add(const npy_cdouble_wrapper m,npy_cdouble_wrapper *M){
double * M_v = reinterpret_cast<double*>(M);
const double m_real = m.real;
const double m_imag = m.imag;
#pragma omp atomic
M_v[0] += m_real;
#pragma omp atomic
M_v[1] += m_imag;
return 0;
}
int inline atomic_add(const npy_cdouble_wrapper m,npy_cfloat_wrapper *M){
float * M_v = reinterpret_cast<float*>(M);
const float m_real = m.real;
const float m_imag = m.imag;
#pragma omp atomic
M_v[0] += m_real;
#pragma omp atomic
M_v[1] += m_imag;
return 0;
}
template<class T>
int inline atomic_add(const npy_cdouble_wrapper m,T *M){
if(std::abs(m.imag)>1.1e-15){
return 1;
}
else{
const T m_real = (T)m.real;
#pragma omp atomic
M[0] += m_real;
return 0;
}
}
}
*/
#endif |
omp.c | #include <stdio.h>
#include <omp.h>
int a, b, i, tid;
float x;
#pragma omp threadprivate(a, x)
/* Demonstrates OpenMP threadprivate variables: the globals a and x
 * (declared threadprivate above) keep their per-thread values across
 * parallel regions, while b — privatized only in the first region —
 * does not. */
int main(int argc, char *argv[]) {
  /* Explicitly turn off dynamic threads: a constant thread count between
   * the regions is required for threadprivate data to persist. */
  omp_set_dynamic(0);
  printf("1st Parallel Region:\n");
  #pragma omp parallel private(b,tid)
  {
    tid = omp_get_thread_num();
    a = tid;
    b = tid;
    x = 1.1 * tid +1.0;
    printf("Thread %d: a,b,x= %d %d %f\n",tid,a,b,x);
  } /* end of parallel region */
  printf("************************************\n");
  printf("Master thread doing serial work here\n");
  printf("************************************\n");
  printf("2nd Parallel Region:\n");
  #pragma omp parallel private(tid)
  {
    tid = omp_get_thread_num();
    /* a and x retain each thread's values from the first region; b here
     * is the shared global (the first region wrote only private copies),
     * so it still holds its zero-initialized value. */
    printf("Thread %d: a,b,x= %d %d %f\n",tid,a,b,x);
  } /* end of parallel region */
  return 0;
}
|
space_to_batch.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_KERNELS_SPACE_TO_BATCH_H_
#define MACE_KERNELS_SPACE_TO_BATCH_H_
#include <memory>
#include <vector>
#include <algorithm>
#include "mace/core/future.h"
#include "mace/core/tensor.h"
#include "mace/public/mace.h"
#ifdef MACE_ENABLE_OPENCL
#include "mace/core/runtime/opencl/cl2_header.h"
#endif // MACE_ENABLE_OPENCL
namespace mace {
namespace kernels {
struct SpaceToBatchFunctorBase {
SpaceToBatchFunctorBase(const std::vector<int> &paddings,
const std::vector<int> &block_shape,
bool b2s)
: paddings_(paddings.begin(), paddings.end()),
block_shape_(block_shape.begin(), block_shape.end()),
b2s_(b2s) {
MACE_CHECK(
block_shape.size() == 2 && block_shape[0] > 1 && block_shape[1] > 1,
"Block's shape should be 1D, and greater than 1");
MACE_CHECK(paddings.size() == 4, "Paddings' shape should be 2D");
}
std::vector<int> paddings_;
std::vector<int> block_shape_;
bool b2s_;
protected:
void CalculateSpaceToBatchOutputShape(const Tensor *input_tensor,
const DataFormat data_format,
index_t *output_shape) {
MACE_CHECK(input_tensor->dim_size() == 4, "Input's shape should be 4D");
index_t batch = input_tensor->dim(0);
index_t channels = 0;
index_t height = 0;
index_t width = 0;
if (data_format == DataFormat::NHWC) {
height = input_tensor->dim(1);
width = input_tensor->dim(2);
channels = input_tensor->dim(3);
} else if (data_format == DataFormat::NCHW) {
height = input_tensor->dim(2);
width = input_tensor->dim(3);
channels = input_tensor->dim(1);
} else {
MACE_NOT_IMPLEMENTED;
}
index_t padded_height = height + paddings_[0] + paddings_[1];
index_t padded_width = width + paddings_[2] + paddings_[3];
MACE_CHECK(padded_height % block_shape_[0] == 0, "padded input height",
padded_height, " is not divisible by block height");
MACE_CHECK(padded_width % block_shape_[1] == 0, "padded input width",
padded_height, " is not divisible by block width");
index_t new_batch = batch * block_shape_[0] * block_shape_[1];
index_t new_height = padded_height / block_shape_[0];
index_t new_width = padded_width / block_shape_[1];
if (data_format == DataFormat::NHWC) {
output_shape[0] = new_batch;
output_shape[1] = new_height;
output_shape[2] = new_width;
output_shape[3] = channels;
} else {
output_shape[0] = new_batch;
output_shape[1] = channels;
output_shape[2] = new_height;
output_shape[3] = new_width;
}
}
void CalculateBatchToSpaceOutputShape(const Tensor *input_tensor,
const DataFormat data_format,
index_t *output_shape) {
MACE_CHECK(input_tensor->dim_size() == 4, "Input's shape should be 4D");
index_t batch = input_tensor->dim(0);
index_t channels = 0;
index_t height = 0;
index_t width = 0;
if (data_format == DataFormat::NHWC) {
height = input_tensor->dim(1);
width = input_tensor->dim(2);
channels = input_tensor->dim(3);
} else if (data_format == DataFormat::NCHW) {
height = input_tensor->dim(2);
width = input_tensor->dim(3);
channels = input_tensor->dim(1);
} else {
MACE_NOT_IMPLEMENTED;
}
index_t new_batch = batch / block_shape_[0] / block_shape_[1];
index_t new_height = height * block_shape_[0] - paddings_[0] - paddings_[1];
index_t new_width = width * block_shape_[1] - paddings_[2] - paddings_[3];
if (data_format == DataFormat::NHWC) {
output_shape[0] = new_batch;
output_shape[1] = new_height;
output_shape[2] = new_width;
output_shape[3] = channels;
} else {
output_shape[0] = new_batch;
output_shape[1] = channels;
output_shape[2] = new_height;
output_shape[3] = new_width;
}
}
};
template<DeviceType D, typename T>
struct SpaceToBatchFunctor;
// CPU float implementation (NCHW layout). Performs either batch->space or
// space->batch depending on b2s_, tiling the spatial loop in blocks of
// rows to keep the working set cache-resident.
template<>
struct SpaceToBatchFunctor<DeviceType::CPU, float> : SpaceToBatchFunctorBase {
  SpaceToBatchFunctor(const std::vector<int> &paddings,
                      const std::vector<int> &block_shape,
                      bool b2s)
      : SpaceToBatchFunctorBase(paddings, block_shape, b2s) {}
  // Resizes the destination tensor and copies/scatters the data. For
  // b2s_ == true, batch_tensor is the input and space_tensor the output;
  // otherwise the roles are reversed.
  MaceStatus operator()(Tensor *space_tensor,
                        Tensor *batch_tensor,
                        StatsFuture *future) {
    MACE_UNUSED(future);
    std::vector<index_t> output_shape(4, 0);
    if (b2s_) {
      CalculateBatchToSpaceOutputShape(batch_tensor,
                                       DataFormat::NCHW,
                                       output_shape.data());
      MACE_RETURN_IF_ERROR(space_tensor->Resize(output_shape));
    } else {
      CalculateSpaceToBatchOutputShape(space_tensor,
                                       DataFormat::NCHW,
                                       output_shape.data());
      MACE_RETURN_IF_ERROR(batch_tensor->Resize(output_shape));
    }
    Tensor::MappingGuard input_guard(space_tensor);
    Tensor::MappingGuard output_guard(batch_tensor);
    int pad_top = paddings_[0];
    int pad_left = paddings_[2];
    int block_shape_h = block_shape_[0];
    int block_shape_w = block_shape_[1];
    if (b2s_) {
      // batch -> space: gather strided samples back into the spatial grid.
      const float *input_data = batch_tensor->data<float>();
      float *output_data = space_tensor->mutable_data<float>();
      index_t in_batches = batch_tensor->dim(0);
      index_t in_height = batch_tensor->dim(2);
      index_t in_width = batch_tensor->dim(3);
      index_t out_batches = space_tensor->dim(0);
      index_t channels = space_tensor->dim(1);
      index_t out_height = space_tensor->dim(2);
      index_t out_width = space_tensor->dim(3);
      // Row-tile size chosen so one tile's output (~32KB of floats) fits
      // in cache: 32k/sizeof(float)/out_width/block_shape.
      index_t
          block_h_size =
          std::max(static_cast<index_t>(1), 8 * 1024 / block_shape_w / out_width);
      //  make channel outter loop so we can make best use of cache
      #pragma omp parallel for collapse(3)
      for (index_t c = 0; c < channels; ++c) {
        for (index_t block_h = 0; block_h < in_height;
             block_h += block_h_size) {
          for (index_t in_b = 0; in_b < in_batches; ++in_b) {
            // Each input batch maps to an output batch plus a (tile_h,
            // tile_w) offset inside the block grid.
            const index_t b = in_b % out_batches;
            const index_t tile_index = in_b / out_batches;
            const index_t tile_h = tile_index / block_shape_w;
            const index_t tile_w = tile_index % block_shape_w;
            // Clip the tile to rows/cols that land inside the (unpadded)
            // output area.
            const index_t valid_h_start = std::max(block_h,
                                                   (pad_top - tile_h
                                                       + block_shape_h - 1)
                                                       / block_shape_h);
            const index_t valid_h_end = std::min(in_height,
                                                 std::min(
                                                     block_h + block_h_size,
                                                     (out_height + pad_top
                                                         - tile_h
                                                         + block_shape_h - 1)
                                                         / block_shape_h));
            const index_t valid_w_start = std::max(static_cast<index_t>(0),
                                                   (pad_left - tile_w
                                                       + block_shape_w - 1)
                                                       / block_shape_w);
            const index_t valid_w_end = std::min(in_width,
                                                 (out_width + pad_left - tile_w
                                                     + block_shape_w - 1)
                                                     / block_shape_w);
            const float *input_base =
                input_data + (in_b * channels + c) * in_height * in_width;
            float *output_base =
                output_data + (b * channels + c) * out_height * out_width;
            index_t h = valid_h_start * block_shape_h + tile_h - pad_top;
            for (index_t in_h = valid_h_start; in_h < valid_h_end; ++in_h) {
              index_t w = valid_w_start * block_shape_w + tile_w - pad_left;
              for (index_t in_w = valid_w_start; in_w < valid_w_end; ++in_w) {
                output_base[h * out_width + w] =
                    input_base[in_h * in_width + in_w];
                w += block_shape_w;
              }  // w
              h += block_shape_h;
            }  // h
          }  // b
        }  // block_h
      }  // c
    } else {
      // space -> batch: scatter the spatial grid into strided batches,
      // zero-filling the padded border regions as we go.
      const float *input_data = space_tensor->data<float>();
      float *output_data = batch_tensor->mutable_data<float>();
      index_t in_batches = space_tensor->dim(0);
      index_t in_height = space_tensor->dim(2);
      index_t in_width = space_tensor->dim(3);
      index_t out_batches = batch_tensor->dim(0);
      index_t channels = batch_tensor->dim(1);
      index_t out_height = batch_tensor->dim(2);
      index_t out_width = batch_tensor->dim(3);
      // Same cache-sized row tiling as the b2s branch.
      index_t block_h_size =
          std::max(static_cast<index_t>(1), 8 * 1024 / block_shape_w / in_width);
      //  make channel outter loop so we can make best use of cache
      #pragma omp parallel for collapse(3)
      for (index_t c = 0; c < channels; ++c) {
        for (index_t block_h = 0; block_h < out_height;
             block_h += block_h_size) {
          for (index_t b = 0; b < out_batches; ++b) {
            const index_t in_b = b % in_batches;
            const index_t tile_index = b / in_batches;
            const index_t tile_h = tile_index / block_shape_w;
            const index_t tile_w = tile_index % block_shape_w;
            const index_t valid_h_start = std::max(block_h,
                                                   (pad_top - tile_h
                                                       + block_shape_h - 1)
                                                       / block_shape_h);
            const index_t valid_h_end = std::min(out_height,
                                                 std::min(
                                                     block_h + block_h_size,
                                                     (in_height + pad_top
                                                         - tile_h
                                                         + block_shape_h - 1)
                                                         / block_shape_h));
            const index_t valid_w_start = std::max(static_cast<index_t>(0),
                                                   (pad_left - tile_w
                                                       + block_shape_w - 1)
                                                       / block_shape_w);
            const index_t valid_w_end = std::min(out_width,
                                                 (in_width + pad_left - tile_w
                                                     + block_shape_w - 1)
                                                     / block_shape_w);
            const float *input_base =
                input_data + (in_b * channels + c) * in_height * in_width;
            float *output_base =
                output_data + (b * channels + c) * out_height * out_width;
            // Zero the rows above the valid region of this tile.
            memset(output_base + block_h * out_width,
                   0,
                   (valid_h_start - block_h) * out_width * sizeof(float));
            index_t in_h = valid_h_start * block_shape_h + tile_h - pad_top;
            for (index_t h = valid_h_start; h < valid_h_end; ++h) {
              // Zero the left padding of this row.
              memset(output_base + h * out_width,
                     0,
                     valid_w_start * sizeof(float));
              index_t in_w = valid_w_start * block_shape_w + tile_w - pad_left;
              for (index_t w = valid_w_start; w < valid_w_end; ++w) {
                output_base[h * out_width + w] =
                    input_base[in_h * in_width + in_w];
                in_w += block_shape_w;
              }  // w
              in_h += block_shape_h;
              // Zero the right padding of this row.
              memset(output_base + h * out_width + valid_w_end,
                     0,
                     (out_width - valid_w_end) * sizeof(float));
            }  // h
            // Zero the rows below the valid region of this tile.
            memset(output_base + valid_h_end * out_width,
                   0,
                   (std::min(out_height, block_h + block_h_size) - valid_h_end)
                       * out_width * sizeof(float));
          }  // b
        }  // block_h
      }  // c
    }
    return MACE_SUCCESS;
  }
};
#ifdef MACE_ENABLE_OPENCL
// GPU (OpenCL) specialization of SpaceToBatchFunctor.  The kernel launch is
// implemented out-of-line (in the OpenCL .cc file); this declaration only
// carries cached OpenCL state between invocations.
template <typename T>
struct SpaceToBatchFunctor<DeviceType::GPU, T> : SpaceToBatchFunctorBase {
  SpaceToBatchFunctor(const std::vector<int> &paddings,
                      const std::vector<int> &block_shape,
                      bool b2s)
      : SpaceToBatchFunctorBase(paddings, block_shape, b2s) {}
  // Runs space-to-batch (or batch-to-space, per the b2s flag) on the GPU.
  MaceStatus operator()(Tensor *space_tensor,
                        Tensor *batch_tensor,
                        StatsFuture *future);
  cl::Kernel kernel_;  // cached compiled OpenCL kernel
  uint32_t kwg_size_;  // NOTE(review): presumably max kernel workgroup size -- confirm
  std::unique_ptr<BufferBase> kernel_error_;  // NOTE(review): device-side error buffer -- confirm
  std::vector<index_t> space_shape_;  // NOTE(review): last-seen space shape, for rebuild checks -- confirm
};
#endif // MACE_ENABLE_OPENCL
} // namespace kernels
} // namespace mace
#endif // MACE_KERNELS_SPACE_TO_BATCH_H_
|
core_strmm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztrmm.c, normal z -> s, Fri Sep 28 17:38:23 2018
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
/***************************************************************************//**
*
* @ingroup core_trmm
*
* Performs a triangular matrix-matrix multiply of the form
*
* \f[B = \alpha [op(A) \times B] \f], if side = PlasmaLeft or
* \f[B = \alpha [B \times op(A)] \f], if side = PlasmaRight
*
* where op( X ) is one of:
*
 * - op(A) = A   or
 * - op(A) = A^T or
 * - op(A) = A^H (identical to A^T in real arithmetic; duplicated by the
 *                complex->real code generator)
*
* alpha is a scalar, B is an m-by-n matrix and A is a unit or non-unit, upper
* or lower triangular matrix.
*
*******************************************************************************
*
* @param[in] side
* Specifies whether op( A ) appears on the left or on the right of B:
* - PlasmaLeft: alpha*op( A )*B
* - PlasmaRight: alpha*B*op( A )
*
* @param[in] uplo
* Specifies whether the matrix A is upper triangular or lower
* triangular:
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] transa
* Specifies whether the matrix A is transposed, not transposed or
* conjugate transposed:
 * - PlasmaNoTrans: A is not transposed;
 * - PlasmaTrans: A is transposed;
 * - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* Specifies whether or not A is unit triangular:
* - PlasmaNonUnit: A is non-unit triangular;
* - PlasmaUnit: A is unit triangular.
*
* @param[in] m
* The number of rows of matrix B.
* m >= 0.
*
* @param[in] n
* The number of columns of matrix B.
* n >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* The triangular matrix A of dimension lda-by-k, where k is m when
* side='L' or 'l' and k is n when when side='R' or 'r'. If uplo =
* PlasmaUpper, the leading k-by-k upper triangular part of the array
* A contains the upper triangular matrix, and the strictly lower
* triangular part of A is not referenced. If uplo = PlasmaLower, the
* leading k-by-k lower triangular part of the array A contains the
* lower triangular matrix, and the strictly upper triangular part of
* A is not referenced. If diag = PlasmaUnit, the diagonal elements of
* A are also not referenced and are assumed to be 1.
*
* @param[in] lda
* The leading dimension of the array A. When side='L' or 'l',
* lda >= max(1,m), when side='R' or 'r' then lda >= max(1,n).
*
* @param[in,out] B
* On entry, the matrix B of dimension ldb-by-n.
* On exit, the result of a triangular matrix-matrix multiply
* ( alpha*op(A)*B ) or ( alpha*B*op(A) ).
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
******************************************************************************/
__attribute__((weak))
void plasma_core_strmm(
    plasma_enum_t side, plasma_enum_t uplo,
    plasma_enum_t transa, plasma_enum_t diag,
    int m, int n,
    float alpha, const float *A, int lda,
    float *B, int ldb)
{
    // Translate the PLASMA enums to their CBLAS counterparts (the enum values
    // are defined to be cast-compatible) and delegate the whole triangular
    // multiply B = alpha*op(A)*B or B = alpha*B*op(A) to the vendor BLAS.
    const CBLAS_SIDE      cblas_side   = (CBLAS_SIDE)side;
    const CBLAS_UPLO      cblas_uplo   = (CBLAS_UPLO)uplo;
    const CBLAS_TRANSPOSE cblas_transa = (CBLAS_TRANSPOSE)transa;
    const CBLAS_DIAG      cblas_diag   = (CBLAS_DIAG)diag;

    cblas_strmm(CblasColMajor,
                cblas_side, cblas_uplo, cblas_transa, cblas_diag,
                m, n,
                alpha, A, lda,
                B, ldb);
}
/******************************************************************************/
void plasma_core_omp_strmm(
    plasma_enum_t side, plasma_enum_t uplo,
    plasma_enum_t transa, plasma_enum_t diag,
    int m, int n,
    float alpha, const float *A, int lda,
    float *B, int ldb,
    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // The triangular factor A is m-by-m when applied from the left and
    // n-by-n when applied from the right.
    int ak = (side == PlasmaLeft) ? m : n;

    // Submit one OpenMP task; the depend clauses cover the column-major
    // panels actually touched (A read-only, B read-write).
    #pragma omp task depend(in:A[0:lda*ak]) \
                     depend(inout:B[0:ldb*n])
    {
        // Skip the work if an earlier task in this sequence already failed.
        if (sequence->status == PlasmaSuccess) {
            plasma_core_strmm(side, uplo,
                              transa, diag,
                              m, n,
                              alpha, A, lda,
                              B, ldb);
        }
    }
}
|
memdbg.h | /*
* Based on John the Ripper and modified to integrate with aircrack
*
* John the Ripper copyright and license.
*
* John the Ripper password cracker,
* Copyright (c) 1996-2013 by Solar Designer.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* As a special exception to the GNU General Public License terms,
* permission is hereby granted to link the code of this program, with or
* without modification, with any version of the OpenSSL library and/or any
* version of unRAR, and to distribute such linked combinations. You must
* obey the GNU GPL in all respects for all of the code used other than
* OpenSSL and unRAR. If you modify this program, you may extend this
* exception to your version of the program, but you are not obligated to
* do so. (In other words, you may release your derived work under pure
* GNU GPL version 2 or later as published by the FSF.)
*
* (This exception from the GNU GPL is not required for the core tree of
* John the Ripper, but arguably it is required for -jumbo.)
*
* Relaxed terms for certain components.
*
* In addition or alternatively to the license above, many components are
* available to you under more relaxed terms (most commonly under cut-down
* BSD license) as specified in the corresponding source files.
*
* For more information on John the Ripper licensing please visit:
*
* http://www.openwall.com/john/doc/LICENSE.shtml
*
* This header file should be the LAST header file included within every
* .c file within the project. If there are .h files that have actual
* code in them, then this header should be the last include within that
* .h file, and that .h file should be the last one included within the
* .c file.
* ****** NOTE *****
*/
#if !defined (__MEM_DBG_H_)
#define __MEM_DBG_H_
// values to use within the MemDbg_Validate() function.
#define MEMDBG_VALIDATE_MIN 0
#define MEMDBG_VALIDATE_DEEP 1
#define MEMDBG_VALIDATE_DEEPER 2
#define MEMDBG_VALIDATE_DEEPEST 3
#include <stdio.h>
#include <stdlib.h>
#if (!AC_BUILT || HAVE_UNISTD_H) && !_MSC_VER
#include <unistd.h>
#endif
#include <string.h>
#if defined (MEMDBG_ON)
/*
* This software was written by Jim Fougeron jfoug AT cox dot net
* in 2013. No copyright is claimed, and the software is hereby
* placed in the public domain. In case this attempt to disclaim
* copyright and place the software in the public domain is deemed
* null and void, then the software is Copyright (c) 2013 Jim Fougeron
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*/
/*
* memdbg.h
* Memory management debugging (at runtime)
*
* memdbg contains routines detect, and report memory
* problems, such as double frees, passing bad pointers to
* free, most buffer overwrites. Also, tracking of non-freed
* data, showing memory leaks, can also be shown.
*
* Compilation Options (provided from Makefile CFLAGS)
*
* MEMDBG_ON If this is NOT defined, then memdbg will
* get out of your way, and most normal memory functions
* will be called with no overhead at all.
*/
/* these functions can be called by client code. Normally Memdbg_Used() and
* MemDbg_Display() would be called at program exit. That will dump a list
* of any memory that was not released. The MemDbg_Validate() can be called
* pretty much any time. That function will walk the memory allocation linked
 * lists, and squawk if there are problems, such as overwrites, freed memory that
* has been written to, etc. It would likely be good to call MemDbg_Validate()
* within benchmarking, after every format is tested.
*
* TODO: Add a handle that can be passed to the MemDbg_Used() and MemDbg_Display()
* and a function to get the 'current' state of memory as a handle. Thus, a
* format self test could get a handle BEFORE starting, and then check after, and
* ONLY show leaked memory from the time the handle was obtained, which was at the
* start of the self test. Thus it would only show leaks from that format test.
*
* These functions are NOT thread safe. Do not call them within OMP blocks of code.
* Normally, these would be called at program exit, or within things like format
* self test code, etc, and not within OMP. But this warning is here, so that
* it is known NOT to call within OMP.
*/
extern size_t MemDbg_Used(int show_freed);
extern void MemDbg_Display(FILE *);
extern void MemDbg_Validate(int level);
extern void MemDbg_Validate_msg(int level, const char *pMsg);
extern void MemDbg_Validate_msg2(int level, const char *pMsg, int bShowExData);
/* these functions should almost NEVER be called by any client code. They
* are listed here, because the macros need to know their names. Client code
* should almost ALWAYS call malloc() like normal, vs calling MEMDBG_alloc()
* If MEMDBG_alloc() was called, and MEMDBG_ON was not defined, then this
* function would not be declared here, AND at link time, the function would
* not be found.
* NOTE, these functions should be thread safe in OMP builds (using #pragma omp atomic)
* also note, memory allocation within OMP blocks SHOULD be avoided if possible. It is
* very slow, and the thread safety required makes it even slow. This is not only talking
* about these functions here, BUT malloc/free in general in OMP blocks. AVOID doing that
* at almost all costs, and performance will usually go up.
*/
/* Hooked allocation primitives.  Client code keeps calling the normal
 * malloc()/calloc()/realloc()/free()/strdup(); the macros below redirect
 * those calls here so the file and line of each call site get recorded. */
extern void *MEMDBG_alloc(size_t, char *, int);
extern void *MEMDBG_calloc(size_t, size_t, char *, int);
extern void *MEMDBG_realloc(const void *, size_t, char *, int);
extern void MEMDBG_free(const void *, char *, int);
extern char *MEMDBG_strdup(const char *, char *, int);
#if !defined(__MEMDBG__)
/* we get here on every file compiled EXCEPT memdbg.c */
#undef malloc
#undef calloc
#undef realloc
#undef free
#undef strdup
#undef libc_free
#undef libc_calloc
#undef libc_malloc
#define libc_free(a) do {if(a) MEMDBG_libc_free(a); a=0; } while(0)
#define libc_malloc(a) MEMDBG_libc_alloc(a)
#define libc_calloc(a) MEMDBG_libc_calloc(a)
#define malloc(a) MEMDBG_alloc((a),__FILE__,__LINE__)
/* BUG FIX: calloc() takes two arguments (count, size).  The old one-argument
 * macro broke every real calloc(n, size) call site at expansion time and
 * forwarded to MEMDBG_calloc with the wrong arity (it was also never
 * declared above, and never #undef'd before redefinition). */
#define calloc(a,b) MEMDBG_calloc((a),(b),__FILE__,__LINE__)
#define realloc(a,b) MEMDBG_realloc((a),(b),__FILE__,__LINE__)
/* this code mimics JtR's FREE_MEM(a) but does it for any MEMDBG_free(a,F,L) call (a hooked free(a) call) */
#define free(a) do { if (a) MEMDBG_free((a),__FILE__,__LINE__); a=0; } while(0)
#define strdup(a) MEMDBG_strdup((a),__FILE__,__LINE__)
#endif
/* pass the file handle to write to (normally stderr) */
#define MEMDBG_PROGRAM_EXIT_CHECKS(a) do { \
if (MemDbg_Used(0) > 0) MemDbg_Display(a); \
MemDbg_Validate_msg2(MEMDBG_VALIDATE_DEEPEST, "At Program Exit", 1); } while(0)
/* Snapshot handle returned by MEMDBG_getSnapshot() and consumed by the
 * MEMDBG_checkSnapshot*() functions below. */
typedef struct MEMDBG_HANDLE_t {
    unsigned id;        /* caller-supplied snapshot id */
    unsigned alloc_cnt; /* allocation count at snapshot time -- TODO confirm against memdbg.c */
    size_t mem_size;    /* allocated bytes at snapshot time -- TODO confirm against memdbg.c */
} MEMDBG_HANDLE;
/*
* these functions allow taking a memory snapshot, calling some code, then validating that memory
* is the same after the code. This will help catch memory leaks and other such problems, within
* formats and such. Simply get the snapshot, run self tests (or other), when it exits, check
* the snapshot to make sure nothing leaked.
*/
/* Returning a struct (or passing it as a parameter) is not super efficient, but this is done so infrequently that it is not an issue. */
MEMDBG_HANDLE MEMDBG_getSnapshot(int id);
/* will not exit on leaks. Does exit, on memory overwrite corruption. */
void MEMDBG_checkSnapshot(MEMDBG_HANDLE);
/* same as MEMDBG_checkSnapshot() but if exit_on_any_leaks is true, will also exit if leaks found. */
void MEMDBG_checkSnapshot_possible_exit_on_error(MEMDBG_HANDLE, int exit_on_any_leaks);
/*
* the allocations from mem_alloc_tiny() must call this function to flag the memory they allocate
* so it is not flagged as a leak, by these HANDLE snapshot functions. 'tiny' memory is expected
* to leak, until program exit. At that time, any that was not freed, will be shown as leaked.
* THIS function is also thread safe. The other checkSnapshot functions are NOT thread safe.
*/
void MEMDBG_tag_mem_from_alloc_tiny(void *);
#else
/* NOTE, we DO keep one special function here. We make free a little
* smarter. this function gets used, even when we do NOT compile with
* any memory debugging on. This makes free work more like C++ delete,
* in that it is valid to call it on a NULL. Also, it sets the pointer
* to NULL, so that we can call free(x) on x multiple times, without
* causing a crash. NOTE, the multiple frees SHOULD be caught when
* someone builds and runs with MEMDBG_ON. But when it is off, we do
* try to protect the program.
*/
/* MEMDBG_ON is NOT defined: map the libc_* wrappers straight to the thin
 * MEMDBG_libc_* pass-throughs (declared at the bottom of this header). */
#undef libc_free
#undef libc_calloc
#undef libc_malloc
#define libc_free(a) do {if(a) MEMDBG_libc_free(a); a=0; } while(0)
#define libc_malloc(a) MEMDBG_libc_alloc(a)
#define libc_calloc(a) MEMDBG_libc_calloc(a)
#if !defined(__MEMDBG__)
/* this code mimics JtR's FREE_MEM(a) but does it for any normal free(a) call */
//extern void MEMDBG_off_free(void *a);
//#define free(a) do { if(a) MEMDBG_off_free(a); a=0; } while(0)
#endif
/* With debugging off, all the reporting/validation entry points compile away
 * to nothing (MemDbg_Used reports zero bytes in use). */
#define MemDbg_Used(a) 0
#define MemDbg_Display(a)
#define MemDbg_Validate(a)
#define MemDbg_Validate_msg(a,b)
#define MemDbg_Validate_msg2(a,b,c)
#define MEMDBG_PROGRAM_EXIT_CHECKS(a)
#define MEMDBG_tag_mem_from_alloc_tiny(a)
#define MEMDBG_HANDLE int
/* The no-op snapshot checks still "use" their argument (printf of a space
 * then backspace) -- NOTE(review): apparently to silence unused-variable
 * warnings at call sites; confirm before simplifying. */
#define MEMDBG_getSnapshot(a) 0
#define MEMDBG_checkSnapshot(a) if(a) printf(" \b")
#define MEMDBG_checkSnapshot_possible_exit_on_error(a, b) if(a) printf(" \b")
#endif /* MEMDBG_ON */
extern void MEMDBG_libc_free(void *);
extern void *MEMDBG_libc_alloc(size_t size);
extern void *MEMDBG_libc_calloc(size_t size);
#endif /* __MEMDBG_H_ */
|
curvilinear_parity_and_outer_boundary_conditions.h |
// First we define the struct that will be used to store the 10 parity conditions at all gridpoints:
// We store the 10 parity conditions in a struct consisting of 10 integers, one for each condition.
// Note that these conditions can only take one of two values: +1 or -1.
// Per-gridpoint parity data: 10 signs, one per tensor-component parity type.
// Each entry is strictly +1 or -1 (validated when the table is built below).
typedef struct parity_conditions {
    int8_t parity[10];
} parity_condition;
// Per-gridpoint ghost-zone map: grid indices of the unique interior
// ("inbounds") image of this point.  i0 == -1 flags a point that maps to
// itself (i.e., not an inner/parity boundary point).
typedef struct ghostzone_map {
    short i0,i1,i2;
} gz_map;
// Evaluate the 10 parity factors for the point (xx0,xx1,xx2) relative to its
// inbounds image (xx0_inbounds,xx1_inbounds,xx2_inbounds).  The generated
// file set_parity_conditions.h fills parity[0..9]; each result should be
// +/-1 to ~8 significant digits (the caller below checks this).
// Note: despite the name, this fills a REAL array, not a parity_condition.
void set_bc_parity_conditions(REAL parity[10], const REAL xx0,const REAL xx1,const REAL xx2,
                              const REAL xx0_inbounds,const REAL xx1_inbounds,const REAL xx2_inbounds) {
#include "set_parity_conditions.h"
}
// Build, for EVERY gridpoint, (a) the map from each point to its unique
// "inbounds" interior image (bc_gz_map) and (b) the 10 parity signs to apply
// when copying data from that image (bc_parity_conditions).  Points that map
// to themselves (interior and outer-boundary points) are flagged with
// i0 = -1 and trivial +1 parities.  (xxmax is currently unused here.)
void set_up_bc_gz_map_and_parity_conditions(const int Nxx_plus_2NGHOSTS[3], REAL *xx[3],
                                            const REAL dxx[3], const REAL xxmin[3], const REAL xxmax[3],
                                            gz_map *bc_gz_map, parity_condition *bc_parity_conditions) {
  LOOP_REGION(0,Nxx_plus_2NGHOSTS[0],0,Nxx_plus_2NGHOSTS[1],0,Nxx_plus_2NGHOSTS[2]) {
    // Map this point's curvilinear coordinate to Cartesian.
    REAL xCart[3];
    xxCart(xx, i0,i1,i2, xCart);
    REAL Cartx = xCart[0];
    REAL Carty = xCart[1];
    REAL Cartz = xCart[2];
    // Invert back to the unique inbounds curvilinear coordinate.
    // (Cart_to_xx.h presumably reads Cartx/Carty/Cartz and writes the
    // Cart_to_xx*_inbounds variables -- generated code, not visible here.)
    REAL Cart_to_xx0_inbounds,Cart_to_xx1_inbounds,Cart_to_xx2_inbounds;
#include "Cart_to_xx.h"
    // Round the inbounds coordinate to the nearest grid index.  The formula
    // inverts the cell-centered grid layout xx[i] = xxmin + (i - NGHOSTS + 1/2)*dxx.
    int i0_inbounds = (int)( (Cart_to_xx0_inbounds - xxmin[0] - (1.0/2.0)*dxx[0] + ((REAL)NGHOSTS)*dxx[0])/dxx[0] + 0.5 );
    int i1_inbounds = (int)( (Cart_to_xx1_inbounds - xxmin[1] - (1.0/2.0)*dxx[1] + ((REAL)NGHOSTS)*dxx[1])/dxx[1] + 0.5 );
    int i2_inbounds = (int)( (Cart_to_xx2_inbounds - xxmin[2] - (1.0/2.0)*dxx[2] + ((REAL)NGHOSTS)*dxx[2])/dxx[2] + 0.5 );
    // Sanity check: the rounded inbounds index must land on the same
    // Cartesian point as the original gridpoint; abort otherwise.
    REAL xCart_orig[3]; for(int ii=0;ii<3;ii++) xCart_orig[ii] = xCart[ii];
    xxCart(xx, i0_inbounds,i1_inbounds,i2_inbounds, xCart);
#define EPS_ABS 1e-8
    if(fabs( (double)(xCart_orig[0] - xCart[0]) ) > EPS_ABS ||
       fabs( (double)(xCart_orig[1] - xCart[1]) ) > EPS_ABS ||
       fabs( (double)(xCart_orig[2] - xCart[2]) ) > EPS_ABS) {
      printf("Error. Cartesian disagreement: ( %.15e %.15e %.15e ) != ( %.15e %.15e %.15e )\n",
             (double)xCart_orig[0],(double)xCart_orig[1],(double)xCart_orig[2],
             (double)xCart[0],(double)xCart[1],(double)xCart[2]);
      exit(1);
    }
    if(i0_inbounds-i0 == 0 && i1_inbounds-i1 == 0 && i2_inbounds-i2 == 0) {
      // Point maps to itself: mark with the i0 = -1 sentinel and set all
      // parities to +1 (no sign flip needed).
      bc_gz_map[IDX3(i0,i1,i2)].i0=-1;
      bc_gz_map[IDX3(i0,i1,i2)].i1=-1;
      bc_gz_map[IDX3(i0,i1,i2)].i2=-1;
      for(int which_parity=0; which_parity<10; which_parity++) {
        bc_parity_conditions[IDX3(i0,i1,i2)].parity[which_parity] = 1;
      }
    } else {
      // Inner (parity) boundary point: record its inbounds image and
      // evaluate the 10 parity signs for the coordinate pair.
      bc_gz_map[IDX3(i0,i1,i2)].i0=i0_inbounds;
      bc_gz_map[IDX3(i0,i1,i2)].i1=i1_inbounds;
      bc_gz_map[IDX3(i0,i1,i2)].i2=i2_inbounds;
      const REAL xx0 = xx[0][i0];
      const REAL xx1 = xx[1][i1];
      const REAL xx2 = xx[2][i2];
      const REAL xx0_inbounds = xx[0][i0_inbounds];
      const REAL xx1_inbounds = xx[1][i1_inbounds];
      const REAL xx2_inbounds = xx[2][i2_inbounds];
      REAL REAL_parity_array[10];
      set_bc_parity_conditions(REAL_parity_array, xx0,xx1,xx2, xx0_inbounds,xx1_inbounds,xx2_inbounds);
      for(int whichparity=0;whichparity<10;whichparity++) {
        //printf("Good? Parity %d evaluated to %e\n",whichparity,REAL_parity_array[whichparity]);
        // Perform sanity check on parity array output: should be +1 or -1 to within 8 significant digits:
        if( (REAL_parity_array[whichparity]  > 0 && fabs(REAL_parity_array[whichparity] - (+1)) > 1e-8) ||
            (REAL_parity_array[whichparity] <= 0 && fabs(REAL_parity_array[whichparity] - (-1)) > 1e-8) ) {
          printf("Error. Parity evaluated to %e , which is not within 8 significant digits of +1 or -1.",REAL_parity_array[whichparity]);
          exit(1);
        }
        // Quantize the REAL parity to the int8_t table entry.
        if(REAL_parity_array[whichparity] < 0.0) bc_parity_conditions[IDX3(i0,i1,i2)].parity[whichparity] = -1;
        if(REAL_parity_array[whichparity] > 0.0) bc_parity_conditions[IDX3(i0,i1,i2)].parity[whichparity] = +1;
      }
    }
  }
}
// Part P6: Declare boundary condition OB_UPDATE macro,
// which updates a single face of the 3D grid cube
// using quadratic polynomial extrapolation.
// Face-orientation selectors passed to OB_UPDATE as FACEX0/FACEX1/FACEX2:
// MINFACE (+1) steps inward from a lower face, MAXFACE (-1) steps inward
// from an upper face, NUL (0) means this axis is not the face normal.
const int MAXFACE = -1;
const int  NUL   = +0;
const int MINFACE = +1;
// OB_UPDATE fills one face of ghost-zone points for gridfunction which_gf:
//  * inner==0 (outer-boundary points, bc_gz_map[].i0 == -1): quadratic
//    polynomial extrapolation along the face normal using the three nearest
//    already-set points, coefficients (+3, -3, +1).
//  * inner==1 (inner/parity boundary points, bc_gz_map[].i0 != -1): copy from
//    the point's inbounds image, times the parity sign selected by
//    evol_gf_parity[which_gf].
// Relies on LOOP_REGION providing indices i0,i1,i2 and on gfs, IDX3, IDX4
// being in scope at the expansion site.
#define OB_UPDATE(inner,which_gf, bc_gz_map,bc_parity_conditions, i0min,i0max, i1min,i1max, i2min,i2max, FACEX0,FACEX1,FACEX2) \
  LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) { \
    const int idx3 = IDX3(i0,i1,i2); \
    if(bc_gz_map[idx3].i0 == -1 && inner==0) { \
      gfs[IDX4(which_gf,i0,i1,i2)] = \
        +3.0*gfs[IDX4(which_gf,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \
        -3.0*gfs[IDX4(which_gf,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)] \
        +1.0*gfs[IDX4(which_gf,i0+3*FACEX0,i1+3*FACEX1,i2+3*FACEX2)]; \
    } else if(bc_gz_map[idx3].i0 != -1 && inner==1) { \
      gfs[IDX4(which_gf,i0,i1,i2)] = \
        ( (REAL)bc_parity_conditions[idx3].parity[evol_gf_parity[which_gf]] )* \
        gfs[IDX4(which_gf, \
                 bc_gz_map[idx3].i0, \
                 bc_gz_map[idx3].i1, \
                 bc_gz_map[idx3].i2)]; \
    } \
  }
// Part P7: Boundary condition driver routine: Apply BCs to all six
// boundary faces of the cube, filling in the innermost
// ghost zone first, and moving outward.
// Boundary-condition driver: for each evolved gridfunction, fill all six
// faces of the cube one ghost-zone layer at a time, innermost layer first,
// applying outer-boundary extrapolation (inner==0) before inner/parity
// copies (inner==1) on each layer.
void apply_bcs(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],
               gz_map *bc_gz_map,parity_condition *bc_parity_conditions,REAL *gfs) {
  // Gridfunctions are independent, so parallelize over which_gf; imin/imax
  // are declared inside the loop and therefore private to each thread.
#pragma omp parallel for
  for(int which_gf=0;which_gf<NUM_EVOL_GFS;which_gf++) {
    // Start from the interior extents and grow outward layer by layer.
    int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
    int imax[3] = { Nxx_plus_2NGHOSTS[0]-NGHOSTS, Nxx_plus_2NGHOSTS[1]-NGHOSTS, Nxx_plus_2NGHOSTS[2]-NGHOSTS };
    for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
      for(int inner=0;inner<2;inner++) {
        // After updating each face, adjust imin[] and imax[]
        // to reflect the newly-updated face extents.
        OB_UPDATE(inner,which_gf, bc_gz_map,bc_parity_conditions, imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
        OB_UPDATE(inner,which_gf, bc_gz_map,bc_parity_conditions, imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;
        OB_UPDATE(inner,which_gf, bc_gz_map,bc_parity_conditions, imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
        OB_UPDATE(inner,which_gf, bc_gz_map,bc_parity_conditions, imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;
        OB_UPDATE(inner,which_gf, bc_gz_map,bc_parity_conditions, imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE); imin[2]--;
        OB_UPDATE(inner,which_gf, bc_gz_map,bc_parity_conditions, imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE); imax[2]++;
        // The six updates above grew the extents by one layer.  On the
        // inner==0 pass, shrink them back so the inner==1 pass revisits the
        // same layer; after inner==1 the growth sticks, advancing outward.
        if(inner==0) { for(int ii=0;ii<3;ii++) {imin[ii]++; imax[ii]--;} }
      }
    }
  }
}
LAGraph_BF_full1.c | //------------------------------------------------------------------------------
// LAGraph_BF_full1.c: Bellman-Ford single-source shortest paths, returns tree,
// while diagonal of input matrix A needs not to be explicit 0
//------------------------------------------------------------------------------
// LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause
//
// See additional acknowledgments in the LICENSE file,
// or contact permission@sei.cmu.edu for the full terms.
//------------------------------------------------------------------------------
// LAGraph_BF_full1: Bellman-Ford single source shortest paths, returning both
// the path lengths and the shortest-path tree. contributed by Jinhao Chen and
// Tim Davis, Texas A&M.
// LAGraph_BF_full1 performs a Bellman-Ford to find out shortest path, parent
// nodes along the path and the hops (number of edges) in the path from given
// source vertex s in the range of [0, n) on graph given as matrix A with size
// n*n. The sparse matrix A has entry A(i, j) if there is an edge from vertex i
// to vertex j with weight w, then A(i, j) = w.
// TODO: think about the return values
// LAGraph_BF_full1 returns GrB_SUCCESS if it succeeds. In this case, there
// are no negative-weight cycles in the graph, and d, pi, and h are returned.
// The vector d has d(k) as the shortest distance from s to k. pi(k) = p+1,
// where p is the parent node of k-th node in the shortest path. In particular,
// pi(s) = 0. h(k) = hop(s, k), the number of edges from s to k in the shortest
// path.
// If the graph has a negative-weight cycle, GrB_NO_VALUE is returned, and the
// GrB_Vectors d(k), pi(k) and h(k) (i.e., *pd_output, *ppi_output and
// *ph_output respectively) will be NULL when negative-weight cycle detected.
// Otherwise, other errors such as GrB_OUT_OF_MEMORY, GrB_INVALID_OBJECT, and
// so on, can be returned, if these errors are found by the underlying
// GrB_* functions.
//------------------------------------------------------------------------------
// LAGraph_FREE_WORK releases every workspace object created by
// LAGraph_BF_full1 below: the distance vectors, the retyped matrix, the
// user-defined type/operators/monoid/semiring, and the tuple arrays.
// GrB_free / LAGraph_Free tolerate NULL handles, so this is safe to invoke
// at any point after the declarations.
#define LAGraph_FREE_WORK \
{ \
    GrB_free(&d); \
    GrB_free(&dmasked); \
    GrB_free(&dless); \
    GrB_free(&Atmp); \
    GrB_free(&BF_Tuple3); \
    GrB_free(&BF_lMIN_Tuple3); \
    GrB_free(&BF_PLUSrhs_Tuple3); \
    GrB_free(&BF_LT_Tuple3); \
    GrB_free(&BF_lMIN_Tuple3_Monoid); \
    GrB_free(&BF_lMIN_PLUSrhs_Tuple3); \
    LAGraph_Free ((void**)&I); \
    LAGraph_Free ((void**)&J); \
    LAGraph_Free ((void**)&w); \
    LAGraph_Free ((void**)&W); \
    LAGraph_Free ((void**)&h); \
    LAGraph_Free ((void**)&pi); \
}
// LAGraph_FREE_ALL additionally frees the three output vectors; used on
// error paths (including negative-weight-cycle detection), where no outputs
// are returned to the caller.
#define LAGraph_FREE_ALL \
{ \
    LAGraph_FREE_WORK \
    GrB_free (pd_output); \
    GrB_free (ppi_output); \
    GrB_free (ph_output); \
}
#include <LAGraph.h>
#include <LAGraphX.h>
#include <LG_internal.h> // from src/utility
typedef void (*LAGraph_binary_function) (void *, const void *, const void *) ;
//------------------------------------------------------------------------------
// data type for each entry of the adjacent matrix A and "distance" vector d;
// <INFINITY,INFINITY,INFINITY> corresponds to nonexistence of a path, and
// the value <0, 0, NULL> corresponds to a path from a vertex to itself
//------------------------------------------------------------------------------
typedef struct
{
    double w;    // w corresponds to a path weight.
    GrB_Index h; // h corresponds to a path size or number of hops.
    GrB_Index pi;// pi corresponds to the penultimate vertex along a path.
                 // vertex indexed as 1, 2, 3, ... , V, and pi = 0 (as nil)
                 // for u=v, and pi = UINT64_MAX (as inf) for (u,v) not in E
}
BF1_Tuple3_struct;
//------------------------------------------------------------------------------
// 2 binary functions, z=f(x,y), where Tuple3xTuple3 -> Tuple3
//------------------------------------------------------------------------------
void BF1_lMIN
(
    BF1_Tuple3_struct *z,
    const BF1_Tuple3_struct *x,
    const BF1_Tuple3_struct *y
)
{
    // Lexicographic minimum on (w, h, pi): smaller weight wins; ties are
    // broken by fewer hops, then by smaller penultimate-vertex index.
    bool x_wins = (x->w < y->w);
    if (!x_wins && x->w == y->w)
    {
        x_wins = (x->h < y->h) || (x->h == y->h && x->pi < y->pi);
    }
    if (x_wins)
    {
        // z may alias x; skip the redundant self-assignment in that case.
        if (z != x) { *z = *x; }
    }
    else
    {
        *z = *y;
    }
}
void BF1_PLUSrhs
(
    BF1_Tuple3_struct *z,
    const BF1_Tuple3_struct *x,
    const BF1_Tuple3_struct *y
)
{
    // Path concatenation: weights and hop counts add.
    z->w = x->w + y->w;
    z->h = x->h + y->h;
    // Take the right-hand penultimate vertex unless the left path does not
    // exist (pi == UINT64_MAX) or the right path is the self-loop sentinel
    // (pi == 0); otherwise keep the left-hand value.
    bool take_rhs = (x->pi != UINT64_MAX) && (y->pi != 0);
    z->pi = take_rhs ? y->pi : x->pi;
}
void BF1_Identity
(
    BF1_Tuple3_struct *z,
    const BF1_Tuple3_struct *x
)
{
    // Plain structure copy; registered as a GrB_UnaryOp to move entries
    // through GrB_apply unchanged.
    BF1_Tuple3_struct copy = *x;
    *z = copy;
}
void BF1_LT
(
    bool *z,
    const BF1_Tuple3_struct *x,
    const BF1_Tuple3_struct *y
)
{
    // Strict lexicographic "less than" on (w, h, pi); same ordering as
    // BF1_lMIN above.
    *z = (x->w < y->w)
      || (x->w == y->w && x->h < y->h)
      || (x->w == y->w && x->h == y->h && x->pi < y->pi);
}
// Given a n-by-n adjacency matrix A and a source vertex s.
// If there is no negative-weight cycle reachable from s, return the distances
// of shortest paths from s and parents along the paths as vector d. Otherwise,
// returns d=NULL if there is a negtive-weight cycle.
// pd_output is pointer to a GrB_Vector, where the i-th entry is d(s,i), the
// sum of edges length in the shortest path
// ppi_output is pointer to a GrB_Vector, where the i-th entry is pi(i), the
// parent of i-th vertex in the shortest path
// ph_output is pointer to a GrB_Vector, where the i-th entry is h(s,i), the
// number of edges from s to i in the shortest path
// A has weights on corresponding entries of edges
// s is given index for source vertex
GrB_Info LAGraph_BF_full1
(
GrB_Vector *pd_output, //the pointer to the vector of distance
GrB_Vector *ppi_output, //the pointer to the vector of parent
GrB_Vector *ph_output, //the pointer to the vector of hops
const GrB_Matrix A, //matrix for the graph
const GrB_Index s //given index of the source
)
{
GrB_Info info;
// tmp vector to store distance vector after n (i.e., V) loops
GrB_Vector d = NULL, dmasked = NULL, dless = NULL;
GrB_Matrix Atmp = NULL;
GrB_Type BF_Tuple3;
GrB_BinaryOp BF_lMIN_Tuple3;
GrB_BinaryOp BF_PLUSrhs_Tuple3;
GrB_UnaryOp BF_Identity_Tuple3;
GrB_BinaryOp BF_LT_Tuple3;
GrB_Monoid BF_lMIN_Tuple3_Monoid;
GrB_Semiring BF_lMIN_PLUSrhs_Tuple3;
GrB_Index nrows, ncols, n, nz; // n = # of row/col, nz = # of nnz in graph
GrB_Index *I = NULL, *J = NULL; // for col/row indices of entries from A
GrB_Index *h = NULL, *pi = NULL;
double *w = NULL;
BF1_Tuple3_struct *W = NULL;
if (A == NULL || pd_output == NULL ||
ppi_output == NULL || ph_output == NULL)
{
// required argument is missing
LAGRAPH_ERROR ("required arguments are NULL", GrB_NULL_POINTER) ;
}
*pd_output = NULL;
*ppi_output = NULL;
*ph_output = NULL;
LAGRAPH_OK (GrB_Matrix_nrows (&nrows, A)) ;
LAGRAPH_OK (GrB_Matrix_ncols (&ncols, A)) ;
LAGRAPH_OK (GrB_Matrix_nvals (&nz, A));
if (nrows != ncols)
{
// A must be square
LAGRAPH_ERROR ("A must be square", GrB_INVALID_VALUE) ;
}
n = nrows;
if (s >= n || s < 0)
{
LAGRAPH_ERROR ("invalid value for source vertex s", GrB_INVALID_VALUE);
}
//--------------------------------------------------------------------------
// create all GrB_Type GrB_BinaryOp GrB_Monoid and GrB_Semiring
//--------------------------------------------------------------------------
// GrB_Type
LAGRAPH_OK (GrB_Type_new(&BF_Tuple3, sizeof(BF1_Tuple3_struct)));
// GrB_BinaryOp
LAGRAPH_OK (GrB_UnaryOp_new(&BF_Identity_Tuple3,
(void*) (&BF1_Identity), BF_Tuple3, BF_Tuple3));
LAGRAPH_OK (GrB_BinaryOp_new(&BF_LT_Tuple3,
(LAGraph_binary_function) (&BF1_LT), GrB_BOOL, BF_Tuple3, BF_Tuple3));
LAGRAPH_OK (GrB_BinaryOp_new(&BF_lMIN_Tuple3,
(LAGraph_binary_function) (&BF1_lMIN), BF_Tuple3, BF_Tuple3, BF_Tuple3));
LAGRAPH_OK (GrB_BinaryOp_new(&BF_PLUSrhs_Tuple3,
(LAGraph_binary_function)(&BF1_PLUSrhs),
BF_Tuple3, BF_Tuple3, BF_Tuple3));
// GrB_Monoid
BF1_Tuple3_struct BF_identity = (BF1_Tuple3_struct) { .w = INFINITY,
.h = UINT64_MAX, .pi = UINT64_MAX };
LAGRAPH_OK(GrB_Monoid_new_UDT(&BF_lMIN_Tuple3_Monoid, BF_lMIN_Tuple3,
&BF_identity));
//GrB_Semiring
LAGRAPH_OK (GrB_Semiring_new(&BF_lMIN_PLUSrhs_Tuple3,
BF_lMIN_Tuple3_Monoid, BF_PLUSrhs_Tuple3));
//--------------------------------------------------------------------------
// allocate arrays used for tuplets
//--------------------------------------------------------------------------
I = LAGraph_Malloc (nz, sizeof(GrB_Index)) ;
J = LAGraph_Malloc (nz, sizeof(GrB_Index)) ;
w = LAGraph_Malloc (nz, sizeof(double)) ;
W = LAGraph_Malloc (nz, sizeof(BF1_Tuple3_struct)) ;
if (I == NULL || J == NULL || w == NULL || W == NULL)
{
LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// create matrix Atmp based on A, while its entries become BF_Tuple3 type
//--------------------------------------------------------------------------
LAGRAPH_OK(GrB_Matrix_extractTuples_FP64(I, J, w, &nz, A));
int nthreads;
LAGRAPH_OK(LAGraph_GetNumThreads (&nthreads, NULL)) ;
printf ("nthreads %d\n", nthreads) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (GrB_Index k = 0; k < nz; k++)
{
W[k] = (BF1_Tuple3_struct) { .w = w[k], .h = 1, .pi = I[k] + 1 };
}
LAGRAPH_OK (GrB_Matrix_new(&Atmp, BF_Tuple3, n, n));
LAGRAPH_OK(GrB_Matrix_build_UDT(Atmp, I, J, W, nz, BF_lMIN_Tuple3));
LAGraph_Free ((void**)&I);
LAGraph_Free ((void**)&J);
LAGraph_Free ((void**)&W);
LAGraph_Free ((void**)&w);
//--------------------------------------------------------------------------
// create and initialize "distance" vector d, dmasked and dless
//--------------------------------------------------------------------------
LAGRAPH_OK (GrB_Vector_new(&d, BF_Tuple3, n));
// make d dense
LAGRAPH_OK(GrB_Vector_assign_UDT(d, NULL, NULL, (void*)&BF_identity,
GrB_ALL, n, NULL));
// initial distance from s to itself
BF1_Tuple3_struct d0 = (BF1_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 };
LAGRAPH_OK(GrB_Vector_setElement_UDT(d, &d0, s));
// creat dmasked as a sparse vector with only one entry at s
LAGRAPH_OK (GrB_Vector_new(&dmasked, BF_Tuple3, n));
LAGRAPH_OK(GrB_Vector_setElement_UDT(dmasked, &d0, s));
// create dless
LAGRAPH_OK (GrB_Vector_new(&dless, GrB_BOOL, n));
//--------------------------------------------------------------------------
// start the Bellman Ford process
//--------------------------------------------------------------------------
bool any_dless= true; // if there is any newly found shortest path
int64_t iter = 0; // number of iterations
// terminate when no new path is found or more than V-1 loops
while (any_dless && iter < n - 1)
{
// execute semiring on d and A, and save the result to dtmp
LAGRAPH_OK (GrB_vxm(dmasked, GrB_NULL, GrB_NULL,
BF_lMIN_PLUSrhs_Tuple3, dmasked, Atmp, GrB_NULL));
// dless = d .< dtmp
//LAGRAPH_OK (GrB_Vector_clear(dless));
LAGRAPH_OK (GrB_eWiseMult(dless, NULL, NULL, BF_LT_Tuple3, dmasked, d,
NULL));
// if there is no entry with smaller distance then all shortest paths
// are found
LAGRAPH_OK (GrB_reduce (&any_dless, NULL, GxB_LOR_BOOL_MONOID, dless,
NULL)) ;
if(any_dless)
{
// update all entries with smaller distances
LAGRAPH_OK (GrB_apply(d, dless, NULL, BF_Identity_Tuple3, dmasked, NULL));
// only use entries that were just updated
LAGRAPH_OK (GrB_Vector_clear(dmasked));
LAGRAPH_OK (GrB_apply(dmasked, dless, NULL, BF_Identity_Tuple3, d, NULL));
//try:
//LAGRAPH_OK (GrB_assign(dmasked, dless, NULL, d, GrB_ALL, n, GrB_DESC_R);
}
iter ++;
}
// check for negative-weight cycle only when there was a new path in the
// last loop, otherwise, there can't be a negative-weight cycle.
if (any_dless)
{
// execute semiring again to check for negative-weight cycle
LAGRAPH_OK (GrB_vxm(dmasked, GrB_NULL, GrB_NULL,
BF_lMIN_PLUSrhs_Tuple3, dmasked, Atmp, GrB_NULL));
// dless = d .< dtmp
//LAGRAPH_OK (GrB_Vector_clear(dless));
LAGRAPH_OK (GrB_eWiseMult(dless, NULL, NULL, BF_LT_Tuple3, dmasked, d, NULL));
// if there is no entry with smaller distance then all shortest paths
// are found
LAGRAPH_OK (GrB_reduce (&any_dless, NULL, GxB_LOR_BOOL_MONOID, dless, NULL)) ;
if(any_dless)
{
// printf("A negative-weight cycle found. \n");
LAGraph_FREE_ALL;
return (GrB_NO_VALUE) ;
}
}
//--------------------------------------------------------------------------
// extract tuple from "distance" vector d and create GrB_Vectors for output
//--------------------------------------------------------------------------
I = LAGraph_Malloc (n, sizeof(GrB_Index)) ;
W = LAGraph_Malloc (n, sizeof(BF1_Tuple3_struct)) ;
w = LAGraph_Malloc (n, sizeof(double)) ;
h = LAGraph_Malloc (n, sizeof(GrB_Index)) ;
pi = LAGraph_Malloc (n, sizeof(GrB_Index)) ;
if (I == NULL || W == NULL || w == NULL || h == NULL || pi == NULL)
{
LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
}
LAGRAPH_OK(GrB_Vector_extractTuples_UDT (I, (void *) W, &n, d));
for (GrB_Index k = 0; k < n; k++)
{
w [k] = W[k].w ;
h [k] = W[k].h ;
pi[k] = W[k].pi;
}
LAGRAPH_OK (GrB_Vector_new(pd_output, GrB_FP64, n));
LAGRAPH_OK (GrB_Vector_new(ppi_output, GrB_UINT64, n));
LAGRAPH_OK (GrB_Vector_new(ph_output, GrB_UINT64, n));
LAGRAPH_OK (GrB_Vector_build (*pd_output , I, w , n, GrB_MIN_FP64 ));
LAGRAPH_OK (GrB_Vector_build (*ppi_output, I, pi, n, GrB_MIN_UINT64));
LAGRAPH_OK (GrB_Vector_build (*ph_output , I, h , n, GrB_MIN_UINT64));
LAGraph_FREE_WORK;
return (GrB_SUCCESS) ;
}
|
rawSHA256_ng_fmt_plug.c | /*
* Copyright 2013, epixoip.
* AVX2 support, Copyright (c) 2015 magnum
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that redistribution of source
* retains the above copyright.
*/
#include "arch.h"
#if SIMD_COEF_32
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawSHA256_ng;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawSHA256_ng);
#else
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
#if _OPENMP
#include <omp.h>
#if __XOP__
#ifndef OMP_SCALE
#define OMP_SCALE 512 /* AMD */
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 512 /* Intel */
#endif
#endif
#endif
#include "misc.h"
#if !defined(DEBUG) && !defined(WITH_ASAN)
// These compilers claim to be __GNUC__ but warn on gcc pragmas.
#if __GNUC__ && !__INTEL_COMPILER && !__clang__ && !__llvm__ && !_MSC_VER
#pragma GCC optimize 3
#endif
#endif
#include <string.h>
#include "stdint.h"
#include "pseudo_intrinsics.h"
#include "common.h"
#include "formats.h"
#include "aligned.h"
#include "memdbg.h"
#if __MIC__
#define SIMD_TYPE "512/512 MIC 16x"
#elif __AVX512F__
#define SIMD_TYPE "512/512 AVX512 16x"
#elif __AVX2__
#define SIMD_TYPE "256/256 AVX2 8x"
#elif __ALTIVEC__
#define SIMD_TYPE "128/128 AltiVec 4x"
#elif __ARM_NEON
#define SIMD_TYPE "128/128 NEON 4x"
#elif __XOP__
#define SIMD_TYPE "128/128 XOP 4x"
#elif __SSE4_1__
#define SIMD_TYPE "128/128 SSE4.1 4x"
#elif __SSSE3__
#define SIMD_TYPE "128/128 SSSE3 4x"
#else
#define SIMD_TYPE "128/128 SSE2 4x"
#endif
#define BINARY_SIZE 4
#define FORMAT_LABEL "Raw-SHA256-ng"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "SHA256 " SIMD_TYPE
#define VWIDTH SIMD_COEF_32
#define MAXLEN 55
#define PLAINTEXT_LENGTH MAXLEN
#define CIPHERTEXT_LENGTH 64
#define DIGEST_SIZE 32
#define _RAWSHA256_H
#include "rawSHA256_common.h"
#undef _RAWSHA256_H
#define SALT_SIZE 0
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT VWIDTH
#define MAX_KEYS_PER_CRYPT VWIDTH
#if __SSE4_1__ && !__AVX2__
#undef GATHER
#define GATHER(x, y, z) \
{ \
x = _mm_cvtsi32_si128( y[index][z] ); \
x = _mm_insert_epi32(x, y[index + 1][z], 1); \
x = _mm_insert_epi32(x, y[index + 2][z], 2); \
x = _mm_insert_epi32(x, y[index + 3][z], 3); \
}
#endif
#define S0(x) \
( \
vxor( \
vroti_epi32(x, -22), \
vxor( \
vroti_epi32(x, -2), \
vroti_epi32(x, -13) \
) \
) \
)
#define S1(x) \
( \
vxor( \
vroti_epi32(x, -25), \
vxor( \
vroti_epi32(x, -6), \
vroti_epi32(x, -11) \
) \
) \
)
#define s0(x) \
( \
vxor( \
vsrli_epi32(x, 3), \
vxor( \
vroti_epi32(x, -7), \
vroti_epi32(x, -18) \
) \
) \
)
#define s1(x) \
( \
vxor( \
vsrli_epi32(x, 10), \
vxor( \
vroti_epi32(x, -17), \
vroti_epi32(x, -19) \
) \
) \
)
#if !VCMOV_EMULATED
#define Maj(x,y,z) vcmov(x, y, vxor(z, y))
#else
#define Maj(x,y,z) vor(vand(x, y), vand(vor(x, y), z))
#endif
#define Ch(x,y,z) vcmov(y, z, x)
/* Message-schedule expansion: w[t] = s1(w[t-2]) + w[t-7] + s0(w[t-15]) + w[t-16],
   computed lane-parallel across the SIMD vector. */
#define R(t)                                                              \
{                                                                         \
    w[t] = vadd_epi32(s1(w[t -  2]), w[t - 7]);                           \
    w[t] = vadd_epi32(s0(w[t - 15]), w[t]);                               \
    w[t] = vadd_epi32(   w[t - 16],  w[t]);                               \
}

/* One SHA-256 compression round. For rounds > 15 the schedule word is
   expanded on the fly via R(x). tmp1/tmp2 follow the FIPS 180-4 T1/T2
   formulation; the caller rotates the a..h arguments between rounds. */
#define SHA256_STEP(a,b,c,d,e,f,g,h,x,K)                                  \
{                                                                         \
    if (x > 15) R(x);                                                     \
    tmp1 = vadd_epi32(h,    S1(e));                                       \
    tmp1 = vadd_epi32(tmp1, Ch(e,f,g));                                   \
    tmp1 = vadd_epi32(tmp1, vset1_epi32(K));                              \
    tmp1 = vadd_epi32(tmp1, w[x]);                                        \
    tmp2 = vadd_epi32(S0(a),Maj(a,b,c));                                  \
    d    = vadd_epi32(tmp1, d);                                           \
    h    = vadd_epi32(tmp1, tmp2);                                        \
}
static uint32_t (*saved_key)[64];
static uint32_t *crypt_key[ 8];
/*
 * Format initialization: scale the key batch for OpenMP and allocate the
 * aligned key/digest buffers sized to max_keys_per_crypt.
 */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	self->params.min_keys_per_crypt *= threads;
	threads *= OMP_SCALE;
	self->params.max_keys_per_crypt *= threads;
#endif
	/* One 64-word SHA-256 block per candidate key. */
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt,
	                             sizeof(*saved_key), VWIDTH * 4);
	/* Eight planes, one per 32-bit digest word, interleaved per key. */
	for (int plane = 0; plane < 8; plane++)
		crypt_key[plane] = mem_calloc_align(self->params.max_keys_per_crypt,
		                                    sizeof(uint32_t), VWIDTH * 4);
}
/* Release the buffers allocated by init(). */
static void done(void)
{
	for (int plane = 8; plane-- > 0; )
		MEM_FREE(crypt_key[plane]);
	MEM_FREE(saved_key);
}
/* Cracker hash-table lookups: all levels mask the first digest word
   (crypt_key plane 0) with progressively wider PH_MASK_* masks. */
static int get_hash_0(int index) { return crypt_key[0][index] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[0][index] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[0][index] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[0][index] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[0][index] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[0][index] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[0][index] & PH_MASK_6; }
/*
 * Store one candidate password into its pre-padded SHA-256 message block.
 * The bit length goes into message word 15 (the block is < 56 bytes, so a
 * single block suffices) and the 0x80 padding byte is appended; stale bytes
 * from a previous, longer key are cleared.
 *
 * Fixes vs. the previous version:
 *  - the copy loop is bounded by MAXLEN so an over-long key can no longer
 *    write past the padding region of the 64-word block;
 *  - the cleanup loop checks the bound BEFORE reading buf8[len].
 */
static void set_key(char *key, int index)
{
	uint32_t *buf32 = (uint32_t*) &saved_key[index];
	uint8_t *buf8 = (uint8_t*) buf32;
	int len = 0;

	while (*key && len < MAXLEN)
		buf8[len++] = *key++;
	buf32[15] = len << 3;      /* message length in bits, last block word */
	buf8[len++] = 0x80;        /* SHA-256 padding marker */
	/* Zero out leftovers of a previous key until a zero byte is found. */
	while (len <= MAXLEN && buf8[len])
		buf8[len++] = 0;
}
/*
 * Reconstruct the plaintext for one stored key. The byte length is
 * recovered from the bit count kept in message word 15 by set_key().
 * Returns a pointer to a static buffer, overwritten on each call.
 */
static char *get_key(int index)
{
	static char out[MAXLEN + 1];
	const uint32_t *block = (uint32_t*) &saved_key[index];
	int len = block[15] >> 3;

	memset(out, 0, sizeof(out));
	memcpy(out, block, len);
	return out;
}
/*
 * Compute SHA-256 over all queued keys, VWIDTH candidates per SIMD pass.
 * Results are stored transposed: crypt_key[word][index], so cmp_all can
 * vector-load the first digest word of VWIDTH candidates at once.
 * salt is unused (raw hash format). Returns the unchanged count.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index += VWIDTH)
#endif
	{
		vtype a, b, c, d, e, f, g, h;
		vtype w[64], tmp1, tmp2;
		int i;

#if __SSE4_1__ && !__AVX2__
		/* Gather the 16 message words of 4 keys straight into vectors. */
		for (i=0; i < 16; i++) GATHER(w[i], saved_key, i);
		/* Byte-swap the data words to big-endian; w[15] is the bit
		   length stored as a native integer value, so it is NOT swapped. */
		for (i=0; i < 15; i++) vswap32(w[i]);
#else
		/* Transpose VWIDTH key blocks into word-major layout, then load. */
		JTR_ALIGN(VWIDTH * 4) uint32_t __w[16][VWIDTH];
		int j;

		for (i=0; i < VWIDTH; i++)
			for (j=0; j < 16; j++)
				__w[j][i] = saved_key[index + i][j];

		for (i=0; i < 15; i++)
		{
			w[i] = vload((vtype*) __w[i]);
			vswap32(w[i]);
		}

		/* Length word: already a host-order integer value, no swap. */
		w[15] = vload((vtype*) __w[15]);
#endif
		/* SHA-256 initial state (FIPS 180-4 H0..H7). */
		a = vset1_epi32(0x6a09e667);
		b = vset1_epi32(0xbb67ae85);
		c = vset1_epi32(0x3c6ef372);
		d = vset1_epi32(0xa54ff53a);
		e = vset1_epi32(0x510e527f);
		f = vset1_epi32(0x9b05688c);
		g = vset1_epi32(0x1f83d9ab);
		h = vset1_epi32(0x5be0cd19);

		/* 64 fully unrolled rounds; the a..h arguments rotate one slot
		   per round instead of shuffling the state variables. */
		SHA256_STEP(a, b, c, d, e, f, g, h,  0, 0x428a2f98);
		SHA256_STEP(h, a, b, c, d, e, f, g,  1, 0x71374491);
		SHA256_STEP(g, h, a, b, c, d, e, f,  2, 0xb5c0fbcf);
		SHA256_STEP(f, g, h, a, b, c, d, e,  3, 0xe9b5dba5);
		SHA256_STEP(e, f, g, h, a, b, c, d,  4, 0x3956c25b);
		SHA256_STEP(d, e, f, g, h, a, b, c,  5, 0x59f111f1);
		SHA256_STEP(c, d, e, f, g, h, a, b,  6, 0x923f82a4);
		SHA256_STEP(b, c, d, e, f, g, h, a,  7, 0xab1c5ed5);
		SHA256_STEP(a, b, c, d, e, f, g, h,  8, 0xd807aa98);
		SHA256_STEP(h, a, b, c, d, e, f, g,  9, 0x12835b01);
		SHA256_STEP(g, h, a, b, c, d, e, f, 10, 0x243185be);
		SHA256_STEP(f, g, h, a, b, c, d, e, 11, 0x550c7dc3);
		SHA256_STEP(e, f, g, h, a, b, c, d, 12, 0x72be5d74);
		SHA256_STEP(d, e, f, g, h, a, b, c, 13, 0x80deb1fe);
		SHA256_STEP(c, d, e, f, g, h, a, b, 14, 0x9bdc06a7);
		SHA256_STEP(b, c, d, e, f, g, h, a, 15, 0xc19bf174);
		SHA256_STEP(a, b, c, d, e, f, g, h, 16, 0xe49b69c1);
		SHA256_STEP(h, a, b, c, d, e, f, g, 17, 0xefbe4786);
		SHA256_STEP(g, h, a, b, c, d, e, f, 18, 0x0fc19dc6);
		SHA256_STEP(f, g, h, a, b, c, d, e, 19, 0x240ca1cc);
		SHA256_STEP(e, f, g, h, a, b, c, d, 20, 0x2de92c6f);
		SHA256_STEP(d, e, f, g, h, a, b, c, 21, 0x4a7484aa);
		SHA256_STEP(c, d, e, f, g, h, a, b, 22, 0x5cb0a9dc);
		SHA256_STEP(b, c, d, e, f, g, h, a, 23, 0x76f988da);
		SHA256_STEP(a, b, c, d, e, f, g, h, 24, 0x983e5152);
		SHA256_STEP(h, a, b, c, d, e, f, g, 25, 0xa831c66d);
		SHA256_STEP(g, h, a, b, c, d, e, f, 26, 0xb00327c8);
		SHA256_STEP(f, g, h, a, b, c, d, e, 27, 0xbf597fc7);
		SHA256_STEP(e, f, g, h, a, b, c, d, 28, 0xc6e00bf3);
		SHA256_STEP(d, e, f, g, h, a, b, c, 29, 0xd5a79147);
		SHA256_STEP(c, d, e, f, g, h, a, b, 30, 0x06ca6351);
		SHA256_STEP(b, c, d, e, f, g, h, a, 31, 0x14292967);
		SHA256_STEP(a, b, c, d, e, f, g, h, 32, 0x27b70a85);
		SHA256_STEP(h, a, b, c, d, e, f, g, 33, 0x2e1b2138);
		SHA256_STEP(g, h, a, b, c, d, e, f, 34, 0x4d2c6dfc);
		SHA256_STEP(f, g, h, a, b, c, d, e, 35, 0x53380d13);
		SHA256_STEP(e, f, g, h, a, b, c, d, 36, 0x650a7354);
		SHA256_STEP(d, e, f, g, h, a, b, c, 37, 0x766a0abb);
		SHA256_STEP(c, d, e, f, g, h, a, b, 38, 0x81c2c92e);
		SHA256_STEP(b, c, d, e, f, g, h, a, 39, 0x92722c85);
		SHA256_STEP(a, b, c, d, e, f, g, h, 40, 0xa2bfe8a1);
		SHA256_STEP(h, a, b, c, d, e, f, g, 41, 0xa81a664b);
		SHA256_STEP(g, h, a, b, c, d, e, f, 42, 0xc24b8b70);
		SHA256_STEP(f, g, h, a, b, c, d, e, 43, 0xc76c51a3);
		SHA256_STEP(e, f, g, h, a, b, c, d, 44, 0xd192e819);
		SHA256_STEP(d, e, f, g, h, a, b, c, 45, 0xd6990624);
		SHA256_STEP(c, d, e, f, g, h, a, b, 46, 0xf40e3585);
		SHA256_STEP(b, c, d, e, f, g, h, a, 47, 0x106aa070);
		SHA256_STEP(a, b, c, d, e, f, g, h, 48, 0x19a4c116);
		SHA256_STEP(h, a, b, c, d, e, f, g, 49, 0x1e376c08);
		SHA256_STEP(g, h, a, b, c, d, e, f, 50, 0x2748774c);
		SHA256_STEP(f, g, h, a, b, c, d, e, 51, 0x34b0bcb5);
		SHA256_STEP(e, f, g, h, a, b, c, d, 52, 0x391c0cb3);
		SHA256_STEP(d, e, f, g, h, a, b, c, 53, 0x4ed8aa4a);
		SHA256_STEP(c, d, e, f, g, h, a, b, 54, 0x5b9cca4f);
		SHA256_STEP(b, c, d, e, f, g, h, a, 55, 0x682e6ff3);
		SHA256_STEP(a, b, c, d, e, f, g, h, 56, 0x748f82ee);
		SHA256_STEP(h, a, b, c, d, e, f, g, 57, 0x78a5636f);
		SHA256_STEP(g, h, a, b, c, d, e, f, 58, 0x84c87814);
		SHA256_STEP(f, g, h, a, b, c, d, e, 59, 0x8cc70208);
		SHA256_STEP(e, f, g, h, a, b, c, d, 60, 0x90befffa);
		SHA256_STEP(d, e, f, g, h, a, b, c, 61, 0xa4506ceb);
		SHA256_STEP(c, d, e, f, g, h, a, b, 62, 0xbef9a3f7);
		SHA256_STEP(b, c, d, e, f, g, h, a, 63, 0xc67178f2);

		/* Final Davies-Meyer feed-forward: add the initial state back. */
		a = vadd_epi32(a, vset1_epi32(0x6a09e667));
		b = vadd_epi32(b, vset1_epi32(0xbb67ae85));
		c = vadd_epi32(c, vset1_epi32(0x3c6ef372));
		d = vadd_epi32(d, vset1_epi32(0xa54ff53a));
		e = vadd_epi32(e, vset1_epi32(0x510e527f));
		f = vadd_epi32(f, vset1_epi32(0x9b05688c));
		g = vadd_epi32(g, vset1_epi32(0x1f83d9ab));
		h = vadd_epi32(h, vset1_epi32(0x5be0cd19));

		/* Store word-major: plane i holds digest word i for all keys. */
		vstore((vtype*) &crypt_key[0][index], a);
		vstore((vtype*) &crypt_key[1][index], b);
		vstore((vtype*) &crypt_key[2][index], c);
		vstore((vtype*) &crypt_key[3][index], d);
		vstore((vtype*) &crypt_key[4][index], e);
		vstore((vtype*) &crypt_key[5][index], f);
		vstore((vtype*) &crypt_key[6][index], g);
		vstore((vtype*) &crypt_key[7][index], h);
	}
	return count;
}
/*
 * Return 1 if ANY computed digest might match: the target's first 32-bit
 * word is broadcast and compared against the first digest word of each
 * candidate, VWIDTH lanes at a time.
 * Note: without _OPENMP the loop header is compiled out and only the first
 * vector (i = 0) is examined — presumably count == VWIDTH in that build,
 * since min/max keys_per_crypt are VWIDTH there; verify if batching changes.
 */
static int cmp_all(void *binary, int count)
{
	vtype bin;
	vtype digest;
	int i = 0;

#ifdef _OPENMP
	for (i = 0; i < count; i += VWIDTH)
#endif
	{
		digest = vload((vtype*) &crypt_key[0][i]);
		bin = vset1_epi32(((uint32_t*) binary)[0]);
		/* Any-lane equality test on the first digest word. */
		if (vanyeq_epi32(bin, digest))
			return 1;
	}
	return 0;
}
/* Quick check for one candidate: compare only the first digest word. */
static int cmp_one(void *binary, int index)
{
	const uint32_t *bin32 = (const uint32_t*) binary;

	return bin32[0] == crypt_key[0][index];
}
/* Full verification: compare all eight digest words against the binary
   decoded from the ciphertext string. */
static int cmp_exact(char *source, int index)
{
	ARCH_WORD_32 *binary = sha256_common_binary(source);
	const uint32_t *words = (const uint32_t*) binary;

	for (int i = 0; i < 8; i++) {
		if (words[i] != crypt_key[i][index])
			return 0;
	}
	return 1;
}
/* John the Ripper format descriptor: parameters block, then methods block.
   Entries are positional initializers of struct fmt_main. */
struct fmt_main fmt_rawSHA256_ng = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,                   /* minimum plaintext length */
		MAXLEN,              /* maximum plaintext length (55, one block) */
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,           /* 0: raw (unsalted) hash */
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },            /* no tunable costs */
		sha256_common_tests
	}, {
		init,
		done,
		fmt_default_reset,
		sha256_common_prepare,
		sha256_common_valid,
		sha256_common_split,
		sha256_common_binary,
		fmt_default_salt,
		{ NULL },            /* no tunable cost extractors */
		fmt_default_source,
		{
			/* binary_hash_* use the defaults (first word of binary) */
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,                /* no salt_compare */
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
#endif /* SIMD_COEF_32 */
|
add.c | //-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB SP code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header.h"
//---------------------------------------------------------------------
// addition of update to the vector u
//---------------------------------------------------------------------
/*
 * add: u += rhs, elementwise over the interior grid (NPB SP "add" phase).
 * Offloaded to the device via OpenMP target; the two pragma variants trade
 * collapse depth for an explicit inner simd loop depending on build flags.
 * Grid bounds nz2/ny2/nx2 and the u/rhs arrays come from header.h.
 */
void add()
{
  int i, j, k, m;

  // if (timeron) timer_start(t_add);
#ifdef SPEC_USE_INNER_SIMD
  /* Collapse only k/j; i is vectorized by the inner `omp simd` below. */
#pragma omp target teams distribute parallel for private(i,j,k,m) collapse(2)
#else
  /* Collapse all four rectangular loops into one parallel iteration space. */
#pragma omp target teams distribute parallel for simd collapse(4)
#endif
  for (k = 1; k <= nz2; k++) {
    for (j = 1; j <= ny2; j++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd
#endif
      for (i = 1; i <= nx2; i++) {
        for (m = 0; m < 5; m++) {   /* 5 solution components per cell */
          u[m][k][j][i] = u[m][k][j][i] + rhs[m][k][j][i];
        }
      }
    }
  }
  // if (timeron) timer_stop(t_add);
}
|
expected_output.c | #include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <polybench.h>
#include "syr2k.h"
/**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/*syr2k.c: this file is part of PolyBench/C*/
/*Include polybench common header.*/
/*Include benchmark-specific header.*/
/*Array initialization.*/
/*
 * Deterministically seed the inputs: A and B over the n x m used region,
 * C over n x n, plus the two scalar coefficients. Values are small
 * rationals derived from the indices so runs are reproducible.
 */
static void init_array(int n, int m, double *alpha, double *beta, double C[1200][1200], double A[1200][1000], double B[1200][1000]) {
  *alpha = 1.5;
  *beta = 1.2;
  for(int i = 0; i < n; i++) {
    for(int j = 0; j < m; j++) {
      A[i][j] = (double) ((i * j + 1) % n) / n;
      B[i][j] = (double) ((i * j + 2) % m) / m;
    }
  }
  for(int i = 0; i < n; i++) {
    for(int j = 0; j < n; j++) {
      C[i][j] = (double) ((i * j + 3) % n) / m;
    }
  }
}
/*DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output.*/
/*
 * Dump the n x n live-out region of C to stderr in the PolyBench DCE
 * format (20 values per line). Used both to defeat dead-code elimination
 * and to diff outputs for correctness checking.
 */
static void print_array(int n, double C[1200][1200]) {
  fprintf(stderr, "==BEGIN DUMP_ARRAYS==\n");
  fprintf(stderr, "begin dump: %s", "C");
  for(int i = 0; i < n; i++) {
    for(int j = 0; j < n; j++) {
      if((i * n + j) % 20 == 0)
        fprintf(stderr, "\n");
      fprintf(stderr, "%0.2lf ", C[i][j]);
    }
  }
  fprintf(stderr, "\nend dump: %s\n", "C");
  fprintf(stderr, "==END DUMP_ARRAYS==\n");
}
/*Main computational kernel. The whole function will be timed,
including the call and return.*/
static void kernel_syr2k(int n, int m, double alpha, double beta, double C[1200][1200], double A[1200][1000], double B[1200][1000]) {
int i, j, k;
#pragma omp parallel for default(shared) private(i, j, k) firstprivate(n, beta, m, alpha, A, B)
for(i = 0; i < n; i++) {
// #pragma omp parallel for default(shared) private(j) firstprivate(i, beta)
for(j = 0; j <= i; j++)
C[i][j] *= beta;
// #pragma omp parallel for default(shared) private(k, j) firstprivate(m, i, alpha, A, B)
for(k = 0; k < m; k++) {
// #pragma omp parallel for default(shared) private(j) firstprivate(i, k, alpha, A, B)
for(j = 0; j <= i; j++) {
C[i][j] += A[j][k] * alpha * B[i][k] + B[j][k] * alpha * A[i][k];
}
}
}
}
/* Driver: allocate, initialize, run the timed kernel, optionally dump. */
int main(int argc, char **argv) {
  /* Fixed PolyBench LARGE problem sizes. */
  int n = 1200;
  int m = 1000;

  double alpha;
  double beta;

  /* Heap-allocate the arrays via the PolyBench allocator. */
  double (*C)[1200][1200];
  C = (double (*)[1200][1200]) polybench_alloc_data((1200 + 0) * (1200 + 0), sizeof(double));
  double (*A)[1200][1000];
  A = (double (*)[1200][1000]) polybench_alloc_data((1200 + 0) * (1000 + 0), sizeof(double));
  double (*B)[1200][1000];
  B = (double (*)[1200][1000]) polybench_alloc_data((1200 + 0) * (1000 + 0), sizeof(double));

  init_array(n, m, &alpha, &beta, *C, *A, *B);

  /* The timed region: the whole kernel call. */
  kernel_syr2k(n, m, alpha, beta, *C, *A, *B);

  /* Prevent dead-code elimination; argv[0] is never "", so the dump only
     triggers under the PolyBench convention of > 42 arguments. */
  if(argc > 42 && !strcmp(argv[0], "")) print_array(n, *C);

  free((void *) C);
  free((void *) A);
  free((void *) B);
  return 0;
}
|
GB_binop__iseq_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_int16)
// A*D function (colscale): GB (_AxD__iseq_int16)
// D*A function (rowscale): GB (_DxB__iseq_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_int16)
// C=scalar+B GB (_bind1st__iseq_int16)
// C=scalar+B' GB (_bind1st_tran__iseq_int16)
// C=A+scalar GB (_bind2nd__iseq_int16)
// C=A'+scalar GB (_bind2nd_tran__iseq_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_INT16 || GxB_NO_ISEQ_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NOTE: compiled out for ISEQ — comparison ops have no dense-accum kernel,
// so this placeholder is never built and the generic path is used instead.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// All three matrices dense: the template computes Cx [p] = (Ax [p] == Bx [p])
// for every entry, with no accumulator and no mask.
GrB_Info GB (_Cdense_ewise3_noaccum__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Accumulate the sparse matrix B into the dense matrix C using the ISEQ op,
// over the tasks in B_ek_slicing.
GrB_Info GB (_Cdense_accumB__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// Accumulate a scalar b into every entry of the dense matrix C with the
// ISEQ op: C(i,j) = (C(i,j) == b). The scalar arrives untyped via p_bwork.
//
// Fix: the previous version had a second, unreachable
// `return (GrB_SUCCESS) ;` after the braced block — dead code, and
// inconsistent with the sibling _Cdense_accumB kernel. Removed.
GrB_Info GB (_Cdense_accumb__iseq_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

// Column-scale: C(:,j) = op (A(:,j), D(j,j)) with op = ISEQ.
GrB_Info GB (_AxD__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Row-scale: C(i,:) = op (D(i,i), B(i,:)) with op = ISEQ.
GrB_Info GB (_DxB__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

// Element-wise "add" (set union) with op = ISEQ, optionally masked.
// Workspaces declared here are freed by GB_FREE_WORK after the template.
GrB_Info GB (_AaddB__iseq_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

// Element-wise "multiply" (set intersection) with op = ISEQ, general case
// (method 08): C is sparse or hypersparse, with optional (complemented) mask.
GrB_Info GB (_AemultB_08__iseq_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// Method 02 of eWiseMult. GB_BINOP_FLIP is 0 for ISEQ (commutative), so
// only the non-flipped branch below is compiled in.
GrB_Info GB (_AemultB_02__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// Method 04 of eWiseMult: the sparse mask M drives the iteration.
GrB_Info GB (_AemultB_04__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

// Bitmap variant of eWiseMult: C is held as a bitmap matrix.
GrB_Info GB (_AemultB_bitmap__iseq_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = (x == Bx [p]) for every present entry p; Bb is the optional
// bitmap of B (GBB skips absent entries).
GrB_Info GB (_bind1st__iseq_int16)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = (Ax [p] == y) for every present entry p; Ab is the optional
// bitmap of A.
GrB_Info GB (_bind2nd__iseq_int16)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x == aij) ;                      \
}

// Transpose A and apply z = (x == aij) entry-wise.
GrB_Info GB (_bind1st_tran__iseq_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows (generated-code artifact;
    // here the B type equals the A type, so the definition is unchanged)
    #undef GB_ATYPE
    #define GB_ATYPE \
        int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij == y) ;                      \
}

// Transpose A and apply z = (aij == y) entry-wise.
GrB_Info GB (_bind2nd_tran__iseq_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
resize-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file resize-inl.h
* \brief image resize operator using opencv and only support bilinear resize
* \author Jake Lee
*/
#ifndef MXNET_OPERATOR_IMAGE_RESIZE_INL_H_
#define MXNET_OPERATOR_IMAGE_RESIZE_INL_H_
#include <mxnet/base.h>
#include <vector>
#include "../mxnet_op.h"
#include "../operator_common.h"
#include "image_utils.h"
#if MXNET_USE_OPENCV
#include <opencv2/opencv.hpp>
#endif // MXNET_USE_OPENCV
namespace mxnet {
namespace op {
namespace image {
using namespace mshadow;
#if MXNET_USE_CUDA
template<typename DType, typename T, typename Acctype>
void ResizeImplCUDA(Stream<gpu> *s,
const T input,
const T output);
#endif // MXNET_USE_CUDA
// Operator parameters for the image Resize operator.
struct ResizeParam : public dmlc::Parameter<ResizeParam> {
mxnet::Tuple<int> size;  // target size: (width, height) pair or a single edge length
bool keep_ratio;  // when size is a single int, resize the short edge only
int interp;  // OpenCV interpolation method flag (default 1 = bilinear)
DMLC_DECLARE_PARAMETER(ResizeParam) {
DMLC_DECLARE_FIELD(size)
.set_default(mxnet::Tuple<int>())
.describe("Size of new image. Could be (width, height) or (size)");
DMLC_DECLARE_FIELD(keep_ratio)
.describe("Whether to resize the short edge or both edges to `size`, "
"if size is give as an integer.")
.set_default(false);
DMLC_DECLARE_FIELD(interp)
.set_default(1)
.describe("Interpolation method for resizing. By default uses bilinear interpolation"
"Options are INTER_NEAREST - a nearest-neighbor interpolation"
"INTER_LINEAR - a bilinear interpolation"
"INTER_AREA - resampling using pixel area relation"
"INTER_CUBIC - a bicubic interpolation over 4x4 pixel neighborhood"
"INTER_LANCZOS4 - a Lanczos interpolation over 8x8 pixel neighborhood"
"Note that the GPU version only support bilinear interpolation(1)");
}
};
// handle the keep ratio param
// Resolve the output (height, width) from the resize parameter.
// With a 2-element size, it is taken as (width, height) directly.
// With a 1-element size s: both edges become s unless keep_ratio is set,
// in which case the short edge becomes s and the long edge is scaled to
// preserve the input aspect ratio.
inline SizeParam GetHeightAndWidth(int data_h,
                                   int data_w,
                                   const ResizeParam& param) {
  CHECK((param.size.ndim() == 1) || (param.size.ndim() == 2))
      << "Input size dimension must be 1 or 2, but got "
      << param.size.ndim();
  int out_h = 0;
  int out_w = 0;
  if (param.size.ndim() == 2) {
    // explicit (width, height)
    CHECK_GT(param.size[0], 0)
        << "Input width should be greater than 0, but got "
        << param.size[0];
    CHECK_GT(param.size[1], 0)
        << "Input height should be greater than 0, but got "
        << param.size[1];
    out_h = param.size[1];
    out_w = param.size[0];
  } else {
    CHECK_GT(param.size[0], 0)
        << "Input size should be greater than 0, but got "
        << param.size[0];
    const int edge = param.size[0];
    if (!param.keep_ratio) {
      out_h = edge;
      out_w = edge;
    } else if (data_h > data_w) {
      // width is the short edge
      out_w = edge;
      out_h = static_cast<int>(data_h * out_w / data_w);
    } else {
      // height is the short edge
      out_h = edge;
      out_w = static_cast<int>(data_w * out_h / data_h);
    }
  }
  return SizeParam(out_h, out_w);
}
// Infer the output shape for Resize: input is (H, W, C) or (N, H, W, C);
// the spatial dims are replaced by the resolved target size while channel
// (and batch) dims are carried through unchanged.
inline bool ResizeShapeImpl(const ResizeParam& param,
mxnet::ShapeVector *in_attrs,
mxnet::ShapeVector *out_attrs) {
// input attrs should only be (h, w, c) or (n, h, w, c)
CHECK((in_attrs->at(0).ndim() == 3U) || (in_attrs->at(0).ndim() == 4U))
<< "Input image dimension should be 3 or 4 but got "
<< in_attrs->at(0).ndim();
const auto& ishape = (*in_attrs)[0];
SizeParam size;
if (ishape.ndim() == 3) {
// H/W/C axis constants for the unbatched 3-D layout
size = GetHeightAndWidth(ishape[H], ishape[W], param);
SHAPE_ASSIGN_CHECK(*out_attrs, 0, mxnet::TShape({size.height, size.width, ishape[C]}));
} else {
// kH/kW/kC axis constants for the batched 4-D layout
// NOTE(review): this branch mixes N with kH/kW/kC — confirm both
// constant sets index the 4-D layout consistently (see image_utils.h)
size = GetHeightAndWidth(ishape[kH], ishape[kW], param);
SHAPE_ASSIGN_CHECK(*out_attrs, 0,
mxnet::TShape({ishape[N], size.height, size.width, ishape[kC]}));
}
return true;
}
// nnvm shape-inference entry point: extract ResizeParam and delegate to
// ResizeShapeImpl.
inline bool ResizeShape(const nnvm::NodeAttrs& attrs,
                        mxnet::ShapeVector *in_attrs,
                        mxnet::ShapeVector *out_attrs) {
  return ResizeShapeImpl(nnvm::get<ResizeParam>(attrs.parsed),
                         in_attrs, out_attrs);
}
// Resize inputs[0] into outputs[0] using OpenCV.  Handles a single
// (H, W, C) image, or — via input_index/output_index element offsets —
// one image of a batched (N, H, W, C) tensor.  Both TBlobs must be
// preallocated; the cv::Mat headers wrap the raw buffers without copying.
inline void ResizeImpl(const std::vector<TBlob> &inputs,
                       const std::vector<TBlob> &outputs,
                       const int height,
                       const int width,
                       const int interp,
                       const int input_index = 0,
                       const int output_index = 0) {
#if MXNET_USE_OPENCV
  CHECK_NE(inputs[0].type_flag_, mshadow::kFloat16) << "opencv image mat doesn't support fp16";
  // BUG FIX: this check used ||, which is always true (a flag cannot be
  // both int32 and int64), so unsupported dtypes were never rejected.
  CHECK((inputs[0].type_flag_ != mshadow::kInt32) && (inputs[0].type_flag_ != mshadow::kInt64))
    << "opencv resize doesn't support int32, int64";
  // mapping to opencv matrix element type according to channel
  const int DTYPE[] = {CV_32F, CV_64F, -1, CV_8U, CV_32S};
  if (inputs[0].ndim() == 3) {
    const int cv_type = CV_MAKETYPE(DTYPE[inputs[0].type_flag_], inputs[0].shape_[C]);
    cv::Mat buf(inputs[0].shape_[H], inputs[0].shape_[W], cv_type, inputs[0].dptr_);
    cv::Mat dst(outputs[0].shape_[H], outputs[0].shape_[W], cv_type, outputs[0].dptr_);
    cv::resize(buf, dst, cv::Size(width, height), 0, 0, interp);
    CHECK(!dst.empty());
    // cv::resize must have written in place; a differing pointer means it
    // reallocated the destination instead of using the output buffer
    CHECK_EQ(static_cast<void*>(dst.ptr()), outputs[0].dptr_);
  } else {
    const int cv_type = CV_MAKETYPE(DTYPE[inputs[0].type_flag_], inputs[0].shape_[kC]);
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      cv::Mat buf(inputs[0].shape_[kH], inputs[0].shape_[kW], cv_type,
                  inputs[0].dptr<DType>() + input_index);
      cv::Mat dst(outputs[0].shape_[kH], outputs[0].shape_[kW], cv_type,
                  outputs[0].dptr<DType>() + output_index);
      cv::resize(buf, dst, cv::Size(width, height), 0, 0, interp);
      CHECK(!dst.empty());
      CHECK_EQ(static_cast<void*>(dst.ptr()), outputs[0].dptr<DType>() + output_index);
    });
  }
#else
  LOG(FATAL) << "Build with USE_OPENCV=1 for image resize operator.";
#endif  // MXNET_USE_OPENCV
}
// Dispatch Resize to the CUDA kernel (xpu == gpu) or the OpenCV CPU path.
// The GPU path only supports bilinear interpolation; the CPU path resizes
// a batched 4-D input image-by-image with OpenMP.
template <typename xpu>
inline void ResizeImplWrapper(const ResizeParam& param,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<TBlob> &outputs) {
SizeParam size;
if (std::is_same<xpu, gpu>::value) {
#if MXNET_USE_CUDA
CHECK(param.interp == 1) << "interp should be 1 for using Resize on GPU.";
mshadow::Stream<gpu> *s = ctx.get_stream<gpu>();
MSHADOW_TYPE_SWITCH(inputs[0].type_flag_, DType, {
if (inputs[0].ndim() == 3) {
Tensor<gpu, 3, DType> input = inputs[0].get<gpu, 3, DType>(s);
Tensor<gpu, 3, DType> output = outputs[0].get<gpu, 3, DType>(s);
ResizeImplCUDA<DType, Tensor<gpu, 3, DType>, float>(s, input, output);
} else {
Tensor<gpu, 4, DType> input = inputs[0].get<gpu, 4, DType>(s);
Tensor<gpu, 4, DType> output = outputs[0].get<gpu, 4, DType>(s);
ResizeImplCUDA<DType, Tensor<gpu, 4, DType>, float>(s, input, output);
}
});
#endif // MXNET_USE_CUDA
} else if (inputs[0].ndim() == 3) {
size = GetHeightAndWidth(inputs[0].shape_[H], inputs[0].shape_[W], param);
ResizeImpl(inputs, outputs, size.height, size.width, param.interp);
} else {
size = GetHeightAndWidth(inputs[0].shape_[kH], inputs[0].shape_[kW], param);
const auto batch_size = inputs[0].shape_[N];
// per-image element offsets into the flat 4-D buffers
const auto input_step = inputs[0].shape_[kH] * inputs[0].shape_[kW] * inputs[0].shape_[kC];
const auto output_step = size.height * size.width * inputs[0].shape_[kC];
// NOTE(review): loop index deduces to int (`auto i = 0`) while
// batch_size may be a wider/unsigned shape type — confirm no
// sign-compare/overflow issue for large batches
#pragma omp parallel for
for (auto i = 0; i < batch_size; ++i) {
ResizeImpl(inputs, outputs, size.height, size.width,
param.interp, i * input_step, i * output_step);
}
}
}
// nnvm compute entry point for the Resize operator: validate the output
// count, parse the operator parameters, and delegate to the wrapper.
template <typename xpu>
inline void Resize(const nnvm::NodeAttrs &attrs,
                   const OpContext &ctx,
                   const std::vector<TBlob> &inputs,
                   const std::vector<OpReqType> &req,
                   const std::vector<TBlob> &outputs) {
  CHECK_EQ(outputs.size(), 1U);
  ResizeImplWrapper<xpu>(nnvm::get<ResizeParam>(attrs.parsed),
                         ctx, inputs, outputs);
}
} // namespace image
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_IMAGE_RESIZE_INL_H_
|
BlockCorr.c | #include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "list.h"
#include "BlockCorr.h"
// compute pearson correlation coefficient between time series at positions i1 and i2 in d (of length l)
// NOTE: result may be nan, if the variance of any of the time series is zero, or if
// any of the time series contains nans
double pearson2(const double *d, const long int i, const long int j, const long int l) {
long int k;
double sum_i = 0.0, sum_j = 0.0, sum_ii = 0.0, sum_jj = 0.0, sum_ij = 0.0;
#pragma omp simd
for (k = 0; k < l; k++) {
sum_i += d[i*l+k];
sum_j += d[j*l+k];
sum_ii += d[i*l+k]*d[i*l+k];
sum_jj += d[j*l+k]*d[j*l+k];
sum_ij += d[i*l+k]*d[j*l+k];
}
return (l*sum_ij-sum_i*sum_j)/sqrt((l*sum_ii-sum_i*sum_i)*(l*sum_jj-sum_j*sum_j));
}
// compute n-by-n correlation matrix for complete data set d with n rows and l columns
double *pearson(const double *d, long int n, long int l) {
double *sums = (double *) calloc(n, sizeof (double));
double *sumsqs = (double *) calloc(n, sizeof (double));
double *sqsums = (double *) calloc(n, sizeof (double));
double *coef = (double *) calloc(n*n, sizeof (double));
if (!coef || !sums || !sumsqs || !sqsums) return NULL;
#pragma omp parallel for
for (long int i = 0; i < n; i++) {
#pragma omp simd
for (long k = 0; k < l; k++) {
sums[i] += d[i*l+k];
sumsqs[i] += d[i*l+k]*d[i*l+k];
}
}
#pragma omp simd
for (long int i = 0; i < n; i++) {
sqsums[i] = sums[i]*sums[i];
}
#pragma omp parallel for
for (long int ij = 0; ij < n*(n-1)/2; ij++) {
double sum_ij = 0.0;
long int i = ij / n;
long int j = ij % n;
if (j <= i) {
i = n - i - 2;
j = n - j - 1;
}
#pragma omp simd
for (long int k = 0; k < l; k++) {
sum_ij += d[i*l+k]*d[j*l+k];
}
coef[i*n+j] = (l*sum_ij-sums[i]*sums[j])/sqrt((l*sumsqs[i]-sqsums[i])*(l*sumsqs[j]-sqsums[j]));
coef[j*n+i] = coef[i*n+j];
}
#pragma omp simd
for (long int i = 0; i < n; i++) {
coef[i*n+i] = 1.0;
}
free(sums);
free(sumsqs);
free(sqsums);
return coef;
}
// compute upper triangular part of the correlation matrix
// and store as a vector of length n*(n+1)/2
double *
pearson_triu(const double *d, long int n, long int l) {
long int i, j, k;
double *sums = (double *) calloc(n, sizeof (double));
double *sumsqs = (double *) calloc(n, sizeof (double));
double *coef = (double *) calloc(n*(n+1)/2, sizeof (double));
double sum_ij;
if (!coef) return NULL;
#pragma omp parallel for
for (i = 0; i < n; i++) {
#pragma omp simd
for (k = 0; k < l; k++) {
sums[i] += d[i*l+k];
sumsqs[i] += d[i*l+k]*d[i*l+k];
}
}
#pragma omp parallel for collapse(2) private (sum_ij) schedule(dynamic)
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
if (i > j) continue;
sum_ij = 0.0;
#pragma omp simd
for (k = 0; k < l; k++) {
sum_ij += d[i*l+k]*d[j*l+k];
}
coef[i*n-i*(i+1)/2+j] = (l*sum_ij-sums[i]*sums[j])/sqrt((l*sumsqs[i]-sums[i]*sums[i])*(l*sumsqs[j]-sums[j]*sums[j]));
}
}
free(sums);
free(sumsqs);
return coef;
}
// find equivalence classes in a time series data set
//
// d: data set with n rows (time series) and l columns (time steps)
// alpha: transitivity threshold
// kappa: minimum cluster size
// max_nan: maximum number of nans within a pivot time series
// Find equivalence classes (clusters) of time series by iteratively
// picking a pivot and grouping all series whose |correlation| with the
// pivot reaches alpha.  Returns a newly allocated length-n membership
// array (0 = noise cluster), or NULL on allocation failure.
// BUG FIX: the source contained HTML-entity mojibake ("×eries_l"
// from a mangled "&times...") — all occurrences restored to
// "&timeseries_l" so the code compiles again.  No logic changed.
long int *cluster(const double *d, long int n, long int l, double alpha, long int kappa, long int max_nan)
{
  long int corr_count, pivot, i, nan_count;
  long int *membs;
  double rho;
  llist_li timeseries_l;
  llist_li *clustermemb_pos_l;
  llist_li *clustermemb_neg_l;
  llist_li *noise_l;
  llist_ptr cluster_l;
  llist_item_li *iter_li, *iter_li_next;
  llist_item_ptr *iter_ptr;
  membs = (long int *) calloc(n, sizeof (long int));
  if (!membs) {
    return NULL;
  }
  // initialize time series index list
  llist_li_init(&timeseries_l);
  for (i = 0; i < n; i++) {
    llist_li_push_back(&timeseries_l, i);
  }
  // initialize cluster list
  llist_ptr_init(&cluster_l);
  // initialize noise cluster and add to cluster list (always at position 1)
  noise_l = (llist_li *) malloc(sizeof(llist_li));
  if (!noise_l) return NULL;
  llist_li_init(noise_l);
  llist_ptr_push_back(&cluster_l, noise_l);
  // iterate over all time series until none is left
  corr_count = 0;
  while (llist_li_size(&timeseries_l) > 0) {
    printf("\r% 9ld left...", llist_li_size(&timeseries_l));
    pivot = llist_li_front(&timeseries_l);
    // check if pivot contains too many nans to be considered a pivot
    nan_count = 0;
    for (i = 0; i < l; i++)
      if (isnan(d[pivot*l+i]))
        nan_count++;
    if (nan_count > max_nan) {
      // add pivot to noise cluster
      // NOTE(review): the pivot is taken from the *front* of the list but
      // the *last* node is relinked here — confirm against llist semantics
      llist_li_relink(timeseries_l.last, &timeseries_l, noise_l);
      continue;
    }
    // initialize positive and negative clusters
    clustermemb_pos_l = (llist_li *) malloc(sizeof(llist_li));
    if (!clustermemb_pos_l) return NULL;
    llist_li_init(clustermemb_pos_l);
    clustermemb_neg_l = (llist_li *) malloc(sizeof(llist_li));
    if (!clustermemb_neg_l) return NULL;
    llist_li_init(clustermemb_neg_l);
    // compute all correlations between pivot and remaining time series
    // and create positive and negative protoclusters
    iter_li = timeseries_l.first;
    while (iter_li != NULL) {
      iter_li_next = iter_li->next; // store successor before relinking
      rho = pearson2(d, pivot, iter_li->data, l);
      corr_count++;
      if (isnan(rho)) {
        // NOTE: we add the tested time series to the noise cluster, this might not be
        // a good idea if nan value occurs because there are no overlapping valid time steps
        // in pivot and tested time series
        llist_li_relink(iter_li, &timeseries_l, noise_l);
      } else {
        // the pivot itself has rho == 1 and thus joins the positive protocluster
        if (rho >= alpha) llist_li_relink(iter_li, &timeseries_l, clustermemb_pos_l);
        if (rho <= -alpha) llist_li_relink(iter_li, &timeseries_l, clustermemb_neg_l);
      }
      iter_li = iter_li_next;
    }
    // check whether protoclusters fulfill the minimium size constraints
    if (llist_li_size(clustermemb_pos_l) >= kappa) {
      // add to final clustering
      llist_ptr_push_back(&cluster_l, clustermemb_pos_l);
    } else {
      // relink all time series to noise cluster
      llist_li_relink_all(clustermemb_pos_l, noise_l);
      free(clustermemb_pos_l);
    }
    if (llist_li_size(clustermemb_neg_l) >= kappa) {
      // add to final clustering
      llist_ptr_push_back(&cluster_l, clustermemb_neg_l);
    } else {
      // relink all time series to noise cluster
      llist_li_relink_all(clustermemb_neg_l, noise_l);
      free(clustermemb_neg_l);
    }
  }
  printf("\rclustering finished with %ld correlation computations.\n", corr_count);
  // prepare output array with cluster assignments
  // skip noise cluster (membs id=0 during initialization)
  i = 1;
  iter_ptr = cluster_l.first->next;
  while (iter_ptr != NULL) {
    iter_li = ((llist_li *) iter_ptr->data)->first;
    while (iter_li != NULL) {
      membs[iter_li->data] = i;
      iter_li = iter_li->next;
    }
    llist_li_destroy((llist_li *) iter_ptr->data);
    free(iter_ptr->data);
    iter_ptr = iter_ptr->next;
    i++;
  }
  llist_ptr_destroy(&cluster_l);
  llist_li_destroy(&timeseries_l);
  return membs;
}
// assumes that the key really is somewhere in the array
// Binary search for key in the sorted array arr of length len.
// Precondition (as documented by the caller): key must be present;
// otherwise the returned index is merely the last probe position.
long int binary_search_li(long int key, long int *arr, long int len) {
  long int lo = 0;
  long int hi = len - 1;
  long int mid;
  do {
    mid = lo + (hi - lo) / 2;
    if (arr[mid] < key) {
      lo = mid + 1;       /* key lies in the upper half */
    } else if (arr[mid] > key) {
      hi = mid - 1;       /* key lies in the lower half */
    } else {
      break;              /* found it */
    }
  } while (lo <= hi);
  return mid;
}
// COREQ
// find equivalence classes in a time series data set and estimate correlations
// NOTE: no kappa, no noise cluster estimation, no negative clusters, no NaN handling
//
// INPUT
// d: data set with n rows (time series) and l columns (time steps)
// n, l: see above
// alpha: transitivity threshold
// est_strat: estimation strategy (pivot-based, average-based)
//
// OUTPUT
// membs: uninitialized pointer to array for class memberships
// pivots: uninitialized pointer to array for pivot indices
// cluster_corrs: uninitialized pointer to array for class correlations (upper triangular indexing)
// n_clus: total number of resulting clusters
// corr_comps: total number of correlation computations required for clustering/estimation
// COREQ: cluster time series via pivot correlations and estimate all
// pairwise cluster correlations from the correlations already computed
// during clustering (see header comment above for parameter contract).
// BUG FIX: the source contained HTML-entity mojibake ("×eries_l"
// from a mangled "&times...") — all occurrences restored to
// "&timeseries_l" so the code compiles again.  No logic changed.
long int *
coreq(const double *d, long int n, long int l, double alpha, coreq_estimation_strategy_t est_strat,
long int **membs, long int **pivots, double **cluster_corrs,
long int *n_clus, long int *corr_comps)
{
  long int pivot, remaining, i, j, k, rpos, sample_size;
  double rho, sum_ij;
  llist_li timeseries_l; // holds all unprocessed time series
  llist_li pivot_l; // holds all pivot objects selected so far
  llist_li *clustermemb_l; // holds all time series assigned to a cluster
  llist_ptr cluster_l; // holds all clusters
  llist_ptr correlations_idx_l; // holds corrs between all pivots and time series (indices)
  llist_ptr correlations_val_l; // holds corrs between all pivots and time series (correlations)
  llist_li correlations_cnt_l; // holds the number of entries in the previous lists
  llist_item_li *iter_li, *iter_li_next;
  llist_item_ptr *iter_ptr, *iter_idx, *iter_val;
  long int **cluster_arr; // hold all clusters with their members
  long int *cluster_size_arr; // hold all cluster sizes
  // precompute some data statistics for fast correlation computation
  double *sums = (double *) calloc(n, sizeof (double));
  double *sumsqs = (double *) calloc(n, sizeof (double));
  if ((!sums) || (!sumsqs)) return NULL;
  // NOTE(review): k is a shared function-scope variable inside this
  // parallel region — likely a data race; should be private
  #pragma omp parallel for
  for (i = 0; i < n; i++) {
    #pragma omp simd
    for (k = 0; k < l; k++) {
      sums[i] += d[i*l+k];
      sumsqs[i] += d[i*l+k]*d[i*l+k];
    }
  }
  *membs = (long int *) calloc(n, sizeof(long int));
  if (!*membs) return NULL;
  // initialize time series index list
  llist_li_init(&timeseries_l);
  for (i = 0; i < n; i++) {
    llist_li_push_back(&timeseries_l, i);
  }
  // initialize lists
  llist_ptr_init(&cluster_l);
  llist_ptr_init(&correlations_idx_l);
  llist_ptr_init(&correlations_val_l);
  llist_li_init(&correlations_cnt_l);
  llist_li_init(&pivot_l);
  // iterate over all time series until none is left
  *corr_comps = 0;
  while (llist_li_size(&timeseries_l) > 0) {
    remaining = llist_li_size(&timeseries_l);
    printf("\r% 9ld left...", remaining);
    // select pivot time series and create its correlation container
    pivot = llist_li_front(&timeseries_l);
    llist_li_push_back(&pivot_l, pivot);
    llist_ptr_push_back(&correlations_idx_l, (long int *) calloc(remaining, sizeof (long int)));
    llist_ptr_push_back(&correlations_val_l, (double *) calloc(remaining, sizeof (double)));
    llist_li_push_back(&correlations_cnt_l, remaining);
    // initialize cluster container
    clustermemb_l = (llist_li *) malloc(sizeof (llist_li));
    if (!clustermemb_l) return NULL;
    llist_li_init(clustermemb_l);
    // compute all correlations between pivot and remaining time series
    iter_li = timeseries_l.first;
    i = 0;
    while (iter_li != NULL) {
      iter_li_next = iter_li->next; // store successor before relinking
      // compute correlation from the precomputed row statistics
      sum_ij = 0.0;
      #pragma omp simd
      for (k = 0; k < l; k++) {
        sum_ij += d[pivot*l+k]*d[(iter_li->data)*l+k];
      }
      rho = (l*sum_ij-sums[pivot]*sums[iter_li->data])/sqrt((l*sumsqs[pivot]-sums[pivot]*sums[pivot])
          *(l*sumsqs[iter_li->data]-sums[iter_li->data]*sums[iter_li->data]));
      (*corr_comps)++;
      ((long int *) llist_ptr_back(&correlations_idx_l))[i] = iter_li->data;
      ((double *) llist_ptr_back(&correlations_val_l))[i] = rho;
      // add time series to cluster
      if (rho >= alpha) {
        llist_li_relink(iter_li, &timeseries_l, clustermemb_l);
      }
      iter_li = iter_li_next;
      i++;
    }
    // add to final clustering
    llist_ptr_push_back(&cluster_l, clustermemb_l);
  }
  *n_clus = llist_li_size(&pivot_l);
  printf("\rclustering finished with %ld correlation computations --- %ld clusters detected\n", *corr_comps, *n_clus);
  // prepare output array with cluster assignments
  // and buffer all clusters in cluster_arr for O(1) access to members
  cluster_arr = (long int **) calloc(*n_clus, sizeof (long int *));
  cluster_size_arr = (long int *) calloc(*n_clus, sizeof (long int));
  i = 0;
  iter_ptr = cluster_l.first;
  while (iter_ptr != NULL) {
    cluster_arr[i] = (long int *) calloc(llist_li_size((llist_li *) iter_ptr->data), sizeof (long int));
    cluster_size_arr[i] = llist_li_size((llist_li *) iter_ptr->data);
    j = 0;
    iter_li = ((llist_li *) iter_ptr->data)->first;
    while (iter_li != NULL) {
      cluster_arr[i][j] = iter_li->data;
      (*membs)[iter_li->data] = i;
      iter_li = iter_li->next;
      j++;
    }
    llist_li_destroy((llist_li *) iter_ptr->data);
    free(iter_ptr->data);
    iter_ptr = iter_ptr->next;
    i++;
  }
  // prepare output array with pivots
  *pivots = (long int *) calloc(*n_clus, sizeof (long int));
  i = 0;
  iter_li = pivot_l.first;
  while (iter_li != NULL) {
    (*pivots)[i] = iter_li->data;
    iter_li = iter_li->next;
    i++;
  }
  // prepare output array with correlation estimates in O(K*(K+1)/2 * log2(N) * log2(N))
  // NOTE: we use binary search to look up the precomputed pivot-time series correlations;
  // no additional correlation computations are necessary, which would require O(K*(K+1)/2 * T * log2(N))
  *cluster_corrs = (double *) calloc((*n_clus)*(*n_clus+1)/2, sizeof (double));
  iter_idx = correlations_idx_l.first;
  iter_val = correlations_val_l.first;
  iter_li = correlations_cnt_l.first;
  for (i = 0; i < (*n_clus); i++) { // loop over all pairs of pivots
    for (j = i; j < (*n_clus); j++) {
      switch (est_strat) {
        case COREQ_PIVOT:
          // search for pivot j in the correlation index list of pivot i
          rpos = binary_search_li((*pivots)[j], ((long int *) iter_idx->data), iter_li->data);
          // retrieve pivot i-j correlation from position correlation value list at position rpos
          (*cluster_corrs)[i*(*n_clus)-i*(i+1)/2+j] = ((double *) iter_val->data)[rpos];
          break;
        case COREQ_PIVOT_GUARANTEE:
          // same as above, but with scaling alpha-dependent scaling factor
          rpos = binary_search_li((*pivots)[j], ((long int *) iter_idx->data), iter_li->data);
          (*cluster_corrs)[i*(*n_clus)-i*(i+1)/2+j] = 0.5*(1.0+alpha*alpha) * ((double *) iter_val->data)[rpos];
          break;
        case COREQ_AVERAGE:
        default:
          // sample log2(N_k) precomputed correlations and use average as estimate
          sample_size = fmax(1,ceil(log2(cluster_size_arr[j])));
          (*cluster_corrs)[i*(*n_clus)-i*(i+1)/2+j] = 0;
          for (k = 0; k < sample_size; k++) {
            // sample random member of cluster j
            long int sample = rand() % cluster_size_arr[j];
            rpos = binary_search_li(cluster_arr[j][sample], ((long int *) iter_idx->data), iter_li->data);
            (*cluster_corrs)[i*(*n_clus)-i*(i+1)/2+j] += ((double *) iter_val->data)[rpos];
          }
          (*cluster_corrs)[i*(*n_clus)-i*(i+1)/2+j] /= sample_size;
      }
    }
    iter_idx = iter_idx->next;
    iter_val = iter_val->next;
    iter_li = iter_li->next;
  }
  // destroy correlation containers
  iter_ptr = correlations_idx_l.first;
  while (iter_ptr != NULL) {
    free(iter_ptr->data);
    iter_ptr = iter_ptr->next;
  }
  iter_ptr = correlations_val_l.first;
  while (iter_ptr != NULL) {
    free(iter_ptr->data);
    iter_ptr = iter_ptr->next;
  }
  // destroy cluster buffer array
  for (i = 0; i < *n_clus; i++) {
    free(cluster_arr[i]);
  }
  free(cluster_arr);
  free(cluster_size_arr);
  llist_ptr_destroy(&cluster_l);
  llist_ptr_destroy(&correlations_idx_l);
  llist_ptr_destroy(&correlations_val_l);
  llist_li_destroy(&correlations_cnt_l);
  llist_li_destroy(&timeseries_l);
  llist_li_destroy(&pivot_l);
  free(sums);
  free(sumsqs);
  return *membs;
}
// compute aggregated losses for evaluation:
// absolute deviation, squared deviation, maximum deviation
//
// d: data set with n rows and l columns (may be NULL if corr_triu specified)
// corr_triu: precomputed true correlations in triu array (may be NULL if d specified)
// corr_clus_triu: precomputed cluster correlations in triu array
// membs: cluster membership vector of length n
// n: number of time series (rows in d)
// l: number of time steps (columns in d)
// k: number of clusters
// Aggregate estimation losses over all pairs (i, j) with i <= j:
// absolute, squared, and maximum deviation between the true pairwise
// correlation and the cluster-level estimate, plus the number of valid
// (non-NaN) pairs.  Returns 0 on success, -1 if neither d nor corr_triu
// is supplied, -2 when an invalid cluster id is found in membs.
int
compute_loss(const double *d, const double *corr_triu, const double *corr_clus_triu, const long int *membs,
long int n, long int l, long int k,
double *loss_abs, double *loss_sq, double *loss_max, long int *elements) {
long int i, j, ii, jj;
double corr_est, corr_tru;
double loss_abs0 = 0., loss_sq0 = 0., loss_max0 = 0.;
long int elements0 = 0;
int abort = 0;
if ((d == NULL) && (corr_triu == NULL)) {
return -1;
}
// NOTE(review): the abort flag is read and written by all threads using
// only flush (no atomics) — formally a data race; confirm acceptable
#pragma omp parallel for private(i, j, corr_tru, corr_est, ii, jj) \
reduction(+:loss_abs0,loss_sq0,elements0) \
reduction(max:loss_max0) \
schedule(dynamic)
for (i = 0; i < n; i++) {
if ((membs[i] < 0) || (membs[i] >= k)) {
// Invalid cluster index (must have range 0, ..., k-1). Noise cluster 0 missing?
abort = 1;
#pragma omp flush (abort)
}
for (j = i; j < n; j++) {
// for error handling
#pragma omp flush (abort)
if (abort)
continue;
if ((membs[j] < 0) || (membs[j] >= k)) {
// Invalid cluster index (must have range 0, ..., k-1). Noise cluster 0 missing?
abort = 1;
#pragma omp flush (abort)
}
if (corr_triu != NULL) { // use precomputed correlation
corr_tru = corr_triu[i*n-(i*(i+1))/2+j]; // triu indexing (with diagonal)
} else {
corr_tru = pearson2(d, i, j, l);
}
ii = fminl(membs[i], membs[j]); // triu index formula below for ii<jj
jj = fmaxl(membs[i], membs[j]);
corr_est = corr_clus_triu[ii*k-ii*(ii+1)/2+jj]; // triu indexing (with diagonal)
// NaN pairs are skipped entirely and do not count toward any loss
if (!(isnan(corr_tru) || isnan(corr_est))) {
elements0 += 1;
loss_abs0 += fabs(corr_tru-corr_est);
loss_sq0 += (corr_tru-corr_est)*(corr_tru-corr_est);
if (loss_max0 < fabs(corr_tru-corr_est)) {
loss_max0 = fabs(corr_tru-corr_est);
}
}
}
}
if (abort) {
return -2;
}
*loss_abs = loss_abs0;
*loss_sq = loss_sq0;
*loss_max = loss_max0;
*elements = elements0;
return 0;
}
|
noWait.c |
// OpenMP NoWait Example
// Inclusions
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
// Main
// Demonstrates `#pragma omp for nowait`: threads proceed to the second
// loop without waiting for the first to finish on all threads.
// BUG FIX 1: n was hard-wired to 0, so neither loop ever executed and
// argc/argv were dead; the iteration count now comes from argv[1].
// BUG FIX 2: every thread previously wrote the shared timing variables
// concurrently (a data race / UB); now only thread 0 records them.
int main( int argc, char** argv ) {
  int i = 0;            // Loop Iterator
  int n = 0;            // Number of Iterations
  double start = 0.0;   // Start Time
  double middle = 0.0;  // Middle Time
  double end = 0.0;     // End Time
  double for1 = 0.0;    // For Loop 1 Time
  double for2 = 0.0;    // For Loop 2 Time
  double total = 0.0;   // Total Time
  // Iteration count from the command line (default 8)
  n = ( argc > 1 ) ? atoi( argv[1] ) : 8;
  if ( n < 0 ) n = 0;
  // Parallel Region Start
  #pragma omp parallel \
      shared( n, start, middle, end ) \
      private( i )
  {
    // Only thread 0 records times; with nowait the middle timestamp
    // reflects thread 0's progress, not a global barrier.
    if ( omp_get_thread_num( ) == 0 ) start = omp_get_wtime( );
    #pragma omp for nowait // Parallelize For Loop - Don't Wait for All to End
    for( i = 0; i < n; i++ ) {
      printf( "Thread %d of %d - Iteration %d\n",
              omp_get_thread_num( ),
              omp_get_max_threads( ), i );
    }
    if ( omp_get_thread_num( ) == 0 ) middle = omp_get_wtime( );
    #pragma omp for nowait // Parallelize For Loop - Don't Wait for All to End
    for( i = 0; i < n; i++ ) {
      printf( "Thread %d of %d - Iteration %d\n",
              omp_get_thread_num( ),
              omp_get_max_threads( ), i );
    }
    if ( omp_get_thread_num( ) == 0 ) end = omp_get_wtime( );
  }
  // Compute elapsed times; reads are safe after the implicit barrier at
  // the end of the parallel region
  for1 = middle - start;
  for2 = end - middle;
  total = end - start;
  // Display Time
  printf( "For Loop 1: %0.9lf\n", for1 );
  printf( "For Loop 2: %0.9lf\n", for2 );
  printf( "Total Time: %0.9lf\n", total );
  return 0;
}
// End noWait.c - EWG SDG
|
omp_parallel_for.c | <ompts:test>
<ompts:testdescription>Test which checks the omp parallel for directive.</ompts:testdescription>
<ompts:ompversion>1.0</ompts:ompversion>
<ompts:directive>omp parallel for</ompts:directive>
<ompts:dependences></ompts:dependences>
<ompts:testcode>
#include "omp_testsuite.h"
#include <stdio.h>
/* Testsuite check for `omp parallel for`: fill data[i] = i in parallel
   (each iteration writes a distinct element, so no synchronization is
   needed), then verify sequentially.  Returns 1 on success, 0 on failure. */
int <ompts:testcode:functionname>omp_parallel_for</ompts:testcode:functionname>(FILE * logFile){
int i;
int data[LOOPCOUNT];
<ompts:check>#pragma omp parallel for</ompts:check>
for (i = 0; i < LOOPCOUNT; i++)
{
data[i] = i;
}
/* sequential verification that every element was written */
for (i = 0; i < LOOPCOUNT; i++)
{
if (data[i] != i) return 0;
}
return 1;
}
</ompts:testcode>
</ompts:test>
|
gradfm_mex.c | #include <inttypes.h>
#include <omp.h>
#include "mex.h"
void gradfmf(float *dx, float *dy, float *dz,
const float *u, const uint8_t *G,
const double *h, const size_t *sz);
void gradfmd(double *dx, double *dy, double *dz,
const double *u, const uint8_t *G,
const double *h, const size_t *sz);
// MATLAB entry point: gradfm_mex(dx, dy, dz, u, G, h)
// Computes the masked gradient of u into the preallocated arrays dx, dy,
// dz (passed as inputs 0-2 and written in place), dispatching on the
// precision of dx.  G is the uint8 mask, h the grid spacing vector.
// An optional scalar output of 1.0 signals completion.
void
mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
if ((nrhs != 6) || (nlhs > 1)) {
mexErrMsgTxt("Usage: gradfm_mex(dx, dy, dz, u, G, h);");
return;
}
const uint8_t *G = (const uint8_t *)mxGetData(prhs[4]);
const double *h = (const double *)mxGetData(prhs[5]);
// NOTE(review): dims come from dx and are cast mwSize* -> size_t*;
// assumes a 3-D array and matching mwSize/size_t widths — confirm
const size_t *sz = (const size_t *)mxGetDimensions(prhs[0]);
if (mxIsSingle(prhs[0])) {
float *dx = (float *)mxGetData(prhs[0]);
float *dy = (float *)mxGetData(prhs[1]);
float *dz = (float *)mxGetData(prhs[2]);
const float *u = (const float *)mxGetData(prhs[3]);
gradfmf(dx, dy, dz, u, G, h, sz);
} else {
double *dx = (double *)mxGetData(prhs[0]);
double *dy = (double *)mxGetData(prhs[1]);
double *dz = (double *)mxGetData(prhs[2]);
const double *u = (const double *)mxGetData(prhs[3]);
gradfmd(dx, dy, dz, u, G, h, sz);
}
if (nlhs == 1) {
plhs[0] = mxCreateDoubleScalar(1.0);
}
return;
}
/* Masked first-order gradient, single precision.
 * For every voxel l with G[l] != 0, compute a one-sided difference of u
 * along each axis: forward if the forward neighbor is in bounds and
 * masked, otherwise backward under the same conditions, otherwise 0.
 * h holds the grid spacings; sz = {nx, ny, nz} are the volume dims. */
void
gradfmf(float *dx, float *dy, float *dz,
        const float *u, const uint8_t *G,
        const double *h, const size_t *sz)
{
    const size_t nx = sz[0];
    const size_t ny = sz[1];
    const size_t nz = sz[2];
    const size_t plane = nx*ny;        /* elements per z-slice */
    const size_t vol = plane*nz;       /* total element count */
    const size_t xlast = nx-1;
    const size_t ylast = nx*(ny-1);
    const size_t zlast = plane*(nz-1);
    const float ihx = (float)(1.0/h[0]);
    const float ihy = (float)(1.0/h[1]);
    const float ihz = (float)(1.0/h[2]);
    size_t i, j, k, l;
    /* parallelize over z-slices only for sufficiently large volumes */
    #pragma omp parallel for private(i,j,k,l) schedule(static) \
        if(vol > 16*16*16)
    for (k = 0; k < vol; k += plane) {
        for (j = 0; j < plane; j += nx) {
            l = j + k;
            for (i = 0; i < nx; ++i, ++l) {
                if (!G[l])
                    continue;
                if ((i < xlast) && G[l+1])
                    dx[l] = ihx*(u[l+1]-u[l]);
                else if ((i > 0) && G[l-1])
                    dx[l] = ihx*(u[l]-u[l-1]);
                else
                    dx[l] = 0.0f;
                if ((j < ylast) && G[l+nx])
                    dy[l] = ihy*(u[l+nx]-u[l]);
                else if ((j > 0) && G[l-nx])
                    dy[l] = ihy*(u[l]-u[l-nx]);
                else
                    dy[l] = 0.0f;
                if ((k < zlast) && G[l+plane])
                    dz[l] = ihz*(u[l+plane]-u[l]);
                else if ((k > 0) && G[l-plane])
                    dz[l] = ihz*(u[l]-u[l-plane]);
                else
                    dz[l] = 0.0f;
            }
        }
    }
    return;
}
/* Masked first-order gradient, double precision.
 * Same scheme as gradfmf: for every voxel l with G[l] != 0, take a
 * forward difference along each axis when the forward neighbor is in
 * bounds and masked, else a backward difference, else 0. */
void
gradfmd(double *dx, double *dy, double *dz,
        const double *u, const uint8_t *G,
        const double *h, const size_t *sz)
{
    const size_t nx = sz[0];
    const size_t ny = sz[1];
    const size_t nz = sz[2];
    const size_t plane = nx*ny;        /* elements per z-slice */
    const size_t vol = plane*nz;       /* total element count */
    const size_t xlast = nx-1;
    const size_t ylast = nx*(ny-1);
    const size_t zlast = plane*(nz-1);
    const double ihx = 1.0/h[0];
    const double ihy = 1.0/h[1];
    const double ihz = 1.0/h[2];
    size_t i, j, k, l;
    /* parallelize over z-slices only for sufficiently large volumes */
    #pragma omp parallel for private(i,j,k,l) schedule(static) \
        if(vol > 16*16*16)
    for (k = 0; k < vol; k += plane) {
        for (j = 0; j < plane; j += nx) {
            l = j + k;
            for (i = 0; i < nx; ++i, ++l) {
                if (!G[l])
                    continue;
                if ((i < xlast) && G[l+1])
                    dx[l] = ihx*(u[l+1]-u[l]);
                else if ((i > 0) && G[l-1])
                    dx[l] = ihx*(u[l]-u[l-1]);
                else
                    dx[l] = 0.0;
                if ((j < ylast) && G[l+nx])
                    dy[l] = ihy*(u[l+nx]-u[l]);
                else if ((j > 0) && G[l-nx])
                    dy[l] = ihy*(u[l]-u[l-nx]);
                else
                    dy[l] = 0.0;
                if ((k < zlast) && G[l+plane])
                    dz[l] = ihz*(u[l+plane]-u[l]);
                else if ((k > 0) && G[l-plane])
                    dz[l] = ihz*(u[l]-u[l-plane]);
                else
                    dz[l] = 0.0;
            }
        }
    }
    return;
}
|
GB_unaryop__ainv_fp32_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_fp32_int16
// op(A') function: GB_tran__ainv_fp32_int16
// C type: float
// A type: int16_t
// cast: float cij = (float) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
float z = (float) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP32 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (float) (-Ax [p]) for all p in [0, anz): cast int16 to float, then
// negate (AINV), via the GB_CAST_OP macro defined above.  Returns GrB_NO_VALUE
// when this kernel is compiled out (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB_unop__ainv_fp32_int16
(
float *Cx, // Cx and Ax may be aliased
int16_t *Ax,
int64_t anz, // number of entries to process
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// each entry is independent, so a static schedule balances the work evenly
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int16->float, and negate, with the
// actual work done by the included GB_unaryop_transpose.c template (phase 2).
GrB_Info GB_tran__ainv_fp32_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts, // workspace from phase 1
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice, // slicing of A across tasks
int naslice // number of slices of A
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__rdiv_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__rdiv_uint16
// A.*B function (eWiseMult): GB_AemultB__rdiv_uint16
// A*D function (colscale): GB_AxD__rdiv_uint16
// D*A function (rowscale): GB_DxB__rdiv_uint16
// C+=B function (dense accum): GB_Cdense_accumB__rdiv_uint16
// C+=b function (dense accum): GB_Cdense_accumb__rdiv_uint16
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rdiv_uint16
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rdiv_uint16
// C=scalar+B GB_bind1st__rdiv_uint16
// C=scalar+B' GB_bind1st_tran__rdiv_uint16
// C=A+scalar GB_bind2nd__rdiv_uint16
// C=A'+scalar GB_bind2nd_tran__rdiv_uint16
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 16)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_IDIV_UNSIGNED (y, x, 16) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_UINT16 || GxB_NO_RDIV_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B with the uint16 RDIV op, where all three matrices are dense; the
// work is done by the included template.  Note: unlike the other kernels in
// this file, this one has no GB_DISABLE guard and returns void.
void GB_Cdense_ewise3_accum__rdiv_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads // number of OpenMP threads to use
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with the uint16 RDIV op, all three matrices dense, no accumulation;
// the work is done by the included template.
GrB_Info GB_Cdense_ewise3_noaccum__rdiv_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C, with the
// uint16 RDIV op; B has been sliced into tasks by the *_slice arrays.
GrB_Info GB_Cdense_accumB__rdiv_uint16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice, // first vector of each task
const int64_t *GB_RESTRICT klast_slice, // last vector of each task
const int64_t *GB_RESTRICT pstart_slice, // start index of each task
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b into the dense matrix C with the uint16
// RDIV op; the work is done by the included template.
// Fix: the original had a second, unreachable "return (GrB_SUCCESS) ;" after
// the inner block (which already returns on that path); the dead statement
// has been removed.  If this file is regenerated, apply the same fix to the
// generator's template.
GrB_Info GB_Cdense_accumb__rdiv_uint16
(
GrB_Matrix C,
const GB_void *p_bwork, // pointer to the scalar b, of type uint16_t
const int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column scale A by the diagonal matrix D with the uint16 RDIV op;
// A has been sliced into tasks by the *_slice arrays.
GrB_Info GB_AxD__rdiv_uint16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern, // true if only the pattern of A is used
const GrB_Matrix D, bool D_is_pattern, // true if only the pattern of D is used
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row scale B by the diagonal matrix D with the uint16 RDIV op.
GrB_Info GB_DxB__rdiv_uint16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern, // true if only the pattern of D is used
const GrB_Matrix B, bool B_is_pattern, // true if only the pattern of B is used
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B with the uint16 RDIV op; the slice arrays
// are allocated inside GB_add_template.c and freed by GB_FREE_ALL (above).
GrB_Info GB_AaddB__rdiv_uint16
(
GrB_Matrix C,
const int C_sparsity, // sparsity structure of the result C
const GrB_Matrix M, // optional mask, may be NULL
const bool Mask_struct, // if true, use the mask structurally
const bool Mask_comp, // if true, the mask is complemented
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh, // true if C->h is a copy of M->h
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with the uint16 RDIV op; slice arrays
// are allocated inside GB_emult_template.c and freed by GB_FREE_ALL (above).
GrB_Info GB_AemultB__rdiv_uint16
(
GrB_Matrix C,
const int C_sparsity, // sparsity structure of the result C
const GrB_Matrix M, // optional mask, may be NULL
const bool Mask_struct, // if true, use the mask structurally
const bool Mask_comp, // if true, the mask is complemented
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = rdiv (x, Bx [p]) = Bx [p] / x, binding the scalar x as the first
// operand.  Bb is the optional bitmap of B: entries with Bb [p] == 0 are
// skipped (GBB is always true when Bb is NULL).
GrB_Info GB_bind1st__rdiv_uint16
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input, // pointer to the bound scalar x (uint16_t)
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb, // bitmap of B, or NULL if B is not bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = Bx [p] ;
Cx [p] = GB_IDIV_UNSIGNED (bij, x, 16) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = rdiv (Ax [p], y) = y / Ax [p], binding the scalar y as the second
// operand.  Ab is the optional bitmap of A: entries with Ab [p] == 0 are
// skipped (GBB is always true when Ab is NULL).
GrB_Info GB_bind2nd__rdiv_uint16
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input, // pointer to the bound scalar y (uint16_t)
const int8_t *GB_RESTRICT Ab, // bitmap of A, or NULL if A is not bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = Ax [p] ;
Cx [p] = GB_IDIV_UNSIGNED (y, aij, 16) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 16) ; \
}
// C = rdiv (x, A'): transpose A and apply the op with the scalar x bound as
// the first operand; cij = aij / x via the GB_CAST_OP macro redefined above.
// The transpose itself is done by the included GB_unop_transpose.c template.
GrB_Info GB_bind1st_tran__rdiv_uint16
(
GrB_Matrix C,
const GB_void *x_input, // pointer to the bound scalar x (uint16_t)
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 16) ; \
}
// C = rdiv (A', y): transpose A and apply the op with the scalar y bound as
// the second operand; cij = y / aij via the GB_CAST_OP macro redefined above.
GrB_Info GB_bind2nd_tran__rdiv_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input, // pointer to the bound scalar y (uint16_t)
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distribute-cache-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
typedef struct _MagickModulo
{
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const Quantum
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static const void
*GetVirtualMetacontentFromCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *,
ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,Quantum *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCacheMetacontent(CacheInfo *magick_restrict,
NexusInfo *magick_restrict,ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static Quantum
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode,
const ssize_t,const ssize_t,const size_t,const size_t,
const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *)
magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
static ssize_t
cache_anonymous_memory = (-1);
static time_t
cache_epoch = 0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
// Allocate and initialize a CacheInfo structure: zeroed, with sane defaults,
// per-thread nexus array, synchronize policy read from the environment and
// the policy file (policy wins, since it is read last), and two semaphores.
// Allocation failure is fatal (ThrowFatalException does not return).
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
CacheInfo
*magick_restrict cache_info;
char
*value;
cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
if (cache_info == (CacheInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(cache_info,0,sizeof(*cache_info));
cache_info->type=UndefinedCache;
cache_info->mode=IOMode;
cache_info->disk_mode=IOMode;
cache_info->colorspace=sRGBColorspace;
cache_info->file=(-1);  // no backing file yet
cache_info->id=GetMagickThreadId();
// use the largest of: caller's request, OpenMP max threads, thread resource
// limit; never zero so at least one nexus exists
cache_info->number_threads=number_threads;
if (GetOpenMPMaximumThreads() > cache_info->number_threads)
cache_info->number_threads=GetOpenMPMaximumThreads();
if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
if (cache_info->number_threads == 0)
cache_info->number_threads=1;
cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
if (cache_info->nexus_info == (NexusInfo **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
// MAGICK_SYNCHRONIZE environment variable first, then the cache:synchronize
// policy; each returned string is owned by us and must be destroyed
value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (value != (const char *) NULL)
{
cache_info->synchronize=IsStringTrue(value);
value=DestroyString(value);
}
value=GetPolicyValue("cache:synchronize");
if (value != (const char *) NULL)
{
cache_info->synchronize=IsStringTrue(value);
value=DestroyString(value);
}
cache_info->width_limit=GetMagickResourceLimit(WidthResource);
cache_info->height_limit=GetMagickResourceLimit(HeightResource);
cache_info->semaphore=AcquireSemaphoreInfo();
cache_info->reference_count=1;
cache_info->file_semaphore=AcquireSemaphoreInfo();
cache_info->debug=IsEventLogging();
cache_info->signature=MagickCoreSignature;
return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
// Allocate 2*number_threads NexusInfo structures in one contiguous arena plus
// an array of pointers into it.  The first number_threads entries each get a
// virtual_nexus pointing at one of the second number_threads structures.
// Allocation failure is fatal (ThrowFatalException does not return).
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
NexusInfo
**magick_restrict nexus_info;
register ssize_t
i;
nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
number_threads,sizeof(*nexus_info)));
if (nexus_info == (NexusInfo **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
// one block for all 2*number_threads structures; nexus_info[0] owns it
*nexus_info=(NexusInfo *) AcquireQuantumMemory(2*number_threads,
sizeof(**nexus_info));
if (*nexus_info == (NexusInfo *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info));
for (i=0; i < (ssize_t) (2*number_threads); i++)
{
nexus_info[i]=(*nexus_info+i);
if (i < (ssize_t) number_threads)
nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i);
nexus_info[i]->signature=MagickCoreSignature;
}
return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% void *AcquirePixelCachePixels(const Image *image,size_t *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return a pointer to the in-core pixels (and their length in *length) when
    the cache is memory- or map-resident; otherwise *length is 0 and the
    result is NULL.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=0;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    {
      *length=(size_t) cache_info->length;
      return(cache_info->pixels);
    }
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
  /*
    Lazily create the module-wide cache semaphore; always reports success.
  */
  if (cache_semaphore != (SemaphoreInfo *) NULL)
    return(MagickTrue);
  cache_semaphore=AcquireSemaphoreInfo();
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
// Destroy the module-wide cache semaphore.  If it was never created, activate
// it first so RelinquishSemaphoreInfo always has a valid semaphore to release.
MagickPrivate void CacheComponentTerminus(void)
{
if (cache_semaphore == (SemaphoreInfo *) NULL)
ActivateSemaphoreInfo(&cache_semaphore);
/* no op-- nothing to destroy */
RelinquishSemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
// Blend the staged pixels of nexus_info (q) over the authentic cache pixels
// (p) according to the image's write mask, in place in the nexus.  Returns
// MagickTrue when every pixel in the region was processed (or no clipping was
// needed), MagickFalse if the cache or authentic pixels are unavailable.
static MagickBooleanType ClipPixelCacheNexus(Image *image,
NexusInfo *nexus_info,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
MagickSizeType
number_pixels;
register Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
n;
/*
Apply clip mask.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
// nothing to do when the image has no write mask or the region is empty
if ((image->channels & WriteMaskChannel) == 0)
return(MagickTrue);
if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
return(MagickTrue);
cache_info=(CacheInfo *) image->cache;
if (cache_info == (Cache) NULL)
return(MagickFalse);
// p: authentic pixels currently in the cache (read via the virtual nexus)
p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
nexus_info->region.width,nexus_info->region.height,
nexus_info->virtual_nexus,exception);
// q: the staged pixels about to be written back
q=nexus_info->pixels;
number_pixels=(MagickSizeType) nexus_info->region.width*
nexus_info->region.height;
for (n=0; n < (ssize_t) number_pixels; n++)
{
double
mask_alpha;
register ssize_t
i;
if (p == (Quantum *) NULL)
break;
mask_alpha=QuantumScale*GetPixelWriteMask(image,p);
// only composite where the mask is effectively non-zero
if (fabs(mask_alpha) >= MagickEpsilon)
{
for (i=0; i < (ssize_t) image->number_channels; i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
// over-composite the cache pixel onto the staged pixel, scaling the
// cache pixel's alpha by the write-mask value
q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha*
GetPixelAlpha(image,p),(double) q[i],(double)
GetPixelAlpha(image,q)));
}
SetPixelAlpha(image,GetPixelAlpha(image,p),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(image);
}
// MagickFalse if the loop exited early (p was NULL before all pixels done)
return(n < (ssize_t) number_pixels ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
// Create a new, empty pixel cache that copies only the source's thread count
// and virtual pixel method -- pixel data is NOT cloned here (see
// ClonePixelCacheRepository for that).
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
CacheInfo
*magick_restrict clone_info;
const CacheInfo
*magick_restrict cache_info;
assert(cache != NULL);
cache_info=(const CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
cache_info->filename);
clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads);
clone_info->virtual_pixel_method=cache_info->virtual_pixel_method;
return((Cache ) clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict destination,
    *magick_restrict source;

  /*
    Copy the cache method table from `cache' into `clone'.
  */
  assert(clone != (Cache) NULL);
  destination=(CacheInfo *) clone;
  assert(destination->signature == MagickCoreSignature);
  if (destination->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      destination->filename);
  assert(cache != (Cache) NULL);
  source=(CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  destination->methods=source->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
// Copy the on-disk pixel cache file of cache_info into clone_info's file,
// streaming through a bounded buffer.  Both caches must have identical
// morphology (the caller guarantees this).  Returns MagickTrue only if
// exactly cache_info->length bytes were transferred.
static MagickBooleanType ClonePixelCacheOnDisk(
CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
MagickSizeType
extent;
size_t
quantum;
ssize_t
count;
struct stat
file_stats;
unsigned char
*buffer;
/*
Clone pixel cache on disk with identical morphology.
*/
if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
(OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
return(MagickFalse);
// rewind both files before streaming
if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
(lseek(clone_info->file,0,SEEK_SET) < 0))
return(MagickFalse);
// buffer size: the smaller of the source file size and the max buffer extent
quantum=(size_t) MagickMaxBufferExtent;
if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
if (buffer == (unsigned char *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
extent=0;
while ((count=read(cache_info->file,buffer,quantum)) > 0)
{
ssize_t
number_bytes;
number_bytes=write(clone_info->file,buffer,(size_t) count);
// stop on a short or failed write; the length check below reports failure
if (number_bytes != count)
break;
extent+=number_bytes;
}
buffer=(unsigned char *) RelinquishMagickMemory(buffer);
if (extent != cache_info->length)
return(MagickFalse);
return(MagickTrue);
}
static MagickBooleanType ClonePixelCacheRepository(
CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
ExceptionInfo *exception)
{
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
#define cache_number_threads(source,destination,chunk,multithreaded) \
num_threads((multithreaded) == 0 ? 1 : \
(((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
(((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))
MagickBooleanType
optimize,
status;
NexusInfo
**magick_restrict cache_nexus,
**magick_restrict clone_nexus;
size_t
length;
ssize_t
y;
assert(cache_info != (CacheInfo *) NULL);
assert(clone_info != (CacheInfo *) NULL);
assert(exception != (ExceptionInfo *) NULL);
if (cache_info->type == PingCache)
return(MagickTrue);
length=cache_info->number_channels*sizeof(*cache_info->channel_map);
if ((cache_info->storage_class == clone_info->storage_class) &&
(cache_info->colorspace == clone_info->colorspace) &&
(cache_info->alpha_trait == clone_info->alpha_trait) &&
(cache_info->channels == clone_info->channels) &&
(cache_info->columns == clone_info->columns) &&
(cache_info->rows == clone_info->rows) &&
(cache_info->number_channels == clone_info->number_channels) &&
(memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
(cache_info->metacontent_extent == clone_info->metacontent_extent))
{
/*
Identical pixel cache morphology.
*/
if (((cache_info->type == MemoryCache) ||
(cache_info->type == MapCache)) &&
((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
{
(void) memcpy(clone_info->pixels,cache_info->pixels,
cache_info->number_channels*cache_info->columns*cache_info->rows*
sizeof(*cache_info->pixels));
if ((cache_info->metacontent_extent != 0) &&
(clone_info->metacontent_extent != 0))
(void) memcpy(clone_info->metacontent,cache_info->metacontent,
cache_info->columns*cache_info->rows*
clone_info->metacontent_extent*sizeof(unsigned char));
return(MagickTrue);
}
if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
return(ClonePixelCacheOnDisk(cache_info,clone_info));
}
/*
Mismatched pixel cache morphology.
*/
cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
length=cache_info->number_channels*sizeof(*cache_info->channel_map);
optimize=(cache_info->number_channels == clone_info->number_channels) &&
(memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
MagickTrue : MagickFalse;
length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns,
clone_info->number_channels*clone_info->columns);
status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*pixels;
register ssize_t
x;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
(void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
if (optimize != MagickFalse)
(void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
sizeof(Quantum));
else
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
/*
Mismatched pixel channel map.
*/
p=cache_nexus[id]->pixels;
q=clone_nexus[id]->pixels;
for (x=0; x < (ssize_t) cache_info->columns; x++)
{
register ssize_t
i;
if (x == (ssize_t) clone_info->columns)
break;
for (i=0; i < (ssize_t) clone_info->number_channels; i++)
{
PixelChannel
channel;
PixelTrait
traits;
channel=clone_info->channel_map[i].channel;
traits=cache_info->channel_map[channel].traits;
if (traits != UndefinedPixelTrait)
*q=*(p+cache_info->channel_map[channel].offset);
q++;
}
p+=cache_info->number_channels;
}
}
status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
}
if ((cache_info->metacontent_extent != 0) &&
(clone_info->metacontent_extent != 0))
{
/*
Clone metacontent.
*/
length=(size_t) MagickMin(cache_info->metacontent_extent,
clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
for (y=0; y < (ssize_t) cache_info->rows; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*pixels;
if (status == MagickFalse)
continue;
if (y >= (ssize_t) clone_info->rows)
continue;
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
if (status == MagickFalse)
continue;
pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
if (pixels == (Quantum *) NULL)
continue;
if ((clone_nexus[id]->metacontent != (void *) NULL) &&
(cache_nexus[id]->metacontent != (void *) NULL))
(void) memcpy(clone_nexus[id]->metacontent,
cache_nexus[id]->metacontent,length*sizeof(unsigned char));
status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
}
}
clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
if (cache_info->debug != MagickFalse)
{
char
message[MagickPathExtent];
(void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  /*
    Release the pixel cache attached to this image, if one exists.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache == (void *) NULL)
    return;
  image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Tear down the image's pixel cache, delegating to a registered destroy
    handler when one is installed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.destroy_pixel_handler == (DestroyPixelHandler) NULL)
    {
      image->cache=DestroyPixelCache(image->cache);
      return;
    }
  cache_info->methods.destroy_pixel_handler(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    rc;

  /*
    Close the disk pixel cache file descriptor and give back its file
    resource.  Returns MagickFalse when no descriptor was open or close(2)
    failed.
  */
  if (cache_info->file == -1)
    return(MagickFalse);
  rc=close(cache_info->file);
  cache_info->file=(-1);
  RelinquishMagickResource(FileResource,1);
  return(rc == -1 ? MagickFalse : MagickTrue);
}
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  /*
    Release the pixel store backing this cache (heap memory, memory map,
    disk file, or distributed server) and reset the cache to an undefined
    state.  The matching Magick resource accounting is released for each
    backing type.
  */
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      /* OpenCL-backed memory is released through the CL cache-info object. */
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      /* Keep the cache file for read-only and persistent caches. */
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /* fallthrough: a map cache is backed by a disk file, so the DiskCache
       teardown (close descriptor, release disk resource) must run too */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}
MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Drop one reference to the pixel cache; the actual teardown runs only
    when the last reference is released.  Always returns (Cache) NULL so
    callers can clear their pointer in one assignment.
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Decrement under the cache semaphore; a non-zero count means another
    owner remains and no destruction happens.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /*
    Release the backing pixel store first, then the auxiliary structures,
    then the semaphores themselves, and finally the CacheInfo block.
  */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  /* Invalidate the signature so stale pointers trip the asserts above. */
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  /*
    Free (or unmap) the staging buffer owned by this nexus and reset every
    field that referenced it.
  */
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->cache=(Quantum *) NULL;
  nexus_info->pixels=(Quantum *) NULL;
  nexus_info->metacontent=(void *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}
MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  register ssize_t
    n;

  /*
    Destroy the per-thread nexus array (two slots per thread).  NOTE(review):
    the NexusInfo structs appear to live in one contiguous allocation
    anchored at nexus_info[0], which is why only *nexus_info is freed —
    confirm against AcquirePixelCacheNexus.
  */
  assert(nexus_info != (NexusInfo **) NULL);
  for (n=0; n < (ssize_t) (2*number_threads); n++)
  {
    if (nexus_info[n]->cache != (Quantum *) NULL)
      RelinquishCacheNexusPixels(nexus_info[n]);
    /* Invalidate the signature so later use of a stale slot is caught. */
    nexus_info[n]->signature=(~MagickCoreSignature);
  }
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontent() returns the authentic metacontent corresponding
% with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the associated pixels are not available.
%
% The format of the GetAuthenticMetacontent() method is:
%
% void *GetAuthenticMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the metacontent associated with the last pixel queue/get call,
    preferring a registered handler over this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_metacontent_from_handler ==
      (GetAuthenticMetacontentFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->metacontent);
    }
  return(cache_info->methods.get_authentic_metacontent_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontentFromCache() returns the meta-content corresponding
% with the last call to QueueAuthenticPixelsCache() or
% GetAuthenticPixelsCache().
%
% The format of the GetAuthenticMetacontentFromCache() method is:
%
% void *GetAuthenticMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void *GetAuthenticMetacontentFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->metacontent);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image,
% MagickCLDevice device,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o device: the device to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Wrap the image's in-memory pixel cache in an OpenCL buffer for the
    given device.  Returns (cl_mem) NULL when the cache cannot be wrapped.
  */
  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
    {
      /*
        NOTE(review): SyncImagePixelCache presumably opens the cache and/or
        makes it single-reference before wrapping — confirm.
      */
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *) image->cache;
    }
  /* Only an unmapped MemoryCache qualifies for an OpenCL wrapping. */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  /* An existing CL info bound to a different context is copied first. */
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
      (cache_info->opencl->device->context != device->context))
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
    }
  /* Retain the mem object while still holding the cache semaphore. */
  if (cache_info->opencl != (MagickCLCacheInfo) NULL)
    RetainOpenCLMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return((cl_mem) NULL);
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict pixels;

  /*
    Queue the requested region, then fault pixels (and metacontent, when
    present) in from the cache unless the nexus already maps the authentic
    pixels in place.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((Quantum *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  if ((cache_info->metacontent_extent != 0) &&
      (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return((Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
% Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated
% corresponding with the last call to QueueAuthenticPixels() or
% GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
% Quantum *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the authentic pixels from the most recent queue/get call,
    preferring a registered handler over this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_from_handler ==
      (GetAuthenticPixelsFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->pixels);
    }
  return(cache_info->methods.get_authentic_pixels_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a Quantum array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image has corresponding metacontent,call
% GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
% meta-content corresponding to the region. Once the Quantum array has
% been updated, the changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Obtain a read/write pixel region, delegating to a registered handler
    when one is installed; otherwise transfer through this thread's cache
    nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_pixels_handler ==
      (GetAuthenticPixelsHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
        cache_info->nexus_info[id],exception));
    }
  return(cache_info->methods.get_authentic_pixels_handler(image,x,y,columns,
    rows,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Default handler: transfer the requested region through this thread's
    cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated corresponding
% with the last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  const CacheInfo
    *magick_restrict cache_info;

  const PixelChannelMap
    *magick_restrict p,
    *magick_restrict q;

  /*
    Report whether the image and its pixel cache still agree on storage
    class, colorspace, geometry, channel layout, and metacontent extent.
  */
  cache_info=(CacheInfo *) image->cache;
  p=image->channel_map;
  q=cache_info->channel_map;
  if (image->storage_class != cache_info->storage_class)
    return(MagickFalse);
  if (image->colorspace != cache_info->colorspace)
    return(MagickFalse);
  if (image->alpha_trait != cache_info->alpha_trait)
    return(MagickFalse);
  if (image->channels != cache_info->channels)
    return(MagickFalse);
  if ((image->columns != cache_info->columns) ||
      (image->rows != cache_info->rows))
    return(MagickFalse);
  if (image->number_channels != cache_info->number_channels)
    return(MagickFalse);
  if (memcmp(p,q,image->number_channels*sizeof(*p)) != 0)
    return(MagickFalse);
  if (image->metacontent_extent != cache_info->metacontent_extent)
    return(MagickFalse);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  /*
    Process-wide throttle/expiry bookkeeping shared across all calls.
    NOTE(review): these statics are written without synchronization —
    presumably benign races, confirm against project conventions.
  */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  /* Optionally stall every 32nd call to honor the CPU throttle resource. */
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=GetMagickTime();
    }
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
    {
      /* Time limit exceeded: close any open cache file and abort. */
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /*
    Copy-on-write: a shared or read-only cache must be replaced with a
    private clone before modification.  The condition is re-checked under
    the cache semaphore (double-checked pattern).
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.
          */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              /* Optionally copy the pixels too, then adopt the clone. */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /* Release our reference to the original cache outside its semaphore. */
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      if (image->type != UndefinedType)
        image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MemoryCache, MapCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  const CacheInfo
    *magick_restrict cache_info;

  /*
    Report the backing-store type of the image's pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(const CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
% MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  register ssize_t
    i;

  /*
    Copy one pixel's channels from source[] into destination[], mapping
    each index through the image's channel map.  A NULL source fills the
    destination with the image background color and reports failure.
  */
  if (source != (const Quantum *) NULL)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        destination[GetPixelChannelChannel(image,i)]=source[i];
      return(MagickTrue);
    }
  destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
  destination[GreenPixelChannel]=ClampToQuantum(image->background_color.green);
  destination[BluePixelChannel]=ClampToQuantum(image->background_color.blue);
  destination[BlackPixelChannel]=ClampToQuantum(image->background_color.black);
  destination[AlphaPixelChannel]=ClampToQuantum(image->background_color.alpha);
  return(MagickFalse);
}
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    handler;

  register Quantum
    *magick_restrict q;

  /*
    Fetch the single authentic pixel at (x,y); CopyPixel substitutes the
    image background color when the pixels are unavailable.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  handler=cache_info->methods.get_one_authentic_pixel_from_handler;
  if (handler != (GetOneAuthenticPixelFromHandler) NULL)
    return(handler(image,x,y,pixel,exception));
  q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
%      MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
%        const ssize_t x,const ssize_t y,Quantum *pixel,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Default cache-method handler: read one authentic pixel at (x,y) through
  the calling thread's private cache nexus.
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  register Quantum
    *magick_restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  pixels=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  return(CopyPixel(image,pixels,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
%      MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
%        const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return a single virtual pixel at (x,y) in `pixel' using the image's
  current virtual-pixel method; the background color is substituted (and
  MagickFalse returned) on failure.  An installed cache-method handler, if
  any, takes precedence and is invoked before the per-thread nexus path.
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *pixels;

  GetOneVirtualPixelFromHandler
    handler;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  handler=cache_info->methods.get_one_virtual_pixel_from_handler;
  if (handler != (GetOneVirtualPixelFromHandler) NULL)
    return(handler(image,GetPixelCacheVirtualMethod(image),x,y,pixel,
      exception));
  assert(id < (int) cache_info->number_threads);
  pixels=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,
    y,1UL,1UL,cache_info->nexus_info[id],exception);
  return(CopyPixel(image,pixels,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
%      MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
%        const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
%        Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Default cache-method handler: read one virtual pixel at (x,y) with the
  given virtual-pixel method through the calling thread's private nexus.
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  pixels=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  return(CopyPixel(image,pixels,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixelInfo() method is:
%
%      MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return a single virtual pixel at (x,y) as a PixelInfo.  The pixel is first
  initialized from the image defaults via GetPixelInfo(); MagickFalse is
  returned when the cache read fails.
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  register const Quantum
    *magick_restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  GetPixelInfo(image,pixel);
  pixels=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    cache_info->nexus_info[id],exception);
  if (pixels != (const Quantum *) NULL)
    {
      GetPixelInfoPixel(image,pixels,pixel);
      return(MagickTrue);
    }
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
%      ColorspaceType GetPixelCacheColorspace(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
/*
  Return the colorspace recorded in the pixel cache.
*/
MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheFilename() returns the filename associated with the pixel
% cache.
%
% The format of the GetPixelCacheFilename() method is:
%
% const char *GetPixelCacheFilename(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
return(cache_info->cache_filename);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
/*
  Populate a CacheMethods structure with the default (in-core) pixel cache
  handlers.  The structure is zeroed first so any handler not assigned here
  is a well-defined NULL.
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /*
    Virtual (read-only) pixel handlers.
  */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  /*
    Authentic (read/write) pixel handlers.
  */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  /*
    Cache lifecycle.
  */
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheNexusExtent() returns the extent of the pixels associated
%  with the last call to SetPixelCacheNexusPixels() or
% GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
/*
  Return the extent (in pixels) of the region held by the given cache nexus;
  when the nexus region is empty, the full image extent (columns*rows) is
  returned instead.
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    extent;

  /* Use the typed NULL comparison for consistency with the rest of this
     module (was a bare `cache != NULL'). */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent == 0)
    return((MagickSizeType) cache_info->columns*cache_info->rows);
  return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return a pointer to the in-core pixel data of the image's cache and store
  its byte length in `length'.  NULL is returned when the pixels are not
  directly addressable (i.e. the cache is neither memory- nor map-backed).
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=cache_info->length;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
      return((void *) cache_info->pixels);
    default:
      return((void *) NULL);
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
/*
  Return the storage class (DirectClass or PseudoClass) recorded in the
  pixel cache.
*/
MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache)
{
  CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimized cache tile width in pixels.
%
% o height: the optimized cache tile height in pixels.
%
*/
/*
  Return the optimal square tile size (in pixels) for iterating over the
  image's pixel cache: tiles are sized so a row of one tile spans 2048 bytes
  for in-core caches, or 8192 bytes for disk-backed caches.
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  CacheInfo
    *magick_restrict cache_info;

  size_t
    extent;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* Bytes per pixel; guard against a zero channel count. */
  extent=MagickMax(cache_info->number_channels,1)*sizeof(Quantum);
  *width=(GetImagePixelCacheType(image) == DiskCache ? 8192UL : 2048UL)/
    extent;
  *height=(*width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Return the virtual-pixel method currently associated with the image's
  pixel cache (how pixel reads outside the image bounds are resolved).
*/
MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  CacheInfo
    *magick_restrict info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromCache() returns the meta-content corresponding with
% the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualMetacontentFromCache() method is:
%
% void *GetVirtualMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Default cache-method handler: return the metacontent held by the calling
  thread's private cache nexus.
*/
static const void *GetVirtualMetacontentFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromNexus() returns the meta-content for the specified
% cache nexus.
%
% The format of the GetVirtualMetacontentFromNexus() method is:
%
% const void *GetVirtualMetacontentFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the meta-content.
%
*/
/*
  Return the metacontent buffer of the given cache nexus, or NULL when the
  cache has not been allocated (UndefinedClass).
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class != UndefinedClass)
    return(nexus_info->metacontent);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontent() returns the virtual metacontent corresponding with
% the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the meta-content are not available.
%
% The format of the GetVirtualMetacontent() method is:
%
% const void *GetVirtualMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Return the virtual metacontent associated with the most recent pixel
  request, or NULL when none is available.  The installed cache-method
  handler is consulted first; the per-thread nexus is the fallback.
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const void
    *magick_restrict metacontent;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* NOTE(review): the handler is invoked without a NULL check; presumably
     GetPixelCacheMethods() guarantees it is always set — confirm. */
  metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
  if (metacontent != (void *) NULL)
    return(metacontent);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCacheNexus() method is:
%
% Quantum *GetVirtualPixelCacheNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 ordered-dither offset table used by DitherX()/DitherY() to perturb
  out-of-bounds coordinates before clamping (DitherVirtualPixelMethod).
  Values span 0..63; 32 is subtracted after lookup to center the offsets.
*/
static ssize_t
  DitherMatrix[64] =
  {
     0,  48,  12,  60,   3,  51,  15,  63,
    32,  16,  44,  28,  35,  19,  47,  31,
     8,  56,   4,  52,  11,  59,   7,  55,
    40,  24,  36,  20,  43,  27,  39,  23,
     2,  50,  14,  62,   1,  49,  13,  61,
    34,  18,  46,  30,  33,  17,  45,  29,
    10,  58,   6,  54,   9,  57,   5,  53,
    42,  26,  38,  22,  41,  25,  37,  21
  };
/*
  Map an out-of-bounds x coordinate to an in-bounds column by adding the
  ordered-dither offset for its 8-pixel phase, then clamping to
  [0, columns-1].
*/
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    offset;

  offset=x+DitherMatrix[x & 0x07]-32L;
  if (offset >= 0L)
    {
      if (offset < (ssize_t) columns)
        return(offset);
      return((ssize_t) columns-1L);
    }
  return(0L);
}
/*
  Map an out-of-bounds y coordinate to an in-bounds row by adding the
  ordered-dither offset for its 8-pixel phase, then clamping to [0, rows-1].
*/
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    offset;

  offset=y+DitherMatrix[y & 0x07]-32L;
  if (offset >= 0L)
    {
      if (offset < (ssize_t) rows)
        return(offset);
      return((ssize_t) rows-1L);
    }
  return(0L);
}
/*
  Clamp an x coordinate to [0, columns-1], replicating the edge column for
  out-of-bounds requests.
*/
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x >= 0L)
    {
      if (x < (ssize_t) columns)
        return(x);
      return((ssize_t) (columns-1));  /* off the right edge */
    }
  return(0L);  /* off the left edge */
}
/*
  Clamp a y coordinate to [0, rows-1], replicating the edge row for
  out-of-bounds requests.
*/
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y >= 0L)
    {
      if (y < (ssize_t) rows)
        return(y);
      return((ssize_t) (rows-1));  /* off the bottom edge */
    }
  return(0L);  /* off the top edge */
}
/*
  Return a uniformly random column index in [0, columns).
*/
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  double
    value;

  value=GetPseudoRandomValue(random_info);
  return((ssize_t) (columns*value));
}
/*
  Return a uniformly random row index in [0, rows).
*/
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  double
    value;

  value=GetPseudoRandomValue(random_info);
  return((ssize_t) (rows*value));
}
/*
  Floored division of `offset' by `extent': C's `/' and `%' truncate toward
  zero, so when the operands' signs differ and the remainder is non-zero the
  quotient is decremented and the remainder shifted into [0, extent).  Used
  by the tiling virtual-pixel methods to wrap coordinates.
*/
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;

  ssize_t
    signed_extent;

  signed_extent=(ssize_t) extent;
  modulo.quotient=offset/signed_extent;
  modulo.remainder=offset-modulo.quotient*signed_extent;
  if ((modulo.remainder != 0) && ((offset ^ signed_extent) < 0))
    {
      modulo.quotient--;
      modulo.remainder+=signed_extent;
    }
  return(modulo);
}
/*
  Read a columns-by-rows region anchored at (x,y) into the given cache
  nexus and return a pointer to the pixels, or NULL on failure.  Requests
  wholly inside the cache are read directly; out-of-bounds portions are
  synthesized per `virtual_pixel_method', recursing on this function (with
  the nexus's private virtual_nexus) to fetch each substituted source pixel.
*/
MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    *magick_restrict virtual_nexus;

  Quantum
    *magick_restrict pixels,
    virtual_pixel[MaxPixelChannels];  /* constant fill pixel for misses */

  register const Quantum
    *magick_restrict p;

  register const void
    *magick_restrict r;

  register Quantum
    *magick_restrict q;

  register ssize_t
    i,
    u;

  register unsigned char
    *magick_restrict s;

  ssize_t
    v;

  void
    *magick_restrict virtual_metacontent;

  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((const Quantum *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  /*
    Stage the nexus for the requested region; masked images force a copy
    (the MagickTrue "buffered" argument) rather than a direct pointer.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((const Quantum *) NULL);
  q=pixels;
  /*
    Linear pixel offset and span of the staged region within the cache.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(q);  /* nexus maps the cache directly; nothing to copy */
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const Quantum *) NULL);
        if (cache_info->metacontent_extent != 0)
          {
            status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const Quantum *) NULL);
          }
        return(q);
      }
  /*
    Pixel request is outside cache extents.
  */
  virtual_nexus=nexus_info->virtual_nexus;
  s=(unsigned char *) nexus_info->metacontent;
  (void) memset(virtual_pixel,0,cache_info->number_channels*
    sizeof(*virtual_pixel));
  virtual_metacontent=(void *) NULL;
  /*
    Pre-compute the constant fill pixel (and a zeroed metacontent buffer)
    for methods that substitute a fixed color for out-of-bounds reads.
  */
  switch (virtual_pixel_method)
  {
    case BackgroundVirtualPixelMethod:
    case BlackVirtualPixelMethod:
    case GrayVirtualPixelMethod:
    case TransparentVirtualPixelMethod:
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    case EdgeVirtualPixelMethod:
    case CheckerTileVirtualPixelMethod:
    case HorizontalTileVirtualPixelMethod:
    case VerticalTileVirtualPixelMethod:
    {
      if (cache_info->metacontent_extent != 0)
        {
          /*
            Acquire a metacontent buffer.
          */
          virtual_metacontent=(void *) AcquireQuantumMemory(1,
            cache_info->metacontent_extent);
          if (virtual_metacontent == (void *) NULL)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
              return((const Quantum *) NULL);
            }
          (void) memset(virtual_metacontent,0,cache_info->metacontent_extent);
        }
      switch (virtual_pixel_method)
      {
        case BlackVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case GrayVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
              virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        case TransparentVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
          SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
          break;
        }
        case MaskVirtualPixelMethod:
        case WhiteVirtualPixelMethod:
        {
          for (i=0; i < (ssize_t) cache_info->number_channels; i++)
            SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
          SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
          break;
        }
        default:
        {
          /*
            Background color (also covers Edge/Checker/Horizontal/Vertical
            tile methods, which only use the fill for their gap regions).
          */
          SetPixelRed(image,ClampToQuantum(image->background_color.red),
            virtual_pixel);
          SetPixelGreen(image,ClampToQuantum(image->background_color.green),
            virtual_pixel);
          SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
            virtual_pixel);
          SetPixelBlack(image,ClampToQuantum(image->background_color.black),
            virtual_pixel);
          SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
            virtual_pixel);
          break;
        }
      }
      break;
    }
    default:
      break;
  }
  /*
    Walk the requested rows; within each row, in-bounds spans are copied as
    runs and out-of-bounds coordinates are resolved pixel-by-pixel.
  */
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case EdgeVirtualPixelMethod:
            default:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,
                nexus_info->virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              /* Random source is lazily created on first use. */
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /* Odd tile quotients reflect the coordinate. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                virtual_nexus,exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case BackgroundVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              /* Constant fill prepared above. */
              p=virtual_pixel;
              r=virtual_metacontent;
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;  /* recursive fetch failed; abort this row */
          (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
            sizeof(*p)));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,virtual_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
      (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
        sizeof(*p)));
      q+=cache_info->number_channels*length;
      if ((r != (void *) NULL) && (s != (const void *) NULL))
        {
          /* NOTE(review): only `length' bytes are copied here while the
             single-pixel path copies metacontent_extent bytes per pixel —
             looks intentional upstream, but confirm against callers. */
          (void) memcpy(s,r,(size_t) length);
          s+=length*cache_info->metacontent_extent;
        }
    }
    if (u < (ssize_t) columns)
      break;  /* inner loop aborted on a failed fetch */
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);  /* a row failed; whole request fails */
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const Quantum *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Fetch an immutable pixel region through this thread's private cache
    nexus; returns NULL if the pixels cannot be transferred.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelQueue() returns the virtual pixels associated with the last
%  call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
%      const Quantum *GetVirtualPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_pixels_handler !=
(GetVirtualPixelsHandler) NULL)
return(cache_info->methods.get_virtual_pixels_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
%  call GetVirtualMetacontent() after invoking GetVirtualPixels() to
%  access the meta-content (of type void) corresponding to the
% region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  GetVirtualPixelHandler
    handler;

  /*
    Return an immutable pixel region, deferring to a user-installed
    virtual-pixel handler when one exists; otherwise use this thread's
    private cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  handler=cache_info->methods.get_virtual_pixel_handler;
  if (handler != (GetVirtualPixelHandler) NULL)
    return(handler(image,GetPixelCacheVirtualMethod(image),x,y,columns,rows,
      exception));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    columns,rows,cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsCache() returns the pixels associated with the last call to
%  QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
%      const Quantum *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const Quantum *GetVirtualPixelsCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const Quantum *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Return the pixel buffer held by the given cache nexus, or NULL when the
    pixel cache has not been allocated (storage class is undefined).
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class != UndefinedClass)
    return((const Quantum *) nexus_info->pixels);
  return((const Quantum *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the image mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum ApplyPixelCompositeMask(const Quantum p,
  const MagickRealType alpha,const Quantum q,const MagickRealType beta)
{
  double
    gamma;

  /*
    Composite pixel p over q weighted by the two mask alphas; a fully
    opaque mask leaves p unchanged.
  */
  if (fabs(alpha-OpaqueAlpha) < MagickEpsilon)
    return(p);
  gamma=PerceptibleReciprocal(1.0-QuantumScale*QuantumScale*alpha*beta);
  return(ClampToQuantum(gamma*MagickOver_((double) p,alpha,(double) q,beta)));
}
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    number_pixels;

  register Quantum
    *magick_restrict p,
    *magick_restrict q;

  register ssize_t
    n;

  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* nothing to do unless the image carries a composite-mask channel */
  if ((image->channels & CompositeMaskChannel) == 0)
    return(MagickTrue);
  /* an empty nexus region is trivially masked */
  if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0))
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /* p: authentic pixels for the nexus region; q: the nexus pixel buffer */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (n=0; n < (ssize_t) number_pixels; n++)
  {
    double
      mask_alpha;

    register ssize_t
      i;

    /* authentic pixels unavailable: abort and report failure below */
    if (p == (Quantum *) NULL)
      break;
    mask_alpha=(double) GetPixelCompositeMask(image,p);
    /* blend each updatable channel of q with the authentic pixel p */
    for (i=0; i < (ssize_t) image->number_channels; i++)
    {
      PixelChannel channel = GetPixelChannelChannel(image,i);
      PixelTrait traits = GetPixelChannelTraits(image,channel);
      if ((traits & UpdatePixelTrait) == 0)
        continue;
      q[i]=ApplyPixelCompositeMask(p[i],mask_alpha,q[i],(MagickRealType)
        GetPixelAlpha(image,q));
    }
    p+=GetPixelChannels(image);
    q+=GetPixelChannels(image);
  }
  /* MagickFalse if the loop terminated early (missing authentic pixels) */
  if (n < (ssize_t) number_pixels)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% metacontent, and memory mapping the cache if it is disk based. The cache
% nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
    return(MagickTrue);  /* cache already open and in the proper mode */
  /* no filename yet: allocate a unique temporary cache file */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* try exclusive create first; fall back to an existing file */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        /* read/write: same exclusive-create-then-fallback strategy */
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  (void) AcquireMagickResource(FileResource,1);
  /* close any previously open descriptor before adopting the new one */
  if (cache_info->file != -1)
    (void) ClosePixelCacheOnDisk(cache_info);
  cache_info->file=file;
  cache_info->disk_mode=mode;
  return(MagickTrue);
}
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

  /*
    Write `length` bytes of `buffer` to the cache file at `offset`; returns
    the number of bytes written (possibly short on error, -1 on seek failure).
  */
#if !defined(MAGICKCORE_HAVE_PWRITE)
  /* without pwrite(), position the shared file offset explicitly */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /* write in chunks of at most SSIZE_MAX bytes */
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        /* retry only when interrupted by a signal; otherwise give up */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  /*
    Extend the on-disk pixel cache file to at least `length` bytes.
  */
  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        message[MagickPathExtent];

      (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
      (void) FormatLocaleString(message,MagickPathExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* reject lengths that do not fit in a signed file offset */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  if ((MagickSizeType) offset >= length)
    count=(MagickOffsetType) 1;  /* file is already large enough */
  else
    {
      /* write a single byte at length-1 to grow the file */
      extent=(MagickOffsetType) length-1;
      count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
        "");
      if (count != 1)
        return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
      /* optionally reserve the backing blocks to avoid later SIGBUS on mmap */
      if (cache_info->synchronize != MagickFalse)
        if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
          return(MagickFalse);
#endif
    }
  /* rewind so subsequent reads/writes start at the beginning */
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
  if (offset < 0)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;

  char
    format[MagickPathExtent],
    message[MagickPathExtent];

  const char
    *hosts,
    *type;

  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  size_t
    columns,
    packet_size;

  /*
    Allocate the pixel cache, trying in order: in-memory (heap or anonymous
    map), a distributed cache server, and finally a disk file (optionally
    memory-mapped).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;

      /*
        Does the security policy require anonymous mapping for pixel cache?
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* enforce the configured width/height resource limits */
  if (((MagickSizeType) image->columns > cache_info->width_limit) ||
      ((MagickSizeType) image->rows > cache_info->height_limit))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  length=GetImageListLength(image);
  if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
      image->filename);
  /* snapshot the current cache so its pixels can be cloned/released later */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
    image->filename,(double) image->scene);
  /* mirror the image attributes into the cache descriptor */
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->alpha_trait=image->alpha_trait;
  cache_info->channels=image->channels;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  InitializePixelChannelMap(image);
  cache_info->number_channels=GetPixelChannels(image);
  (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
    sizeof(*image->channel_map));
  cache_info->metacontent_extent=image->metacontent_extent;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /* bytes per pixel: channel data plus optional metacontent */
  packet_size=cache_info->number_channels*sizeof(Quantum);
  if (image->metacontent_extent != 0)
    packet_size+=cache_info->metacontent_extent;
  length=number_pixels*packet_size;
  /* round-trip check detects arithmetic overflow in the length computation */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /* ping mode: record geometry only, allocate no pixels */
      cache_info->storage_class=image->storage_class;
      cache_info->colorspace=image->colorspace;
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  /* persistent caches must reside on disk, never in memory */
  if (cache_info->mode == PersistMode)
    status=MagickFalse;
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) ||
       (cache_info->type == MemoryCache)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              /* heap-allocated pixel cache */
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(Quantum *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              /* policy requires an anonymous memory mapping */
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /* allocation failed: restore prior pixels, fall through */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->type=MemoryCache;
              cache_info->metacontent=(void *) NULL;
              /* metacontent is stored immediately after the pixel data */
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  /* preserve existing pixels across the reallocation */
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->mapped != MagickFalse ?
                    "Anonymous" : "Heap",type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;

      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  /* preserve existing pixels across the reallocation */
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    GetDistributeCacheFile((DistributeCacheInfo *)
                    cache_info->server_info),type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
      /* no distributed cache available and no disk resource left */
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /* discard any stale cache file unless we are persisting it */
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  /* only memory-map when the length fits in size_t and resources permit */
  if (length != (MagickSizeType) ((size_t) length))
    cache_info->type=DiskCache;
  else
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status == MagickFalse)
        cache_info->type=DiskCache;
      else
        if ((cache_info->type != MapCache) && (cache_info->type != MemoryCache))
          {
            cache_info->type=DiskCache;
            RelinquishMagickResource(MapResource,cache_info->length);
          }
        else
          {
            cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
              cache_info->offset,(size_t) cache_info->length);
            if (cache_info->pixels == (Quantum *) NULL)
              {
                /* mapping failed: fall back to unmapped disk cache */
                cache_info->type=DiskCache;
                cache_info->mapped=source_info.mapped;
                cache_info->pixels=source_info.pixels;
                RelinquishMagickResource(MapResource,cache_info->length);
              }
            else
              {
                /*
                  Create file-backed memory-mapped pixel cache.
                */
                (void) ClosePixelCacheOnDisk(cache_info);
                cache_info->type=MapCache;
                cache_info->mapped=MagickTrue;
                cache_info->metacontent=(void *) NULL;
                /* metacontent follows the pixel data in the mapping */
                if (cache_info->metacontent_extent != 0)
                  cache_info->metacontent=(void *) (cache_info->pixels+
                    cache_info->number_channels*number_pixels);
                if ((source_info.storage_class != UndefinedClass) &&
                    (mode != ReadMode))
                  {
                    /* preserve existing pixels across the reallocation */
                    status=ClonePixelCacheRepository(cache_info,&source_info,
                      exception);
                    RelinquishPixelCachePixels(&source_info);
                  }
                if (image->debug != MagickFalse)
                  {
                    (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                      MagickPathExtent,format);
                    type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                      cache_info->type);
                    (void) FormatLocaleString(message,MagickPathExtent,
                      "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                      cache_info->filename,cache_info->cache_filename,
                      cache_info->file,type,(double) cache_info->columns,
                      (double) cache_info->rows,(double)
                      cache_info->number_channels,format);
                    (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                      message);
                  }
                if (status == 0)
                  {
                    cache_info->type=UndefinedCache;
                    return(MagickFalse);
                  }
                return(MagickTrue);
              }
          }
    }
  /* plain disk cache: pixels stay on disk, accessed through read/write */
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  if (status == 0)
    {
      cache_info->type=UndefinedCache;
      return(MagickFalse);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero attaches to an existing persistent
%      pixel cache; a zero value initializes the persistent pixel cache.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  /*
    Attach to (attach != MagickFalse) or initialize a persistent on-disk
    pixel cache at `filename`; *offset is advanced past this image's pixels,
    rounded up to the next page boundary.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* ensure any device-side pixels are synchronized to the host first */
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /* advance past this image's pixels, rounded to a page boundary */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /* build a disk-backed clone descriptor that mirrors the current cache */
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->channels=cache_info->channels;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  /* copy the pixel data into the persistent cache file */
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  Quantum
    *magick_restrict pixels;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  /* the region's origin must lie within the cache extent */
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  /* offset of the region's last pixel must also fall inside the cache */
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Queue a write-only pixel region on this thread's private cache nexus;
    the region's contents are undefined until the caller initializes them.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a Quantum array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% obtain the meta-content (of type void) corresponding to the region.
%  Once the Quantum (and/or metacontent) array has been updated, the
% changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Queue a mutable (write-only) pixel region.  A user-installed handler, if
  any, takes precedence; otherwise the region is staged in this thread's
  cache nexus.  Returns NULL on failure.
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Honor a handler registered via SetPixelCacheMethods().
  */
  if (cache_info->methods.queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    return(cache_info->methods.queue_authentic_pixels_handler(image,x,y,
      columns,rows,exception));
  assert(thread_id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheMetacontent() reads metacontent from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheMetacontent() method is:
%
% MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the metacontent.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ReadPixelCacheRegion() reads up to `length' bytes at file position `offset'
  from the pixel-cache file into `buffer', looping over short reads.  It
  returns the number of bytes actually read (less than `length' on error or
  end-of-file), or -1 if the initial seek fails.
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PREAD)
  /* No pread(): position the shared file descriptor explicitly. */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /* Clamp each request to SSIZE_MAX, the largest portable read size. */
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        count=0;
        /* NOTE(review): a 0 return (EOF) leaves errno untouched; if a stale
           EINTR is still in errno this loop could retry at EOF — presumed
           benign in practice, but worth confirming. */
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
/*
  ReadPixelCacheMetacontent() copies the metacontent of the region described
  by nexus_info from the cache backing store (memory/map, disk, or a
  distributed cache server) into nexus_info->metacontent.  Returns MagickTrue
  on success; MagickFalse if the cache carries no metacontent or a read falls
  short.
*/
static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register ssize_t
    y;

  register unsigned char
    *magick_restrict q;

  size_t
    rows;

  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  /* The nexus points directly into the cache; there is nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /* Pixel offset of the region origin and byte length of one region row. */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict p;

      /*
        Read meta-content from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width region: coalesce all rows into a single copy. */
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          /* Full-width region within the buffer limit: one contiguous read. */
          length=extent;
          rows=1UL;
        }
      /* On disk, metacontent is stored after all of the pixel data; extent
         is repurposed here as the total pixel count of the image. */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read metacontent from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer one row per request */
      else
        {
          /* Full-width region: request the whole extent at once. */
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* Early loop exit above indicates a short read. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ReadPixelCachePixels() copies the pixels of the region described by
  nexus_info from the cache backing store (memory/map, disk, or a distributed
  cache server) into nexus_info->pixels.  Returns MagickTrue on success;
  MagickFalse on arithmetic overflow or a short read.
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register Quantum
    *magick_restrict q;

  register ssize_t
    y;

  size_t
    number_channels,
    rows;

  /* The nexus points directly into the cache; there is nothing to copy. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /* Compute the origin offset, checking each step for overflow. */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  /* Byte length of one region row, overflow-checked. */
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict p;

      /*
        Read pixels from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* Full-width region: coalesce all rows into a single copy. */
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          /* Full-width region within the buffer limit: one contiguous read. */
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;  /* transfer one row per request */
      else
        {
          /* Full-width region: request the whole extent at once. */
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* Early loop exit above indicates a short read. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
%      Cache ReferencePixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
*/
/*
  ReferencePixelCache() increments the cache's reference count under its
  semaphore and returns the cache pointer.
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  /* Fix: `cache' has type Cache (void *); compare against (Cache) NULL as
     every other assert in this file does, not (Cache *) NULL (void **). */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheChannels() resets the pixel cache channels.
%
% The format of the ResetPixelCacheChannels method is:
%
%      void ResetPixelCacheChannels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Synchronize the cache's channel count with the image's pixel channels.
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  CacheInfo
    *magick_restrict info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  info->number_channels=GetPixelChannels(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t C a c h e A n o n y m o u s M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetCacheAnonymousMemory() resets the anonymous_memory value.
%
% The format of the ResetCacheAnonymousMemory method is:
%
% void ResetCacheAnonymousMemory(void)
%
*/
MagickPrivate void ResetCacheAnonymousMemory(void)
{
  /* Clear the module-level anonymous-memory flag so the policy is
     re-evaluated the next time it is consulted. */
  cache_anonymous_memory=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e E p o c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /* Zero the module-level cache epoch counter. */
  cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
/*
  SetPixelCacheMethods() installs the pixel access methods supplied in
  cache_methods on the given cache.  Only non-NULL handlers override the
  handlers already installed; NULL entries leave the existing handler alone.
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    Fix: test the handler supplied by the caller (cache_methods), not the one
    already installed in cache_info->methods.  The previous code could
    overwrite a valid handler with NULL (when the installed handler was
    non-NULL but the supplied one was NULL) and skip a requested override
    (when no handler was yet installed).  This also matches the
    authentic-pixel case directly below.
  */
  get_one_virtual_pixel_from_handler=
    cache_methods->get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
%      Quantum *SetPixelCacheNexusPixels(
% const CacheInfo *magick_restrict cache_info,const MapMode mode,
% const ssize_t x,const ssize_t y,const size_t width,const size_t height,
% const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o x,y,width,height: define the region of this particular cache nexus.
%
% o buffered: if true, nexus pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AcquireCacheNexusPixels() allocates (or memory-maps) the staging buffer of
  a cache nexus.  Returns MagickFalse, raising a resource-limit exception, if
  the requested length cannot be represented as a size_t or the allocation
  fails; on success nexus_info->cache/length/mapped are set.
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /* Reject lengths that would be truncated when cast to size_t. */
  if (length != (MagickSizeType) ((size_t) length))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory <= 0)
    {
      /* Aligned heap allocation; zeroed explicitly on success. */
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  else
    {
      /* MapBlob with fd -1 — presumably an anonymous mapping; mapped
         buffers are released differently, hence the `mapped' flag. */
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (Quantum *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}
/*
  Issue a CPU prefetch hint for the second cache line of the nexus pixel
  buffer; the read/write hint follows the map mode.
*/
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  unsigned char
    *address;

  if (nexus_info->length < CACHE_LINE_SIZE)
    return;  /* buffer does not span a second cache line */
  address=(unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE;
  if (mode == ReadMode)
    MagickCachePrefetch(address,0,1);
  else
    MagickCachePrefetch(address,1,1);
}
/*
  SetPixelCacheNexusPixels() points a cache nexus at the requested region:
  either directly into the in-memory pixel cache (when the region is
  contiguous there and unbuffered access is allowed) or at a private staging
  buffer that is later synced back to the cache.  Returns the pixel pointer,
  or NULL with an exception raised on error.
*/
static Quantum *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  assert(nexus_info->signature == MagickCoreSignature);
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /*
        Direct access is possible only for a region that is contiguous in
        cache memory: full-width rows, or a single partial row, entirely
        inside the image bounds.
      */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
          (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  number_pixels=(MagickSizeType) width*height;
  /* NOTE(review): length is padded to at least max(columns,rows) pixels —
     presumably so the staging buffer can be reused across requests without
     reallocating; confirm before changing. */
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  status=MagickTrue;
  /* Reuse the existing staging buffer when it is already large enough. */
  if (nexus_info->cache == (Quantum *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    return((Quantum *) NULL);
  /* Metacontent is placed immediately after the pixel data in the buffer. */
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+
      cache_info->number_channels*number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SetCacheAlphaChannel() sets the alpha channel of every pixel in the image
  to `alpha' and marks the image's alpha trait as blending.  Returns
  MagickTrue on success.
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  CacheView
    *magick_restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);  /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* Once any row fails, the remaining rows are skipped. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  SetPixelCacheVirtualMethod() installs the given virtual-pixel method on the
  image's cache and returns the previously installed method.  For methods
  that depend on the background color or transparency, the image itself is
  adjusted so the method can be honored.
*/
MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  VirtualPixelMethod
    method;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=cache_info->virtual_pixel_method;  /* previous setting, returned */
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /* An alpha-bearing background requires the image to carry alpha. */
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        /* A non-gray background cannot live in a gray colorspace. */
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
% been completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  If the cache pixels are backed by an OpenCL buffer, refresh the host-side
  cache info from the OpenCL environment; no-op otherwise.
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type != MemoryCache)
    return;
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  UnlockSemaphoreInfo(cache_info->semaphore);
}
/*
  Public entry point: synchronize the image's OpenCL-backed pixel buffer
  with host memory.
*/
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  assert(image != (const Image *) NULL);
  CopyOpenCLBuffer((CacheInfo *) image->cache);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SyncAuthenticPixelCacheNexus() flushes the pixels (and metacontent, if any)
  of the given nexus back to the in-memory or disk cache, applying any
  write/composite masks first.  Returns MagickTrue if the region is synced.
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /* Apply write/composite masks unless a mask itself is being updated. */
  if (image->mask_trait != UpdatePixelTrait)
    {
      if (((image->channels & WriteMaskChannel) != 0) &&
          (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
      if (((image->channels & CompositeMaskChannel) != 0) &&
          (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
    }
  /* Direct-mapped nexus: pixels were modified in place; just mark taint. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      if (image->taint == MagickFalse)
        image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((status != MagickFalse) && (image->taint == MagickFalse))
    image->taint=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Sync this thread's authentic pixel nexus back to the pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Flush the authentic pixel region to the in-memory or disk cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Honor an installed cache-method override before the default path.
  */
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Acquire the (possibly re-opened) pixel cache; success is a non-NULL cache.
  */
  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  if (cache_info == (CacheInfo *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheMetacontent() writes the meta-content to the specified region
% of the pixel cache.
%
% The format of the WritePixelCacheMetacontent() method is:
%
% MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the meta-content.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const unsigned char
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /*
    Write the meta-content of the nexus region to the pixel cache backend
    (memory, memory-map, disk, or distributed server).
  */
  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to copy */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.  When the nexus spans full cache
        rows, collapse the copy into a single memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk; metacontent follows the pixel
        payload in the cache file.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;  /* short write; reported below via y < rows */
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache, one row at a time unless the
        whole region fits a single request.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fixed: source read "®ion" (mojibake of "&region") */
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const Quantum
    *magick_restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /*
    Write the pixels of the nexus region to the pixel cache backend
    (memory, memory-map, disk, or distributed server).
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to copy */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.  When the nexus spans full cache rows,
        collapse the copy into a single memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;  /* short write; reported below via y < rows */
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache, one row at a time unless the
        whole region fits a single request.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fixed: source read "®ion" (mojibake of "&region") */
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
GB_unop__identity_uint32_uint64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint32_uint64
// op(A') function: GB_unop_tran__identity_uint32_uint64
// C type: uint32_t
// A type: uint64_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint32_t z = (uint32_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = (uint32_t) Ax: apply the identity operator with uint64 -> uint32 cast.
GrB_Info GB_unop_apply__identity_uint32_uint64
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const uint64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // cast each entry directly; identity op adds nothing beyond the cast
        Cx [p] = (uint32_t) (Ax [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (uint32_t) A': transpose with typecast; the real work is done by the
// shared template, parameterized by the GB_* macros defined earlier in this
// file (GB_GETA / GB_CAST / GB_CX).
GrB_Info GB_unop_tran__identity_uint32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // numerical phase only; the symbolic phase (row counts) ran earlier
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
gather_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: jxyang@openailab.com
* Update: hhchen@openailab.com
*/
#include "gather_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
#include <string.h>
/* Private per-node state for the reference gather kernel. */
typedef struct
{
    int* in_shape;    // cached input dims (allocated in prerun)
    int axis;         // gather axis
    int indices_num;  // number of indices to gather
    int dim_size;     // rank of the input tensor
    int is_onnx;      // nonzero: ONNX-style gather (indices stored as float)
} gather_param_t;

/* Gather float rows along `axis`.  Indices arrive as floats and are
   truncated to int before indexing.  Returns 0 on success. */
static int ref_gather_fp32(float* input, float* input_indices, float* output, gather_param_t* param, int num_thread)
{
    const int axis = param->axis;
    const int axis_size = param->in_shape[axis];
    int outer_size = 1;
    int inner_size = 1;

    for (int d = 0; d < axis; d++)
        outer_size *= param->in_shape[d];
    for (int d = axis + 1; d < param->dim_size; d++)
        inner_size *= param->in_shape[d];

    if (param->is_onnx)
    {
        /* ONNX path: a flat gather of `indices_num` inner slices. */
        for (int k = 0; k < param->indices_num; k++)
        {
            int idx = (int)input_indices[k];
            memcpy(output + (size_t)k * inner_size,
                   input + (size_t)idx * inner_size,
                   (size_t)inner_size * sizeof(float));
        }
        return 0;
    }

    /* Default path: gather per outer slice. */
    for (int outer = 0; outer < outer_size; outer++)
    {
        for (int k = 0; k < param->indices_num; k++)
        {
            float* dst = output + ((size_t)outer * param->indices_num + k) * inner_size;
            float* src = input + ((size_t)outer * axis_size + (int)input_indices[k]) * inner_size;
            memcpy(dst, src, (size_t)inner_size * sizeof(float));
        }
    }
    return 0;
}
/* Gather uint8 rows along `axis`.  Indices arrive as ints.
   Returns 0 on success. */
static int ref_gather_uint8(uint8_t* input, int* input_indices, uint8_t* output, gather_param_t* param, int num_thread)
{
    const int axis = param->axis;
    const int axis_size = param->in_shape[axis];
    int outer_size = 1;
    int inner_size = 1;

    for (int d = 0; d < axis; d++)
        outer_size *= param->in_shape[d];
    for (int d = axis + 1; d < param->dim_size; d++)
        inner_size *= param->in_shape[d];

    for (int outer = 0; outer < outer_size; outer++)
    {
        for (int k = 0; k < param->indices_num; k++)
        {
            uint8_t* dst = output + ((size_t)outer * param->indices_num + k) * inner_size;
            uint8_t* src = input + ((size_t)outer * axis_size + input_indices[k]) * inner_size;
            memcpy(dst, src, (size_t)inner_size);
        }
    }
    return 0;
}
/* Copy the op parameters into the private state and allocate the shape
   buffer (released in postrun).  Returns 0 on success, -1 on OOM. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct gather_param* gather_param = ( struct gather_param* )ir_node->op.param_mem;
    gather_param_t* op_priv_info = ( gather_param_t* )exec_node->ops_priv;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);

    op_priv_info->axis = gather_param->axis;
    op_priv_info->indices_num = gather_param->indices_num;
    op_priv_info->is_onnx = gather_param->is_onnx;
    /* Allocation was previously unchecked; fail the node instead of
       dereferencing NULL later in run(). */
    op_priv_info->in_shape = (int*)sys_malloc(input_tensor->dim_num * sizeof(int));
    if (op_priv_info->in_shape == NULL)
        return -1;
    return 0;
}
/* Execute gather: refresh the cached input shape, then dispatch on the
   input data type.  Returns 0 on success, -1 for unsupported types. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    struct tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
    struct tensor* indices_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]);
    gather_param_t* op_priv_info = ( gather_param_t* )exec_node->ops_priv;

    void* input = input_tensor->data;
    void* indices_data = indices_tensor->data;
    void* output = output_tensor->data;

    /* The input shape may change between runs, so re-cache it each time. */
    op_priv_info->dim_size = input_tensor->dim_num;
    for (int i = 0; i < op_priv_info->dim_size; i++)
        op_priv_info->in_shape[i] = input_tensor->dims[i];

    int ret = -1; /* unsupported data type by default */
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_gather_fp32(input, indices_data, output, op_priv_info, exec_graph->num_thread);
    else if (input_tensor->data_type == TENGINE_DT_UINT8)
        ret = ref_gather_uint8(input, indices_data, output, op_priv_info, exec_graph->num_thread);
    return ret;
}
/* Allocate zero-initialized private state for the node (released in
   release_node).  Returns 0 on success, -1 on OOM. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    gather_param_t* op_priv_info = ( gather_param_t* )sys_malloc(sizeof(gather_param_t));
    if (op_priv_info == NULL)
    {
        return -1;
    }
    memset(op_priv_info, 0, sizeof(gather_param_t));
    exec_node->ops_priv = op_priv_info;
    return 0;
}
static int postrun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
gather_param_t* op_param = (gather_param_t*)exec_node->ops_priv;
sys_free(op_param->in_shape);
return 0;
}
/* Free the node's private state and clear the dangling pointer. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    sys_free(exec_node->ops_priv);
    exec_node->ops_priv = NULL;
    return 0;
}
/* Advertise this implementation's priority to the op scheduler. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    /* NOTE(review): returning BEST means this reference kernel wins over
       any accelerated backend for OP_GATHER — confirm that is intended. */
    return OPS_SCORE_BEST;
}
static struct node_ops gather_node_ops = {.prerun = prerun,
.run = run,
.reshape = NULL,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
/* Register the reference gather kernel for OP_GATHER with the runtime.
   Returns the runtime's registration status code. */
int register_gather_ref_op()
{
    return register_builtin_node_ops(OP_GATHER, &gather_node_ops);
}
/* Remove the reference gather kernel registration for OP_GATHER.
   Returns the runtime's unregistration status code. */
int unregister_gather_ref_op()
{
    return unregister_builtin_node_ops(OP_GATHER, &gather_node_ops);
}
|
GB_binop__eq_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__eq_int16
// A.*B function (eWiseMult): GB_AemultB__eq_int16
// A*D function (colscale): GB_AxD__eq_int16
// D*A function (rowscale): GB_DxB__eq_int16
// C+=B function (dense accum): GB_Cdense_accumB__eq_int16
// C+=b function (dense accum): GB_Cdense_accumb__eq_int16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__eq_int16
// C=scalar+B GB_bind1st__eq_int16
// C=scalar+B' GB_bind1st_tran__eq_int16
// C=A+scalar GB_bind2nd__eq_int16
// C=A'+scalar GB_bind2nd_tran__eq_int16
// C type: bool
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x == y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EQ || GxB_NO_INT16 || GxB_NO_EQ_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the template applies
// GB_BINOP, here cij = (aij == bij), entry by entry.
GrB_Info GB_Cdense_ewise3_noaccum__eq_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix.  The template is
// compiled out (#if 0) for this operator; the function is kept so the
// generated dispatch table links, and it reports success as a no-op.
GrB_Info GB_Cdense_accumB__eq_int16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix.  The template is
// compiled out (#if 0) for this operator; kept as a linkable no-op.
GrB_Info GB_Cdense_accumb__eq_int16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal matrix D; result entries are bool.
GrB_Info GB_AxD__eq_int16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // output array of C, typed for the EQ operator's bool result
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal matrix D; result entries are bool.
GrB_Info GB_DxB__eq_int16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // output array of C, typed for the EQ operator's bool result
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of the patterns of A and
// B; where both are present, cij = (aij == bij).
GrB_Info GB_AaddB__eq_int16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of the patterns
// of A and B; cij = (aij == bij).
GrB_Info GB_AemultB__eq_int16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x == Bx [p]): apply EQ with the scalar bound as first operand.
GrB_Info GB_bind1st__eq_int16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    const int16_t *Bx = (const int16_t *) Bx_input ;
    const int16_t x = (*((const int16_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (x == Bx [p]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] == y): apply EQ with the scalar bound as second operand.
GrB_Info GB_bind2nd__eq_int16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    const int16_t *Ax = (const int16_t *) Ax_input ;
    const int16_t y = (*((const int16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = (Ax [p] == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose and apply EQ with x bound as first operand.
// The GB_CAST_OP macro redefined just above this function supplies the
// per-entry computation Cx [pC] = (x == aij) to the transpose template.
GrB_Info GB_bind1st_tran__eq_int16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    // numerical phase only; symbolic phase ran earlier
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any generated code that follows
    #undef GB_ATYPE
    #define GB_ATYPE \
        int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose and apply EQ with y bound as second operand.
// The GB_CAST_OP macro redefined just above this function supplies the
// per-entry computation Cx [pC] = (aij == y) to the transpose template.
GrB_Info GB_bind2nd_tran__eq_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    // numerical phase only; symbolic phase ran earlier
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unop__identity_fc64_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fc64_uint32
// op(A') function: GB_unop_tran__identity_fc64_uint32
// C type: GxB_FC64_t
// A type: uint32_t
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
// A's scalar type
#define GB_ATYPE \
uint32_t
// C's scalar type
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// access the p-th entry of C's value array
#define GB_CX(p) Cx [p]
// unary operator (identity)
#define GB_OP(z, x) \
z = x ;
// casting: uint32_t aij is typecast to double complex with imag part 0
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
// (false here: uint32_t must be typecast to GxB_FC64_t)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator to all anz entries of Ax (uint32_t),
// typecasting each value to double complex, and write the results to Cx.
// Parallelized with OpenMP over nthreads.  Returns GrB_NO_VALUE if this
// kernel is compiled out via GB_DISABLE.
GrB_Info GB_unop_apply__identity_fc64_uint32
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const uint32_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// sparse, hypersparse, or full case: all anz values are present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity with no typecast: a single parallel memcpy suffices
// (dead branch here, since uint32_t -> GxB_FC64_t needs a typecast)
GB_memcpy (Cx, Ax, anz * sizeof (uint32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Cx [p] = (GxB_FC64_t) Ax [p]: real part from aij, imaginary part 0
uint32_t aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions not present in the bitmap
if (!Ab [p]) continue ;
uint32_t aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A while typecasting each uint32_t entry to double complex
// (identity op).  The actual work is done by the GB_unop_transpose.c
// template, which expands the GB_CAST_OP macro defined above.
GrB_Info GB_unop_tran__identity_fc64_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
QlParCallableBonds.h | #pragma once
#include <ql/qldefines.hpp>
#ifdef BOOST_MSVC
# include <ql/auto_link.hpp>
#endif
#include <ql/experimental/callablebonds/callablebond.hpp>
#include <ql/experimental/callablebonds/treecallablebondengine.hpp>
#include <ql/models/shortrate/onefactormodels/hullwhite.hpp>
#include <ql/termstructures/yield/flatforward.hpp>
#include <ql/time/calendars/unitedstates.hpp>
#include <ql/time/daycounters/actualactual.hpp>
//#include <ql/time/daycounters/thirty360.hpp>
#include <chrono>
#include <limits>
#include <vector>
#include <cmath>
#include <iomanip>
#include <iostream>
#include "../common/ValuationConstants.hpp"
#include "../common/Domain.hpp"
#include "../common/progress-cpp/ProgressBar.hpp"
using namespace std;
using namespace QuantLib;
using namespace trinom;
namespace qlpar
{
// Build a flat-forward yield term structure anchored at `today`, quoting
// the supplied forward-rate handle with the given day counter,
// compounding convention, and frequency.
ext::shared_ptr<YieldTermStructure>
flatRate(const Date& today,
const ext::shared_ptr<Quote>& forward,
const DayCounter& dc,
const Compounding& compounding,
const Frequency& frequency) {
    return ext::make_shared<FlatForward>(
        today, Handle<Quote>(forward), dc, compounding, frequency);
}
// Convenience overload: wrap a raw rate in a SimpleQuote and delegate to
// the quote-based flatRate() above.
ext::shared_ptr<YieldTermStructure>
flatRate(const Date& today,
Rate forward,
const DayCounter& dc,
const Compounding &compounding,
const Frequency &frequency) {
    ext::shared_ptr<Quote> quote = ext::make_shared<SimpleQuote>(forward);
    return flatRate(today, quote, dc, compounding, frequency);
}
/* Bloomberg OAS1: "N" model (Hull White)
varying volatility parameter
The curve entered into Bloomberg OAS1 is a flat curve,
at constant yield = 5.5%, semiannual compounding.
Assume here OAS1 curve uses an ACT/ACT day counter,
as documented in PFC1 as a "default" in the latter case.
*/
// Price one callable fixed-rate bond on a Hull-White trinomial tree.
//   maturityDays      - bond maturity, in calendar days from the fixed
//                       evaluation date (16 Oct 2007)
//   meanReversionRate - Hull-White mean-reversion parameter a
//   volatility        - Hull-White volatility parameter sigma
//   strike            - clean call price applied at every quarterly call date
//   gridIntervals     - number of time steps in the pricing tree
// Returns the bond's clean price.
// NOTE(review): mutates QuantLib's global Settings singleton, and this is
// called from an OpenMP loop in computeValuations() — confirm thread-safety
// (e.g. QL_ENABLE_SESSIONS) before relying on parallel runs.
Real computeValuation(Integer maturityDays, Real meanReversionRate, Real volatility, Real strike, Integer gridIntervals)
{
Date today = Date(16, October, 2007);
Settings::instance().evaluationDate() = today;
// set up a flat curve corresponding to Bloomberg flat curve
Rate bbCurveRate = 0.0501772;
DayCounter bbDayCounter = ActualActual(ActualActual::Bond);
//DayCounter bbDayCounter = Thirty360(Thirty360::BondBasis);
InterestRate bbIR(bbCurveRate, bbDayCounter, Compounded, Semiannual);
Handle<YieldTermStructure> termStructure(flatRate(today,
bbIR.rate(),
bbIR.dayCounter(),
bbIR.compounding(),
bbIR.frequency()));
// set up the call schedule: clean-price calls every 3 months, starting
// 180 days from today and running up to (but not including) maturity
CallabilitySchedule callSchedule;
Real callPrice = strike;
//Size numberOfCallDates = 24;
Date callDate = today + 180;
Date maturity = today + maturityDays;
//for (Size i = 0; i < numberOfCallDates; i++) {
while (callDate < maturity) {
Calendar nullCalendar = NullCalendar();
Callability::Price myPrice(callPrice,
Callability::Price::Clean);
callSchedule.push_back(
ext::make_shared<Callability>(
myPrice,
Callability::Call,
callDate));
callDate = nullCalendar.advance(callDate, 3, Months);
}
// set up the callable bond
Date dated = Date(16, September, 2004);
Date issue = dated;
Natural settlementDays = 3; // Bloomberg OAS1 settle is Oct 19, 2007
Calendar bondCalendar = UnitedStates(UnitedStates::GovernmentBond);
Real coupon = .0465;
Frequency frequency = Quarterly;
Real redemption = 100.0;
Real faceAmount = 100.0;
/* The 30/360 day counter Bloomberg uses for this bond cannot
reproduce the US Bond/ISMA (constant) cashflows used in PFC1.
Therefore use ActAct(Bond)
*/
DayCounter bondDayCounter = ActualActual(ActualActual::Bond);
// PFC1 shows no indication dates are being adjusted
// for weekends/holidays for vanilla bonds
BusinessDayConvention accrualConvention = Unadjusted;
BusinessDayConvention paymentConvention = Unadjusted;
Schedule sch(dated, maturity, Period(frequency), bondCalendar,
accrualConvention, accrualConvention,
DateGeneration::Backward, false);
// maxIterations/accuracy are only used by the commented-out yield
// computation below; kept so it can be re-enabled unchanged
Size maxIterations = 1000;
Real accuracy = 1e-8;
//Integer gridIntervals = 40;
Real reversionParameter = meanReversionRate;
Real sigma = volatility; // core dumps if zero on Cygwin
ext::shared_ptr<ShortRateModel> hw0(
new HullWhite(termStructure, reversionParameter, sigma));
ext::shared_ptr<PricingEngine> engine0(
new TreeCallableFixedRateBondEngine(hw0, gridIntervals));
CallableFixedRateBond callableBond(settlementDays, faceAmount, sch,
vector<Rate>(1, coupon),
bondDayCounter, paymentConvention,
redemption, issue, callSchedule);
callableBond.setPricingEngine(engine0);
//cout << setprecision(2)
// << showpoint
// << fixed
// << "sigma/vol (%) = "
// << 100.*sigma
// << endl;
auto price = callableBond.cleanPrice();
//auto yield = 100. * callableBond.yield(bondDayCounter,
// Compounded,
// frequency,
// accuracy,
// maxIterations);
//cout << ".";
//cout << "QuantLib price/yld (%) ";
//cout << price << " / " << yield << endl;
return price;
}
// Price every valuation in `valuations`, writing the clean prices into
// `results` (caller must size results to at least ValuationCount).
// Each valuation is independent, so the loop is parallelized with OpenMP.
void computeValuations(const Valuations &valuations, std::vector<trinom::real> &results)
{
//ProgressBar progressBar(valuations.ValuationCount, 70, '#', '-');
#pragma omp parallel for
for (auto i = 0; i < valuations.ValuationCount; ++i)
{
//++progressBar; // record the tick
// maturity in years -> days, assuming 12 months of 30 days each
auto maturityDays = (int)ceil(valuations.Maturities[i] * 12 * 30); // days
auto meanReversionRate = valuations.MeanReversionRates[i];
auto volatility = valuations.Volatilities[i];
auto strike = valuations.StrikePrices[i];
// tree resolution scales with maturity (years) times steps-per-year
auto gridIntervals = (int)(valuations.Maturities[i] * valuations.TermSteps[i]);
auto result = qlpar::computeValuation(maturityDays, meanReversionRate, volatility, strike, gridIntervals);
results[i] = result;
//cout << ".";
// display the bar only at certain steps
//if (i % 10 == 0)
// progressBar.display();
}
cout << endl;
// tell the bar to finish
//progressBar.done();
}
} // namespace qlpar
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size or shape, to an
% image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology. Just one that is very common for image bluring and sharpening
% effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring.
%
% This module provides not only a general morphology function, and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user supplied
% arguments. Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/morphology.h"
#include "MagickCore/morphology-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/prepress.h"
#include "MagickCore/quantize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/registry.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
/*
Other global definitions used by module.
*/
/* In-place fold helpers: keep the smaller (Minimize) or larger (Maximize)
   of `assign` and `value` in `assign` */
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Integer Factorial Function - for a Binomial kernel.
   Iteratively computes n! ; overflows size_t for large n, but the
   Binomial kernels built from it are small. */
#if 1
static inline size_t fact(size_t n)
{
  size_t
    result,
    k;

  result=1;
  for (k=2; k <= n; k++)
    result*=k;
  return(result);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif
/* Forward declarations of kernel helpers used below;
   currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Walk a kernel list and return its final element (the one whose `next`
   pointer is NULL); used when appending to a multi-kernel list. */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  KernelInfo
    *last;

  for (last=kernel; last->next != (KernelInfo *) NULL; last=last->next) ;
  return(last);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin, this is no longer the case, and any rectangular kernel
% with any value being declared the origin. This in turn allows the use of
% highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array. This allows you to shaped the kernel within its
% rectangular area. That is 'nan' values provide a 'mask' for the kernel
% shape. However at least one non-nan value must be provided for correct
% working of a kernel.
%
% The returned kernel should be freed using the DestroyKernelInfo() when you
% are finished with it. Do not free this memory yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
% the 'center' can be optionally be defined at +X+Y (such that +0+0
% is top left corner). If not defined the pixel in the center, for
% odd sizes, or to the immediate top or left of center for even sizes
% is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators A list is defined as a semi-colon separated list kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel defintions are
% simply ignored.
%
% The special flags will expand a single kernel, into a list of rotated
% kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotates, but giving a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating a odd-sized square kernel has been given.
%
% The format of the AcquireKernal method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
KernelInfo
*kernel;
char
token[MagickPathExtent];
const char
*p,
*end;
register ssize_t
i;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
MagickStatusType
flags;
GeometryInfo
args;
kernel=(KernelInfo *) AcquireQuantumMemory(1,sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = UserDefinedKernel;
kernel->next = (KernelInfo *) NULL;
kernel->signature=MagickCoreSignature;
if (kernel_string == (const char *) NULL)
return(kernel);
/* find end of this specific kernel definition string */
end = strchr(kernel_string, ';');
if ( end == (char *) NULL )
end = strchr(kernel_string, '\0');
/* clear flags - for Expanding kernel lists thorugh rotations */
flags = NoValue;
/* Has a ':' in argument - New user kernel specification
FUTURE: this split on ':' could be done by StringToken()
*/
p = strchr(kernel_string, ':');
if ( p != (char *) NULL && p < end)
{
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, kernel_string, (size_t) (p-kernel_string));
token[p-kernel_string] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
/* Size handling and checks of geometry settings */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 1.0; /* then width = 1 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
kernel->width = (size_t)args.rho;
kernel->height = (size_t)args.sigma;
/* Offset Handling and Checks */
if ( args.xi < 0.0 || args.psi < 0.0 )
return(DestroyKernelInfo(kernel));
kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
: (ssize_t) (kernel->width-1)/2;
kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
: (ssize_t) (kernel->height-1)/2;
if ( kernel->x >= (ssize_t) kernel->width ||
kernel->y >= (ssize_t) kernel->height )
return(DestroyKernelInfo(kernel));
p++; /* advance beyond the ':' */
}
else
{ /* ELSE - Old old specification, forming odd-square kernel */
/* count up number of values given */
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
for (i=0; p < end; i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
}
/* set the size of the kernel - old sized square */
kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
}
/* Read in the kernel values from rest of input string argument */
kernel->values=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory(
kernel->width,kernel->height*sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->minimum=MagickMaximumValue;
kernel->maximum=(-MagickMaximumValue);
kernel->negative_range = kernel->positive_range = 0.0;
for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
if ( LocaleCompare("nan",token) == 0
|| LocaleCompare("-",token) == 0 ) {
kernel->values[i] = nan; /* this value is not part of neighbourhood */
}
else {
kernel->values[i] = StringToDouble(token,(char **) NULL);
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
}
}
/* sanity check -- no more values in kernel definition */
GetNextToken(p,&p,MagickPathExtent,token);
if ( *token != '\0' && *token != ';' && *token != '\'' )
return(DestroyKernelInfo(kernel));
#if 0
/* this was the old method of handling a incomplete kernel */
if ( i < (ssize_t) (kernel->width*kernel->height) ) {
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
kernel->values[i]=0.0;
}
#else
/* Number of values for kernel was not enough - Report Error */
if ( i < (ssize_t) (kernel->width*kernel->height) )
return(DestroyKernelInfo(kernel));
#endif
/* check that we recieved at least one real (non-nan) value! */
if (kernel->minimum == MagickMaximumValue)
return(DestroyKernelInfo(kernel));
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */
ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */
return(kernel);
}
/*
** ParseKernelName() parses one 'named' built-in kernel specification
** ("name[:args][@><]") and builds it via AcquireKernelBuiltIn().
** Returns NULL if the leading token is not a valid built-in kernel
** name.  Missing geometry arguments are filled with per-kernel-type
** defaults before the kernel is built.  The caller owns the result and
** must free it with DestroyKernelInfo().
*/
static KernelInfo *ParseKernelName(const char *kernel_string,
  ExceptionInfo *exception)
{
  char
    token[MagickPathExtent];

  const char
    *p,
    *end;

  GeometryInfo
    args;

  KernelInfo
    *kernel;

  MagickStatusType
    flags;

  size_t
    length;

  ssize_t
    type;

  /* Parse special 'named' kernel */
  GetNextToken(kernel_string,&p,MagickPathExtent,token);
  type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
  if ( type < 0 || type == UserDefinedKernel )
    return((KernelInfo *) NULL);  /* not a valid named kernel */

  /* skip separators between the name and its arguments */
  while (((isspace((int) ((unsigned char) *p)) != 0) ||
          (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
    p++;

  end = strchr(p, ';');  /* end of this kernel definition */
  if ( end == (char *) NULL )
    end = strchr(p, '\0');

  /* ParseGeometry() needs the geometry separated! -- Arrgghh */
  /* FIX: clamp the copy length so over-long arguments cannot overflow
     the fixed-size token buffer */
  length=MagickMin((size_t) (end-p),MagickPathExtent-1);
  memcpy(token, p, length);
  token[length] = '\0';
  SetGeometryInfo(&args);
  flags = ParseGeometry(token, &args);

#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
    flags, args.rho, args.sigma, args.xi, args.psi );
#endif

  /* special handling of missing values in input string */
  switch( type ) {
    /* Shape Kernel Defaults */
    case UnityKernel:
      if ( (flags & WidthValue) == 0 )
        args.rho = 1.0;  /* Default scale = 1.0, zero is valid */
      break;
    case SquareKernel:
    case DiamondKernel:
    case OctagonKernel:
    case DiskKernel:
    case PlusKernel:
    case CrossKernel:
      if ( (flags & HeightValue) == 0 )
        args.sigma = 1.0;  /* Default scale = 1.0, zero is valid */
      break;
    case RingKernel:
      if ( (flags & XValue) == 0 )
        args.xi = 1.0;  /* Default scale = 1.0, zero is valid */
      break;
    case RectangleKernel:  /* Rectangle - set size defaults */
      if ( (flags & WidthValue) == 0 )  /* if no width then */
        args.rho = args.sigma;          /* then width = height */
      if ( args.rho < 1.0 )             /* if width too small */
        args.rho = 3;                   /* then width = 3 */
      if ( args.sigma < 1.0 )           /* if height too small */
        args.sigma = args.rho;          /* then height = width */
      if ( (flags & XValue) == 0 )      /* center offset if not defined */
        args.xi = (double)(((ssize_t)args.rho-1)/2);
      if ( (flags & YValue) == 0 )
        args.psi = (double)(((ssize_t)args.sigma-1)/2);
      break;
    /* Distance Kernel Defaults */
    case ChebyshevKernel:
    case ManhattanKernel:
    case OctagonalKernel:
    case EuclideanKernel:
      if ( (flags & HeightValue) == 0 )           /* no distance scale */
        args.sigma = 100.0;                       /* default distance scaling */
      else if ( (flags & AspectValue ) != 0 )     /* '!' flag */
        args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
      else if ( (flags & PercentValue ) != 0 )    /* '%' flag */
        args.sigma *= QuantumRange/100.0;         /* percentage of color range */
      break;
    default:
      break;
  }

  kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args, exception);
  if ( kernel == (KernelInfo *) NULL )
    return(kernel);

  /* global expand to rotated kernel list - only for single kernels */
  if ( kernel->next == (KernelInfo *) NULL ) {
    if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 45.0);
    else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 90.0);
    else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
      ExpandMirrorKernelInfo(kernel);
  }

  return(kernel);
}
/*
** AcquireKernelInfo() parses a full kernel specification, which may be
** a ';'-separated list of named and/or user-defined kernels, or
** "@filename" to read the specification from a file.  Returns the head
** of a linked KernelInfo list (caller frees with DestroyKernelInfo()),
** or NULL on failure.
**
** FIX: the original leaked the FileToString() buffer (kernel_cache)
** when a kernel in the middle of the list failed to parse; it is now
** released on that error path as well.
*/
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string,
  ExceptionInfo *exception)
{
  KernelInfo
    *kernel,
    *new_kernel;

  char
    *kernel_cache,
    token[MagickPathExtent];

  const char
    *p;

  /* a NULL string yields an empty user-defined kernel (see ParseKernelArray) */
  if (kernel_string == (const char *) NULL)
    return(ParseKernelArray(kernel_string));
  p=kernel_string;
  kernel_cache=(char *) NULL;
  if (*kernel_string == '@')
    {
      /* '@file' form: read the whole kernel specification from a file */
      kernel_cache=FileToString(kernel_string+1,~0UL,exception);
      if (kernel_cache == (char *) NULL)
        return((KernelInfo *) NULL);
      p=(const char *) kernel_cache;
    }
  kernel=NULL;
  while (GetNextToken(p,(const char **) NULL,MagickPathExtent,token), *token != '\0')
  {
    /* ignore extra or multiple ';' kernel separators */
    if (*token != ';')
      {
        /* tokens starting with alpha is a Named kernel */
        if (isalpha((int) ((unsigned char) *token)) != 0)
          new_kernel=ParseKernelName(p,exception);
        else /* otherwise a user defined kernel array */
          new_kernel=ParseKernelArray(p);
        /* Error handling -- this is not proper error handling! */
        if (new_kernel == (KernelInfo *) NULL)
          {
            if (kernel != (KernelInfo *) NULL)
              kernel=DestroyKernelInfo(kernel);
            /* free the file buffer too -- previously leaked here */
            if (kernel_cache != (char *) NULL)
              kernel_cache=DestroyString(kernel_cache);
            return((KernelInfo *) NULL);
          }
        /* initialise or append the kernel list */
        if (kernel == (KernelInfo *) NULL)
          kernel=new_kernel;
        else
          LastKernelInfo(kernel)->next=new_kernel;
      }
    /* look for the next kernel in list */
    p=strchr(p,';');
    if (p == (char *) NULL)
      break;
    p++;
  }
  if (kernel_cache != (char *) NULL)
    kernel_cache=DestroyString(kernel_cache);
  return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returned one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernalBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
% The a No-Op or Scaling single element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
% normalized,
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% NOTE: that the 'radius' is optional, but if provided can limit (clip)
% the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or
% sever clipping and aliasing may result. If not given or set to 0 the
% radius will be determined so as to produce the best minimal error
% result, which is usally much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexician Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
% An alturnative to this kernel is to use a "DoG" with a sigma ratio of
% approx 1.6 (according to wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
% (current restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
% each other, is equivalent to a far larger "Gaussian" kernel with the
% same sigma value, However it is much faster to apply. This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
% a comet like trail. The Kernel is actually half a gaussian curve,
% Adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% Binomial:[{radius}]
% Generate a discrete kernel using a 2 dimensional Pascal's Triangle
% of values. Used for special forms of image filters.
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
% All these are unscaled, zero-summing kernels by default. As such for
% non-HDRI version of ImageMagick some form of normalization, user scaling,
% and biasing the results is recommended, to prevent the resulting image
% being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
% 45 degrees to generate the 8 angled varients of each of the kernels.
%
% Laplacian:{type}
% Discrete Lapacian Kernels, (without normalization)
% Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0,-2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
% the Sobel Kernel, but is designed to be isotropic. That is it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
% Type 0: default un-nomalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2) -1 |
%
% However this kernel is als at the heart of the FreiChen Edge Detection
% Process which uses a set of 9 specially weighted kernel. These 9
% kernels not be normalized, but directly applied to the image. The
% results is then added together, to produce the intensity of an edge in
% a specific direction. The square root of the pixel value can then be
% taken as the cosine of the edge, and at least 2 such runs at 90 degrees
% from each other, both the direction and the strength of the edge can be
% determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
% | -1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
% and the last is to add a average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
% WARNING: The above was laid out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
% Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
% generating a 3x3 kernel that is slightly larger than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
% Generate octagonal shaped kernel of given radius and constant scale.
% Default radius is 3 producing a 7x7 kernel. A radius of 1 will result
% in "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
% Generate a binary disk, thresholded at the radius given, the radius
% may be a float-point value. Final Kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
% NOTE: That a low radii Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
% All other Disk shapes are unique to this kernel, but because a "Disk"
% is more circular when using a larger radius, using a larger radius is
% preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
% These kernels are not good general morphological kernels, but are used
% more for highlighting and marking any single pixels in an image using
% a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
% a each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
% Defaults to a ring of approximately 3 radius in a 7x7 kernel.
% This is the 'edge' pixels of the default "Disk" kernel,
% More specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
% Find any peak larger than the pixels the fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
% Find end points of lines (for pruning a skeleton)
% Two types of line ends (default to both) can be searched for
% Type 0: All line ends
% Type 1: single kernel for 4-connected line ends
% Type 2: single kernel for simple line ends
% LineJunctions
% Find three line junctions (within a skeleton)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
% Type 1: Find single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
% Type 1: Traditional Skeleton kernel (4 connected skeleton)
% Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
% Type 3: Thinning skeleton based on a research paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
% A huge variety of Thinning Kernels designed to preserve connectivity.
% many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% a 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling an anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information of how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
% is a value of one to any neighbour, orthogonal or diagonal. One way
% of thinking of it is the number of squares a 'King' or 'Queen' in
% chess needs to traverse to reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
% Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
% Cab distance metric), it is the distance needed when you can only
% travel in horizontal or vertical directions only. It is the
% distance a 'Rook' in chess would have to travel, and results in a
% diamond like distances, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
% An interleaving of Manhattan and Chebyshev metrics producing an
% increasing octagonally shaped distance. Distances matches those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
% Euclidean distance is the 'direct' or 'as the crow flies' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
% you will get octagonal like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without losing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate a non-integer
% fractional results, and as such scaling is vital even for binary shapes.
%
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args,ExceptionInfo *exception)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *) NULL);
break;
case LaplacianKernel: /* Named Descrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case BinomialKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctangonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature=MagickCoreSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(1,sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
break;
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
* How to do this is I don't know, but appears to be basied on the
* Error Function 'erf()' (intergral of a gaussian)
*/
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
{ /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, and thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
** As such while wierd it is prefered.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging
This is equivelent to a KernelRank of 1 */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (> 0.1) the central value becomes larger than one, as a
** result of not generating a actual 'discrete' kernel, and thus
** producing a very bright 'impulse'.
**
** Becuase of these two factors Normalization is required!
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
** there is no need for a divisor in the gaussian formula
**
** It is less comples
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) memset(kernel->values,0, (size_t)
kernel->width*sizeof(*kernel->values));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
case BinomialKernel:
{
size_t
order_f;
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
order_f = fact(kernel->width-1);
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=0; v < (ssize_t)kernel->height; v++)
{ size_t
alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) );
for ( u=0; u < (ssize_t)kernel->width; u++, i++)
kernel->positive_range += kernel->values[i] = (double)
(alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) ));
}
kernel->minimum = 1.0;
kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width];
kernel->negative_range = 0.0;
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3]= +(MagickRealType) MagickSQ2;
kernel->values[5] = kernel->values[7]= -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 10:
{
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
}
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +(MagickRealType) MagickSQ2;
kernel->values[5] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +(MagickRealType) MagickSQ2;
kernel->values[7] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +(MagickRealType) MagickSQ2;
kernel->values[8] = -(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -(MagickRealType) MagickSQ2;
kernel->values[6] = +(MagickRealType) MagickSQ2;
CalcKernelMetaData(kernel);
ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) >= MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values along axises to given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>",exception));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>",exception));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
/* Unfortunatally we can not yet rotate a non-square kernel */
/* But then we can't flip a non-symetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
/* HIPR Variation of the cyclic skeleton
** Corners of the traditional method made more forgiving,
** but the retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;",exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo("ThinSE:41; ThinSE:42; ThinSE:43",
exception);
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(MagickRealType *) MagickAssumeAligned(
AcquireAlignedMemory(kernel->width,kernel->height*
sizeof(*kernel->values)));
if (kernel->values == (MagickRealType *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
break;
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original.  The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  register ssize_t
    i;

  KernelInfo
    *new_kernel;

  assert(kernel != (KernelInfo *) NULL);
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel);  /* copy values in structure */
  /*
    Reset the pointers borrowed from the source before any failure path:
    otherwise DestroyKernelInfo() below would free the source kernel's value
    array and recursively destroy the source kernel's list tail.
  */
  new_kernel->values=(MagickRealType *) NULL;
  new_kernel->next=(KernelInfo *) NULL;
  /* replace the values with a copy of the values */
  new_kernel->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel->width,kernel->height*sizeof(*kernel->values)));
  if (new_kernel->values == (MagickRealType *) NULL)
    return(DestroyKernelInfo(new_kernel));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];
  /* Also clone the next kernel in the kernel list */
  if (kernel->next != (KernelInfo *) NULL)
    {
      new_kernel->next=CloneKernelInfo(kernel->next);
      if (new_kernel->next == (KernelInfo *) NULL)
        return(DestroyKernelInfo(new_kernel));
    }
  return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
  assert(kernel != (KernelInfo *) NULL);
  /* walk the kernel list, releasing each node's value array and the node */
  while (kernel != (KernelInfo *) NULL)
  {
    KernelInfo
      *next;

    next=kernel->next;
    kernel->values=(MagickRealType *) RelinquishAlignedMemory(kernel->values);
    kernel=(KernelInfo *) RelinquishMagickMemory(kernel);
    kernel=next;
  }
  return((KernelInfo *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels, but providing a reflected 180
% rotation, before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
/*
  FlopKernelInfo() sketches an in-place horizontal mirror of a kernel:
  reverse each row of values and mirror the origin's x offset.
  NOTE(review): deliberately disabled - the last statement references an
  undefined variable 'angle', so this does not compile; kept as a sketch
  for a future flip/flop implementation only.
*/
static void FlopKernelInfo(KernelInfo *kernel)
{ /* Do a Flop by reversing each row. */
  size_t
    y;
  register ssize_t
    x,r;
  register double
    *k,t;

  /* swap mirrored value pairs within every row */
  for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
    for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--)
      t=k[x], k[x]=k[r], k[r]=t;

  /* mirror the origin's horizontal offset */
  kernel->x = kernel->width - kernel->x - 1;
  angle = fmod(angle+180.0, 360.0); /* BUG: 'angle' is undefined here */
}
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
  /*
    Append three mirrored variants of the kernel: a 180-degree rotation
    (flip), then a 90-degree rotation of that (transpose), then another
    180-degree rotation (flop).  Each variant is cloned from the kernel
    appended just before it, so the list order matches this sequence.
  */
  static const double
    angles[3] = { 180.0, 90.0, 180.0 };

  KernelInfo
    *copy,
    *tail;

  register ssize_t
    i;

  tail=kernel;
  for (i=0; i < 3; i++)
  {
    copy=CloneKernelInfo(tail);
    if (copy == (KernelInfo *) NULL)
      return;  /* allocation failed - leave the partial expansion as-is */
    RotateKernelInfo(copy,angles[i]);
    LastKernelInfo(tail)->next=copy;
    tail=copy;
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45-degree rotations only work for 3x3 kernels.
% While 90-degree rotations only work for linear and square kernels.
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
  const KernelInfo *kernel2)
{
  register size_t
    n;

  /* geometry and origin must agree before any values are compared */
  if ((kernel1->width != kernel2->width) ||
      (kernel1->height != kernel2->height) ||
      (kernel1->x != kernel2->x) ||
      (kernel1->y != kernel2->y))
    return MagickFalse;
  /* compare every kernel value, treating NaN as matching only NaN */
  for (n=0; n < (kernel1->width*kernel1->height); n++)
  {
    if ( IsNaN(kernel1->values[n]) && !IsNaN(kernel2->values[n]) )
      return MagickFalse;
    if ( IsNaN(kernel2->values[n]) && !IsNaN(kernel1->values[n]) )
      return MagickFalse;
    /* values must agree to within epsilon (NaN pairs fall through here) */
    if ( fabs(kernel1->values[n] - kernel2->values[n]) >= MagickEpsilon )
      return MagickFalse;
  }
  return MagickTrue;
}
static void ExpandRotateKernelInfo(KernelInfo *kernel, const double angle)
{
  KernelInfo
    *rotated,
    *tail;

  /*
    Repeatedly clone the tail of the list, rotate it by 'angle', and append
    it - until the rotation cycles back to the first kernel (or a clone
    fails).  The final clone is a repeat (or NULL) and is discarded.
  */
  rotated=(KernelInfo *) NULL;
  tail=kernel;
  DisableMSCWarning(4127)
  for ( ; ; ) {
  RestoreMSCWarning
    rotated=CloneKernelInfo(tail);
    if (rotated == (KernelInfo *) NULL)
      break;
    RotateKernelInfo(rotated,angle);
    if (SameKernelInfo(kernel,rotated) != MagickFalse)
      break;  /* rotation cycled back to the start - stop expanding */
    LastKernelInfo(tail)->next=rotated;
    tail=rotated;
  }
  if (rotated != (KernelInfo *) NULL)
    rotated=DestroyKernelInfo(rotated); /* kernel repeated - junk */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a l c K e r n e l M e t a D a t a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel
% only, using the kernel values.  This should only be used if it is not
% possible to calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
% void CalcKernelMetaData(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  register size_t
    n;

  double
    value;

  /* rebuild min/max and the positive/negative range sums from the values */
  kernel->minimum = kernel->maximum = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (n=0; n < (kernel->width*kernel->height); n++)
  {
    /* treat negligibly small magnitudes as exactly zero */
    if ( fabs(kernel->values[n]) < MagickEpsilon )
      kernel->values[n] = 0.0;
    value = kernel->values[n];
    if (value < 0)
      kernel->negative_range += value;
    else
      kernel->positive_range += value;
    Minimize(kernel->minimum, value);
    Maximize(kernel->maximum, value);
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels. This is the method that should be called by
% other 'operators' that internally use morphology operations as part of
% their processing.
%
% It is basically equivalent to MorphologyImage() (see below) but without
% any user controls.  This allows internal programs to use this method to
% perform a specific task without possible interference by any API user
% supplied settings.
%
% It is MorphologyImage() task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically, all given kernels should already be scaled, normalised,
% and blended appropriately before being passed to this routine.  The
% appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ssize_t iterations,const KernelInfo *kernel,
% const CompositeMethod compose,const double bias,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ssize_t MorphologyPrimitive(const Image *image,Image *morphology_image,
const MorphologyMethod method,const KernelInfo *kernel,const double bias,
ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"
CacheView
*image_view,
*morphology_view;
OffsetInfo
offset;
register ssize_t
j,
y;
size_t
*changes,
changed,
width;
MagickBooleanType
status;
MagickOffsetType
progress;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(morphology_image != (Image *) NULL);
assert(morphology_image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
morphology_view=AcquireAuthenticCacheView(morphology_image,exception);
width=image->columns+kernel->width-1;
offset.x=0;
offset.y=0;
switch (method)
{
case ConvolveMorphology:
case DilateMorphology:
case DilateIntensityMorphology:
case IterativeDistanceMorphology:
{
/*
Kernel needs to used with reflection about origin.
*/
offset.x=(ssize_t) kernel->width-kernel->x-1;
offset.y=(ssize_t) kernel->height-kernel->y-1;
break;
}
case ErodeMorphology:
case ErodeIntensityMorphology:
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
{
offset.x=kernel->x;
offset.y=kernel->y;
break;
}
default:
{
assert("Not a Primitive Morphology Method" != (char *) NULL);
break;
}
}
changed=0;
changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
sizeof(*changes));
if (changes == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
changes[j]=0;
if ((method == ConvolveMorphology) && (kernel->width == 1))
{
register ssize_t
x;
/*
Special handling (for speed) of vertical (blur) kernels. This performs
its handling in columns rather than in rows. This is only done
for convolve as it is the only method that generates very large 1-D
vertical kernels (such as a 'BlurKernel')
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,morphology_image,image->columns,1)
#endif
for (x=0; x < (ssize_t) image->columns; x++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
r;
ssize_t
center;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,x,-offset.y,1,image->rows+
kernel->height-1,exception);
q=GetCacheViewAuthenticPixels(morphology_view,x,0,1,
morphology_image->rows,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
center=(ssize_t) GetPixelChannels(image)*offset.y;
for (r=0; r < (ssize_t) image->rows; r++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
alpha,
gamma,
pixel;
PixelChannel
channel;
PixelTrait
morphology_traits,
traits;
register const MagickRealType
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
v;
size_t
count;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
morphology_traits=GetPixelChannelTraits(morphology_image,channel);
if ((traits == UndefinedPixelTrait) ||
(morphology_traits == UndefinedPixelTrait))
continue;
if (((traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
{
SetPixelChannel(morphology_image,channel,p[center+i],q);
continue;
}
k=(&kernel->values[kernel->height-1]);
pixels=p;
pixel=bias;
gamma=0.0;
count=0;
if ((morphology_traits & BlendPixelTrait) == 0)
for (v=0; v < (ssize_t) kernel->height; v++)
{
if (!IsNaN(*k))
{
pixel+=(*k)*pixels[i];
gamma+=(*k);
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
else
for (v=0; v < (ssize_t) kernel->height; v++)
{
if (!IsNaN(*k))
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=alpha*(*k)*pixels[i];
gamma+=alpha*(*k);
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
if (fabs(pixel-p[center+i]) > MagickEpsilon)
changes[id]++;
gamma=PerceptibleReciprocal(gamma);
if (count != 0)
gamma*=(double) kernel->height/count;
SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*
pixel),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(morphology_image);
}
if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MorphologyPrimitive)
#endif
proceed=SetImageProgress(image,MorphologyTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
morphology_image->type=image->type;
morphology_view=DestroyCacheView(morphology_view);
image_view=DestroyCacheView(image_view);
for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
changed+=changes[j];
changes=(size_t *) RelinquishMagickMemory(changes);
return(status ? (ssize_t) changed : 0);
}
/*
Normal handling of horizontal or rectangular kernels (row by row).
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,morphology_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
ssize_t
center;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,
kernel->height,exception);
q=GetCacheViewAuthenticPixels(morphology_view,0,y,morphology_image->columns,
1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
center=(ssize_t) (GetPixelChannels(image)*width*offset.y+
GetPixelChannels(image)*offset.x);
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
alpha,
gamma,
intensity,
maximum,
minimum,
pixel;
PixelChannel
channel;
PixelTrait
morphology_traits,
traits;
register const MagickRealType
*magick_restrict k;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
size_t
count;
ssize_t
v;
channel=GetPixelChannelChannel(image,i);
traits=GetPixelChannelTraits(image,channel);
morphology_traits=GetPixelChannelTraits(morphology_image,channel);
if ((traits == UndefinedPixelTrait) ||
(morphology_traits == UndefinedPixelTrait))
continue;
if (((traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
{
SetPixelChannel(morphology_image,channel,p[center+i],q);
continue;
}
pixels=p;
maximum=0.0;
minimum=(double) QuantumRange;
switch (method)
{
case ConvolveMorphology: pixel=bias; break;
case DilateMorphology:
case ErodeIntensityMorphology:
{
pixel=0.0;
break;
}
default:
{
pixel=(double) p[center+i];
break;
}
}
count=0;
gamma=1.0;
switch (method)
{
case ConvolveMorphology:
{
/*
Weighted Average of pixels using reflected kernel
For correct working of this operation for asymetrical kernels,
the kernel needs to be applied in its reflected form. That is
its values needs to be reversed.
Correlation is actually the same as this but without reflecting
the kernel, and thus 'lower-level' that Convolution. However as
Convolution is the more common method used, and it does not
really cost us much in terms of processing to use a reflected
kernel, so it is Convolution that is implemented.
Correlation will have its kernel reflected before calling this
function to do a Convolve.
For more details of Correlation vs Convolution see
http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
*/
k=(&kernel->values[kernel->width*kernel->height-1]);
if ((morphology_traits & BlendPixelTrait) == 0)
{
/*
No alpha blending.
*/
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
pixel+=(*k)*pixels[i];
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
/*
Alpha blending.
*/
gamma=0.0;
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
pixel+=alpha*(*k)*pixels[i];
gamma+=alpha*(*k);
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case ErodeMorphology:
{
/*
Minimum value within kernel neighbourhood.
The kernel is not reflected for this operation. In normal
Greyscale Morphology, the kernel value should be added
to the real value, this is currently not done, due to the
nature of the boolean kernels being used.
*/
k=kernel->values;
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k) && (*k >= 0.5))
{
if ((double) pixels[i] < pixel)
pixel=(double) pixels[i];
}
k++;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case DilateMorphology:
{
/*
Maximum value within kernel neighbourhood.
For correct working of this operation for asymetrical kernels,
the kernel needs to be applied in its reflected form. That is
its values needs to be reversed.
In normal Greyscale Morphology, the kernel value should be
added to the real value, this is currently not done, due to the
nature of the boolean kernels being used.
*/
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k) && (*k > 0.5))
{
if ((double) pixels[i] > pixel)
pixel=(double) pixels[i];
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
{
/*
Minimum of foreground pixel minus maxumum of background pixels.
The kernel is not reflected for this operation, and consists
of both foreground and background pixel neighbourhoods, 0.0 for
background, and 1.0 for foreground with either Nan or 0.5 values
for don't care.
This never produces a meaningless negative result. Such results
cause Thinning/Thicken to not work correctly when used against a
greyscale image.
*/
k=kernel->values;
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if (*k > 0.7)
{
if ((double) pixels[i] < pixel)
pixel=(double) pixels[i];
}
else
if (*k < 0.3)
{
if ((double) pixels[i] > maximum)
maximum=(double) pixels[i];
}
count++;
}
k++;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
pixel-=maximum;
if (pixel < 0.0)
pixel=0.0;
if (method == ThinningMorphology)
pixel=(double) p[center+i]-pixel;
else
if (method == ThickenMorphology)
pixel+=(double) p[center+i]+pixel;
break;
}
case ErodeIntensityMorphology:
{
/*
Select pixel with minimum intensity within kernel neighbourhood.
The kernel is not reflected for this operation.
*/
k=kernel->values;
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k) && (*k >= 0.5))
{
intensity=(double) GetPixelIntensity(image,pixels);
if (intensity < minimum)
{
pixel=(double) pixels[i];
minimum=intensity;
}
count++;
}
k++;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case DilateIntensityMorphology:
{
/*
Select pixel with maximum intensity within kernel neighbourhood.
The kernel is not reflected for this operation.
*/
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k) && (*k >= 0.5))
{
intensity=(double) GetPixelIntensity(image,pixels);
if (intensity > maximum)
{
pixel=(double) pixels[i];
maximum=intensity;
}
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case IterativeDistanceMorphology:
{
/*
Compute th iterative distance from black edge of a white image
shape. Essentually white values are decreased to the smallest
'distance from edge' it can find.
It works by adding kernel values to the neighbourhood, and and
select the minimum value found. The kernel is rotated before
use, so kernel distances match resulting distances, when a user
provided asymmetric kernel is applied.
This code is nearly identical to True GrayScale Morphology but
not quite.
GreyDilate Kernel values added, maximum value found Kernel is
rotated before use.
GrayErode: Kernel values subtracted and minimum value found No
kernel rotation used.
Note the the Iterative Distance method is essentially a
GrayErode, but with negative kernel values, and kernel rotation
applied.
*/
k=(&kernel->values[kernel->width*kernel->height-1]);
for (v=0; v < (ssize_t) kernel->height; v++)
{
for (u=0; u < (ssize_t) kernel->width; u++)
{
if (!IsNaN(*k))
{
if ((pixels[i]+(*k)) < pixel)
pixel=(double) pixels[i]+(*k);
count++;
}
k--;
pixels+=GetPixelChannels(image);
}
pixels+=(image->columns-1)*GetPixelChannels(image);
}
break;
}
case UndefinedMorphology:
default:
break;
}
if (fabs(pixel-p[center+i]) > MagickEpsilon)
changes[id]++;
gamma=PerceptibleReciprocal(gamma);
if (count != 0)
gamma*=(double) kernel->height*kernel->width/count;
SetPixelChannel(morphology_image,channel,ClampToQuantum(gamma*pixel),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(morphology_image);
}
if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_MorphologyPrimitive)
#endif
proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
morphology_view=DestroyCacheView(morphology_view);
image_view=DestroyCacheView(image_view);
for (j=0; j < (ssize_t) GetOpenMPMaximumThreads(); j++)
changed+=changes[j];
changes=(size_t *) RelinquishMagickMemory(changes);
return(status ? (ssize_t) changed : -1);
}
/*
  This is almost identical to the MorphologyPrimitive() function above, but
  applies the primitive directly to the actual image using two passes, once
  in each direction, with the results of the previous (and current) row being
  re-used.
  That is, after each row is 'Sync'ed back into the image, the next row makes
  use of those values as part of the calculation of the next row.  It
  repeats, but going in the opposite (bottom-up) direction.
  Because of this 're-use of results' this function can not make use of
  multi-threaded, parallel processing.
*/
/*
  MorphologyPrimitiveDirect() applies a morphology primitive (currently only
  DistanceMorphology and VoronoiMorphology) directly to 'image' in place,
  using two sequential passes: top-down then bottom-up.  Each pass folds
  kernel values into the current minimum "distance" pixel, re-using results
  already written earlier in the same pass, which is what makes a single
  iteration propagate distances across the whole image.

  Returns the number of pixels changed, or -1 on error.

  NOTE(review): because every row depends on rows already processed, this
  function is inherently serial and must not be threaded.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
  const MorphologyMethod method,const KernelInfo *kernel,
  ExceptionInfo *exception)
{
  CacheView
    *morphology_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  size_t
    width,
    changed;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=MagickTrue;
  changed=0;
  progress=0;
  switch(method)
  {
    case DistanceMorphology:
    case VoronoiMorphology:
    {
      /*
        Kernel reflected about origin: offset becomes the distance from the
        kernel origin to its bottom-right corner.
      */
      offset.x=(ssize_t) kernel->width-kernel->x-1;
      offset.y=(ssize_t) kernel->height-kernel->y-1;
      break;
    }
    default:
    {
      offset.x=kernel->x;
      offset.y=kernel->y;
      break;
    }
  }
  /*
    Two views into same image, do not thread.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  width=image->columns+kernel->width-1;
  /*
    Pass 1: top-down.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    /*
      Read virtual pixels, and authentic pixels, from the same image!  We read
      using virtual to get virtual pixel handling, but write back into the
      same image.
      Only top half of kernel is processed as we do a single pass downward
      through the image iterating the distance function as we go.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y-offset.y,width,(size_t)
      offset.y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* index (in quantum units) of the pixel under the kernel origin */
    center=(ssize_t) (GetPixelChannels(image)*width*offset.y+
      GetPixelChannels(image)*offset.x);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        /* skip copy-only channels and write-masked pixels */
        if (((traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
          continue;
        pixels=p;
        /* start from the maximum possible distance and minimize downward */
        pixel=(double) QuantumRange;
        switch (method)
        {
          case DistanceMorphology:
          {
            /* scan kernel rows above and including the origin (from p) */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v <= offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* scan already-updated pixels to the left on this row (from q) */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          case VoronoiMorphology:
          {
            /* as DistanceMorphology, but the origin row is excluded
               (v < offset.y, not <=) from the virtual-pixel scan */
            k=(&kernel->values[kernel->width*kernel->height-1]);
            for (v=0; v < offset.y; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q-offset.x*GetPixelChannels(image);
            for (u=0; u < offset.x; u++)
            {
              if (!IsNaN(*k) && ((x+u-offset.x) >= 0))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
              pixels+=GetPixelChannels(image);
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress spans both passes, hence 2*image->rows */
        proceed=SetImageProgress(image,MorphologyTag,progress++,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  /*
    Do the reverse pass through the image.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  morphology_view=AcquireAuthenticCacheView(image,exception);
  for (y=(ssize_t) image->rows-1; y >= 0; y--)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    /*
      Read virtual pixels, and authentic pixels, from the same image.  We
      read using virtual to get virtual pixel handling, but write back
      into the same image.
      Only the bottom half of the kernel is processed as we go up the image.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-offset.x,y,width,(size_t)
      kernel->y+1,exception);
    q=GetCacheViewAuthenticPixels(morphology_view,0,y,image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* walk this row right-to-left, so start at the last pixel */
    p+=(image->columns-1)*GetPixelChannels(image);
    q+=(image->columns-1)*GetPixelChannels(image);
    center=(ssize_t) (offset.x*GetPixelChannels(image));
    for (x=(ssize_t) image->columns-1; x >= 0; x--)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits;

        register const MagickRealType
          *magick_restrict k;

        register const Quantum
          *magick_restrict pixels;

        register ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        if (((traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(image,p+center) <= (QuantumRange/2)))
          continue;
        pixels=p;
        pixel=(double) QuantumRange;
        switch (method)
        {
          case DistanceMorphology:
          {
            /* scan kernel rows at and below the origin (from p) */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            /* scan already-updated pixels to the right on this row (from q) */
            k=(&kernel->values[kernel->width*kernel->y+kernel->x-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          case VoronoiMorphology:
          {
            /* NOTE(review): differs from DistanceMorphology only in the
               second kernel index used for the in-row scan */
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            for (v=offset.y; v < (ssize_t) kernel->height; v++)
            {
              for (u=0; u < (ssize_t) kernel->width; u++)
              {
                if (!IsNaN(*k))
                  {
                    if ((pixels[i]+(*k)) < pixel)
                      pixel=(double) pixels[i]+(*k);
                  }
                k--;
                pixels+=GetPixelChannels(image);
              }
              pixels+=(image->columns-1)*GetPixelChannels(image);
            }
            k=(&kernel->values[kernel->width*(kernel->y+1)-1]);
            pixels=q;
            for (u=offset.x+1; u < (ssize_t) kernel->width; u++)
            {
              pixels+=GetPixelChannels(image);
              if (!IsNaN(*k) && ((x+u-offset.x) < (ssize_t) image->columns))
                {
                  if ((pixels[i]+(*k)) < pixel)
                    pixel=(double) pixels[i]+(*k);
                }
              k--;
            }
            break;
          }
          default:
            break;
        }
        if (fabs(pixel-q[i]) > MagickEpsilon)
          changed++;
        q[i]=ClampToQuantum(pixel);
      }
      p-=GetPixelChannels(image);
      q-=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(morphology_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,MorphologyTag,progress++,2*image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  morphology_view=DestroyCacheView(morphology_view);
  image_view=DestroyCacheView(image_view);
  return(status ? (ssize_t) changed : -1);
}
/*
  Apply a Morphology by calling one of the above low-level primitive
  application functions.  This function handles any iteration loops,
  composition or re-iteration of results, and compound morphology methods
  that are based on multiple low-level (staged) morphology methods.
  Basically this provides the complex glue between the requested morphology
  method and the raw low-level implementation (above).
*/
/*
  MorphologyApply() applies 'method' to 'image' using 'kernel', handling
  kernel iteration, compound (multi-stage) methods, multi-kernel lists and
  result composition.  Returns a new image, or NULL on error or when
  iterations is 0 (a no-op).

  Fixes in this revision: removed the doubled "(void) (void)" casts before
  FormatLocaleFile() (two occurrences) and corrected spelling in comments.
  No behavioral change other than the redundant-cast removal (which is
  purely syntactic).

  o iterations: < 0 means iterate until no further change (bounded by the
    larger image dimension); Thinning/Thicken/HitAndMiss apply iterations to
    the whole method rather than the kernel.
  o compose: overrides the default multi-kernel result composition.
*/
MagickPrivate Image *MorphologyApply(const Image *image,
  const MorphologyMethod method, const ssize_t iterations,
  const KernelInfo *kernel, const CompositeOperator compose,const double bias,
  ExceptionInfo *exception)
{
  CompositeOperator
    curr_compose;

  Image
    *curr_image,    /* Image we are working with or iterating */
    *work_image,    /* secondary image for primitive iteration */
    *save_image,    /* saved image - for 'edge' method only */
    *rslt_image;    /* resultant image - after multi-kernel handling */

  KernelInfo
    *reflected_kernel, /* A reflected copy of the kernel (if needed) */
    *norm_kernel,      /* the current normal un-reflected kernel */
    *rflt_kernel,      /* the current reflected kernel (if needed) */
    *this_kernel;      /* the kernel being applied */

  MorphologyMethod
    primitive;      /* the current morphology primitive being applied */

  CompositeOperator
    rslt_compose;   /* multi-kernel compose method for results to use */

  MagickBooleanType
    special,        /* do we use a direct modify function? */
    verbose;        /* verbose output of results */

  size_t
    method_loop,    /* Loop 1: number of compound method iterations (norm 1) */
    method_limit,   /* maximum number of compound method iterations */
    kernel_number,  /* Loop 2: the kernel number being applied */
    stage_loop,     /* Loop 3: primitive loop for compound morphology */
    stage_limit,    /* how many primitives are in this compound */
    kernel_loop,    /* Loop 4: iterate the kernel over image */
    kernel_limit,   /* number of times to iterate kernel */
    count,          /* total count of primitive steps applied */
    kernel_changed, /* total count of changed using iterated kernel */
    method_changed; /* total count of changed over method iteration */

  ssize_t
    changed;        /* number pixels changed by last primitive operation */

  char
    v_info[MagickPathExtent];

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);

  count = 0;      /* number of low-level morphology primitives performed */
  if ( iterations == 0 )
    return((Image *) NULL);   /* null operation - nothing to do! */

  kernel_limit = (size_t) iterations;
  if ( iterations < 0 )  /* negative iterations = infinite (well, almost) */
    kernel_limit = image->columns>image->rows ? image->columns : image->rows;

  verbose = IsStringTrue(GetImageArtifact(image,"debug"));

  /* initialise for cleanup */
  curr_image = (Image *) image;
  curr_compose = image->compose;
  (void) curr_compose;  /* currently unused; kept to suppress warnings */
  work_image = save_image = rslt_image = (Image *) NULL;
  reflected_kernel = (KernelInfo *) NULL;

  /* Initialize specific methods
   * + which loop should use the given iterations
   * + how many primitives make up the compound morphology
   * + multi-kernel compose method to use (by default)
   */
  method_limit = 1;       /* just do method once, unless otherwise set */
  stage_limit = 1;        /* assume method is not a compound */
  special = MagickFalse;  /* assume it is NOT a direct modify primitive */
  rslt_compose = compose; /* and we are composing multi-kernels as given */
  switch( method ) {
    case SmoothMorphology:  /* 4 primitive compound morphology */
      stage_limit = 4;
      break;
    case OpenMorphology:    /* 2 primitive compound morphology */
    case OpenIntensityMorphology:
    case TopHatMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case EdgeMorphology:
      stage_limit = 2;
      break;
    case HitAndMissMorphology:
      rslt_compose = LightenCompositeOp;  /* Union of multi-kernel results */
      /* FALLTHROUGH */
    case ThinningMorphology:
    case ThickenMorphology:
      method_limit = kernel_limit;  /* iterate the whole method */
      kernel_limit = 1;             /* do not do kernel iteration */
      break;
    case DistanceMorphology:
    case VoronoiMorphology:
      special = MagickTrue;         /* use special direct primitive */
      break;
    default:
      break;
  }

  /* Apply special methods with special requirements
  ** For example, single run only, or post-processing requirements
  */
  if ( special != MagickFalse )
    {
      rslt_image=CloneImage(image,0,0,MagickTrue,exception);
      if (rslt_image == (Image *) NULL)
        goto error_cleanup;
      if (SetImageStorageClass(rslt_image,DirectClass,exception) == MagickFalse)
        goto error_cleanup;
      changed=MorphologyPrimitiveDirect(rslt_image,method,kernel,exception);

      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr,
          "%s:%.20g.%.20g #%.20g => Changed %.20g\n",
          CommandOptionToMnemonic(MagickMorphologyOptions, method),
          1.0,0.0,1.0, (double) changed);

      if ( changed < 0 )
        goto error_cleanup;

      if ( method == VoronoiMorphology ) {
        /* Preserve the alpha channel of input image - but turned it off */
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
        (void) CompositeImage(rslt_image,image,CopyAlphaCompositeOp,
          MagickTrue,0,0,exception);
        (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel,
          exception);
      }
      goto exit_cleanup;
    }

  /* Handle user (caller) specified multi-kernel composition method */
  if ( compose != UndefinedCompositeOp )
    rslt_compose = compose;  /* override default composition for method */
  if ( rslt_compose == UndefinedCompositeOp )
    rslt_compose = NoCompositeOp;  /* still not defined! Then re-iterate */

  /* Some methods require a reflected kernel to use with primitives.
   * Create the reflected kernel for those methods. */
  switch ( method ) {
    case CorrelateMorphology:
    case CloseMorphology:
    case CloseIntensityMorphology:
    case BottomHatMorphology:
    case SmoothMorphology:
      reflected_kernel = CloneKernelInfo(kernel);
      if (reflected_kernel == (KernelInfo *) NULL)
        goto error_cleanup;
      RotateKernelInfo(reflected_kernel,180);
      break;
    default:
      break;
  }

  /* Loops around more primitive morphology methods
  ** erode, dilate, open, close, smooth, edge, etc...
  */
  /* Loop 1: iterate the compound method */
  method_loop = 0;
  method_changed = 1;
  while ( method_loop < method_limit && method_changed > 0 ) {
    method_loop++;
    method_changed = 0;

    /* Loop 2: iterate over each kernel in a multi-kernel list */
    norm_kernel = (KernelInfo *) kernel;
    this_kernel = (KernelInfo *) kernel;
    rflt_kernel = reflected_kernel;

    kernel_number = 0;
    while ( norm_kernel != NULL ) {

      /* Loop 3: Compound Morphology Staging - Select Primitive to apply */
      stage_loop = 0;          /* the compound morphology stage number */
      while ( stage_loop < stage_limit ) {
        stage_loop++;   /* The stage of the compound morphology */

        /* Select primitive morphology for this stage of compound method */
        this_kernel = norm_kernel; /* default use unreflected kernel */
        primitive = method;        /* Assume method is a primitive */
        switch( method ) {
          case ErodeMorphology:      /* just erode */
          case EdgeInMorphology:     /* erode and image difference */
            primitive = ErodeMorphology;
            break;
          case DilateMorphology:     /* just dilate */
          case EdgeOutMorphology:    /* dilate and image difference */
            primitive = DilateMorphology;
            break;
          case OpenMorphology:       /* erode then dilate */
          case TopHatMorphology:     /* open and image difference */
            primitive = ErodeMorphology;
            if ( stage_loop == 2 )
              primitive = DilateMorphology;
            break;
          case OpenIntensityMorphology:
            primitive = ErodeIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = DilateIntensityMorphology;
            break;
          case CloseMorphology:      /* dilate, then erode */
          case BottomHatMorphology:  /* close and image difference */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeMorphology;
            break;
          case CloseIntensityMorphology:
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = DilateIntensityMorphology;
            if ( stage_loop == 2 )
              primitive = ErodeIntensityMorphology;
            break;
          case SmoothMorphology:     /* open, close */
            switch ( stage_loop ) {
              case 1: /* start an open method, which starts with Erode */
                primitive = ErodeMorphology;
                break;
              case 2: /* now Dilate the Erode */
                primitive = DilateMorphology;
                break;
              case 3: /* Reflect kernel a close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = DilateMorphology;
                break;
              case 4: /* Finish the Close */
                this_kernel = rflt_kernel; /* use the reflected kernel */
                primitive = ErodeMorphology;
                break;
            }
            break;
          case EdgeMorphology:       /* dilate and erode difference */
            primitive = DilateMorphology;
            if ( stage_loop == 2 ) {
              save_image = curr_image; /* save the image difference */
              curr_image = (Image *) image;
              primitive = ErodeMorphology;
            }
            break;
          case CorrelateMorphology:
            /* A Correlation is a Convolution with a reflected kernel.
            ** However a Convolution is a weighted sum using a reflected
            ** kernel. It may seem strange to convert a Correlation into a
            ** Convolution as the Correlation is the simpler method, but
            ** Convolution is much more commonly used, and it makes sense to
            ** implement it directly so as to avoid the need to duplicate the
            ** kernel when it is not required (which is typically the
            ** default).
            */
            this_kernel = rflt_kernel; /* use the reflected kernel */
            primitive = ConvolveMorphology;
            break;
          default:
            break;
        }
        assert( this_kernel != (KernelInfo *) NULL );

        /* Extra information for debugging compound operations */
        if (verbose != MagickFalse) {
          if ( stage_limit > 1 )
            (void) FormatLocaleString(v_info,MagickPathExtent,"%s:%.20g.%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
              method_loop,(double) stage_loop);
          else if ( primitive != method )
            (void) FormatLocaleString(v_info, MagickPathExtent, "%s:%.20g -> ",
              CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
              method_loop);
          else
            v_info[0] = '\0';
        }

        /* Loop 4: Iterate the kernel with primitive */
        kernel_loop = 0;
        kernel_changed = 0;
        changed = 1;
        while ( kernel_loop < kernel_limit && changed > 0 ) {
          kernel_loop++;     /* the iteration of this kernel */

          /* Create a clone as the destination image, if not yet defined */
          if ( work_image == (Image *) NULL )
            {
              work_image=CloneImage(image,0,0,MagickTrue,exception);
              if (work_image == (Image *) NULL)
                goto error_cleanup;
              if (SetImageStorageClass(work_image,DirectClass,exception) == MagickFalse)
                goto error_cleanup;
            }

          /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
          count++;
          changed = MorphologyPrimitive(curr_image, work_image, primitive,
            this_kernel, bias, exception);
          if (verbose != MagickFalse) {
            if ( kernel_loop > 1 )
              (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
            (void) FormatLocaleFile(stderr,
              "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
              v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
              primitive),(this_kernel == rflt_kernel ) ? "*" : "",
              (double) (method_loop+kernel_loop-1),(double) kernel_number,
              (double) count,(double) changed);
          }
          if ( changed < 0 )
            goto error_cleanup;
          kernel_changed += changed;
          method_changed += changed;

          /* prepare next loop */
          { Image *tmp = work_image; /* swap images for iteration */
            work_image = curr_image;
            curr_image = tmp;
          }
          if ( work_image == image )
            work_image = (Image *) NULL; /* replace input 'image' */

        } /* End Loop 4: Iterate the kernel with primitive */

        if (verbose != MagickFalse && kernel_changed != (size_t)changed)
          (void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
        if (verbose != MagickFalse && stage_loop < stage_limit)
          (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */

#if 0
    (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
    (void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
    (void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
    (void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
    (void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif

      } /* End Loop 3: Primitive (staging) Loop for Compound Methods */

      /* Final Post-processing for some Compound Methods
      **
      ** The removal of any 'Sync' channel flag in the Image Composition
      ** below ensures the mathematical compose method is applied in a
      ** purely mathematical way, and only to the selected channels.
      ** Turn off SVG composition 'alpha blending'.
      */
      switch( method ) {
        case EdgeOutMorphology:
        case EdgeInMorphology:
        case TopHatMorphology:
        case BottomHatMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference with original image",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          break;
        case EdgeMorphology:
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr,
              "\n%s: Difference of Dilate and Erode",CommandOptionToMnemonic(
              MagickMorphologyOptions, method) );
          (void) CompositeImage(curr_image,save_image,DifferenceCompositeOp,
            MagickTrue,0,0,exception);
          save_image = DestroyImage(save_image); /* finished with save image */
          break;
        default:
          break;
      }

      /* multi-kernel handling: re-iterate, or compose results */
      if ( kernel->next == (KernelInfo *) NULL )
        rslt_image = curr_image; /* just return the resulting image */
      else if ( rslt_compose == NoCompositeOp )
        { if (verbose != MagickFalse) {
            if ( this_kernel->next != (KernelInfo *) NULL )
              (void) FormatLocaleFile(stderr, " (re-iterate)");
            else
              (void) FormatLocaleFile(stderr, " (done)");
          }
          rslt_image = curr_image; /* return result, and re-iterate */
        }
      else if ( rslt_image == (Image *) NULL)
        { if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (save for compose)");
          rslt_image = curr_image;
          curr_image = (Image *) image; /* continue with original image */
        }
      else
        { /* Add the new 'current' result to the composition
          **
          ** The removal of any 'Sync' channel flag in the Image Composition
          ** below ensures the mathematical compose method is applied in a
          ** purely mathematical way, and only to the selected channels.
          ** IE: Turn off SVG composition 'alpha blending'.
          */
          if (verbose != MagickFalse)
            (void) FormatLocaleFile(stderr, " (compose \"%s\")",
              CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
          (void) CompositeImage(rslt_image,curr_image,rslt_compose,MagickTrue,
            0,0,exception);
          curr_image = DestroyImage(curr_image);
          curr_image = (Image *) image; /* continue with original image */
        }
      if (verbose != MagickFalse)
        (void) FormatLocaleFile(stderr, "\n");

      /* loop to the next kernel in a multi-kernel list */
      norm_kernel = norm_kernel->next;
      if ( rflt_kernel != (KernelInfo *) NULL )
        rflt_kernel = rflt_kernel->next;
      kernel_number++;
    } /* End Loop 2: Loop over each kernel */

  } /* End Loop 1: compound method iteration */

  goto exit_cleanup;

  /* Yes goto's are bad, but they make cleanup a lot more efficient */
error_cleanup:
  if ( curr_image == rslt_image )
    curr_image = (Image *) NULL;
  if ( rslt_image != (Image *) NULL )
    rslt_image = DestroyImage(rslt_image);
exit_cleanup:
  if ( curr_image == rslt_image || curr_image == image )
    curr_image = (Image *) NULL;
  if ( curr_image != (Image *) NULL )
    curr_image = DestroyImage(curr_image);
  if ( work_image != (Image *) NULL )
    work_image = DestroyImage(work_image);
  if ( save_image != (Image *) NULL )
    save_image = DestroyImage(save_image);
  if ( reflected_kernel != (KernelInfo *) NULL )
    reflected_kernel = DestroyKernelInfo(reflected_kernel);
  return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MorphologyImage() applies a user supplied kernel to the image according to
%  the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and correlation ("-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-define convolve:scale=??")
%  This can also include the addition of a scaled unity kernel.
% * Show Kernel being applied ("-define morphology:showKernel=1")
%
% Other operators that do not want user supplied options interfering,
% especially "convolve:bias" and "morphology:showKernel" should use
% MorphologyApply() directly.
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphologyImage(const Image *image,
const MorphologyMethod method,const ssize_t iterations,
const KernelInfo *kernel,ExceptionInfo *exception)
{
const char
*artifact;
CompositeOperator
compose;
double
bias;
Image
*morphology_image;
KernelInfo
*curr_kernel;
/* Start with the caller's (const) kernel; it is only cloned below if a
** user "convolve:scale" setting requires modifying it. */
curr_kernel = (KernelInfo *) kernel;
bias=0.0;
compose = UndefinedCompositeOp; /* use default for method */
/* Apply Convolve/Correlate Normalization and Scaling Factors.
* This is done BEFORE the ShowKernelInfo() function is called so that
* users can see the results of the 'option:convolve:scale' option.
*/
if ( method == ConvolveMorphology || method == CorrelateMorphology ) {
/* Get the bias value as it will be needed */
artifact = GetImageArtifact(image,"convolve:bias");
if ( artifact != (const char *) NULL) {
if (IsGeometry(artifact) == MagickFalse)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidSetting","'%s' '%s'",
"convolve:bias",artifact);
else
/* bias is parsed as a fraction of the quantum range */
bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0);
}
/* Scale kernel according to user wishes */
artifact = GetImageArtifact(image,"convolve:scale");
if ( artifact != (const char *) NULL ) {
if (IsGeometry(artifact) == MagickFalse)
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"InvalidSetting","'%s' '%s'",
"convolve:scale",artifact);
else {
/* clone the caller's kernel before modifying it */
if ( curr_kernel == kernel )
curr_kernel = CloneKernelInfo(kernel);
if (curr_kernel == (KernelInfo *) NULL)
return((Image *) NULL);
ScaleGeometryKernelInfo(curr_kernel, artifact);
}
}
}
/* display the (normalized) kernel via stderr */
artifact=GetImageArtifact(image,"morphology:showKernel");
if (IsStringTrue(artifact) != MagickFalse)
ShowKernelInfo(curr_kernel);
/* Override the default handling of multi-kernel morphology results
* If 'Undefined' use the default method
* If 'None' (default for 'Convolve') re-iterate previous result
* Otherwise merge resulting images using compose method given.
* Default for 'HitAndMiss' is 'Lighten'.
*/
{
ssize_t
parse;
artifact = GetImageArtifact(image,"morphology:compose");
if ( artifact != (const char *) NULL) {
parse=ParseCommandOption(MagickComposeOptions,
MagickFalse,artifact);
if ( parse < 0 )
(void) ThrowMagickException(exception,GetMagickModule(),
OptionWarning,"UnrecognizedComposeOperator","'%s' '%s'",
"morphology:compose",artifact);
else
compose=(CompositeOperator)parse;
}
}
/* Apply the Morphology */
morphology_image = MorphologyApply(image,method,iterations,
curr_kernel,compose,bias,exception);
/* Cleanup and Exit */
/* destroy the clone (if one was made); never the caller's kernel */
if ( curr_kernel != kernel )
curr_kernel=DestroyKernelInfo(curr_kernel);
return(morphology_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree angles, of either 1D kernels
% or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
/* Rotate 'kernel' (and every kernel after it in the multi-kernel list) by
** 'angle' degrees.  Only orthogonal 90 degree rotations of 1-D or square
** kernels, and 45 degree "circular" rotations of 3x3 kernels, are handled;
** rotationally-symmetric built-in kernels are skipped as useless.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* rotate the rest of the multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally
  ** symmetrical.
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* Reduce the angle to the range [0, 360) */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return; /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allow a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;

    default:
      break;
  }
  /* Attempt rotations by 45 degrees -- 3x3 kernels only */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by a 45 degree angle:
          ** cyclically shift the eight outer values one step. */
          double t = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
            if ( x == y ) x = 0;
            else if ( x == 0 ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0 ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        /* BUG FIX: was perror(), which wrongly appends strerror(errno)
        ** even though errno has nothing to do with this condition. */
        (void) FormatLocaleFile(stderr,
          "Unable to rotate non-3x3 kernel by 45 degrees\n");
    }
  if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);  /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);   /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees */
          { register ssize_t
              i,j,x,y;
            register MagickRealType
              *k,t;
            k=kernel->values;
            /* four-way cyclic swap of mirrored positions */
            for( i=0, x=(ssize_t) kernel->width-1; i<=x; i++, x--)
              for( j=0, y=(ssize_t) kernel->height-1; j<y; j++, y--)
                { t = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);    /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        /* BUG FIX: was perror() -- errno is unrelated here as well */
        (void) FormatLocaleFile(stderr,
          "Unable to rotate a non-square, non-linear kernel 90 degrees\n");
    }
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also known as a reflection.
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data,
       * and a reflection of the origin.
       */
      MagickRealType
        t;
      register MagickRealType
        *k;
      ssize_t
        i,
        j;
      k=kernel->values;
      j=(ssize_t) (kernel->width*kernel->height-1);
      for (i=0; i < j; i++, j--)
        t=k[i], k[i]=k[j], k[j]=t;
      kernel->x = (ssize_t) kernel->width - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);        /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }
  /* At this point angle should be at least between -45 (315) and +45
   * degrees.  In the future some form of non-orthogonal angled rotates
   * could be performed here, possibly with a linear kernel restriction.
   */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
% void ScaleGeometryKernelInfo(KernelInfo *kernel,
% const char *geometry)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
  const char *geometry)
{
  GeometryInfo
    args;

  MagickStatusType
    flags;

  /* Parse the user's "convolve:scale" geometry string into rho/sigma */
  SetGeometryInfo(&args);
  flags = ParseGeometry(geometry, &args);
#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
    flags, args.rho, args.sigma, args.xi, args.psi );
#endif
  /* a '%' flag turns both arguments into fractions */
  if ( (flags & PercentValue) != 0 )
    {
      args.rho *= 0.01;
      args.sigma *= 0.01;
    }
  /* supply defaults for any missing arguments */
  if ( (flags & RhoValue) == 0 )
    args.rho = 1.0;
  if ( (flags & SigmaValue) == 0 )
    args.sigma = 0.0;
  /* first argument (plus normalize flags): scale/normalize the kernel */
  ScaleKernelInfo(kernel, args.rho, (GeometryFlags) flags);
  /* second argument: blend a scaled unity kernel into the result */
  if ( (flags & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, args.sigma);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel is scaled
% directly using given scaling factor without change.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically. Kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For Kernels that contain some negative values, (such as 'Sharpen' kernels)
% the kernel will be scaled by the absolute of the sum of kernel values, so
% that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero, (such as 'Laplician' kernels) kernel
% will be scaled by just the sum of the postive values, so that its output
% range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate', (often
% only containing +1 and -1 values, representing foreground/brackground
% matching) a special normalization method is provided to scale the positive
% values separately to those of the negative values, so the kernel will be
% forced to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
% multiply all values (after normalization) by this factor if not
% zero. If the kernel is normalized regardless of any flags.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
/* Scale every kernel in the multi-kernel list by 'scaling_factor', with
** optional normalization of the kernel value sums as selected by
** 'normalize_flags' (NormalizeValue '!' or CorrelateNormalizeValue '^').
** Positive and negative values may be scaled by different divisors so the
** convolution output range falls into -1.0 .. +1.0.
*/
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
  const double scaling_factor,const GeometryFlags normalize_flags)
{
  register double
    pos_scale,
    neg_scale;

  register ssize_t
    i;

  /* do the other kernels in a multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);

  /* Normalization of Kernel */
  pos_scale = 1.0;
  if ( (normalize_flags&NormalizeValue) != 0 ) {
    if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon )
      /* non-zero-summing kernel (generally positive) */
      pos_scale = fabs(kernel->positive_range + kernel->negative_range);
    else
      /* zero-summing kernel: normalize by the positive part only */
      pos_scale = kernel->positive_range;
  }
  /* Force kernel into a normalized zero-summing kernel: the positive and
  ** negative parts each get their own normalization divisor. */
  if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
    pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon )
      ? kernel->positive_range : 1.0;
    neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon )
      ? -kernel->negative_range : 1.0;
  }
  else
    neg_scale = pos_scale;

  /* finalize scaling_factor for positive and negative components */
  pos_scale = scaling_factor/pos_scale;
  neg_scale = scaling_factor/neg_scale;

  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    if (!IsNaN(kernel->values[i]))
      kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;

  /* convolution output range */
  kernel->positive_range *= pos_scale;
  kernel->negative_range *= neg_scale;
  /* maximum and minimum values in kernel */
  kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
  kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;

  /* swap kernel settings if user's scaling factor is negative */
  if ( scaling_factor < MagickEpsilon ) {
    double t;
    t = kernel->positive_range;
    kernel->positive_range = kernel->negative_range;
    kernel->negative_range = t;
    t = kernel->maximum;
    kernel->maximum = kernel->minimum;
    kernel->minimum = t;  /* BUG FIX: was '= 1', discarding the old maximum */
  }
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel definition to
% standard error, generally due to a users 'morphology:showKernel' option
% request.
%
% The format of the ShowKernel method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
/* Dump a human-readable description of 'kernel' (and any chained kernels)
** to standard error: type, angle, geometry, value ranges, and the kernel
** values themselves (NaN entries printed as "nan"). */
MagickPrivate void ShowKernelInfo(const KernelInfo *kernel)
{
const KernelInfo
*k;
size_t
c, i, u, v;
for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) {
(void) FormatLocaleFile(stderr, "Kernel");
/* number each kernel only for a multi-kernel list; this deliberately
** tests the list head's 'next' (not k's) so that every member of a
** multi-kernel list gets a number, including the last one */
if ( kernel->next != (KernelInfo *) NULL )
(void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c );
(void) FormatLocaleFile(stderr, " \"%s",
CommandOptionToMnemonic(MagickKernelOptions, k->type) );
if ( fabs(k->angle) >= MagickEpsilon )
(void) FormatLocaleFile(stderr, "@%lg", k->angle);
(void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
k->width,(unsigned long) k->height,(long) k->x,(long) k->y);
(void) FormatLocaleFile(stderr,
" with values from %.*lg to %.*lg\n",
GetMagickPrecision(), k->minimum,
GetMagickPrecision(), k->maximum);
(void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
GetMagickPrecision(), k->negative_range,
GetMagickPrecision(), k->positive_range);
/* classify the kernel by the sum of its output ranges */
if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon )
(void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
else if ( fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon )
(void) FormatLocaleFile(stderr, " (Normalized)\n");
else
(void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
GetMagickPrecision(), k->positive_range+k->negative_range);
/* print the kernel values, one row per line */
for (i=v=0; v < k->height; v++) {
(void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v );
for (u=0; u < k->width; u++, i++)
if (IsNaN(k->values[i]))
(void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
else
(void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
GetMagickPrecision(), (double) k->values[i]);
(void) FormatLocaleFile(stderr,"\n");
}
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n i t y A d d K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAddKernelInfo method is:
%
% void UnityAddKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  register KernelInfo
    *k;

  /* Walk the multi-kernel list iteratively rather than recursing (the
  ** recursive form visited the tail first, but each kernel's update is
  ** independent of the others, so order does not matter). */
  for (k = kernel; k != (KernelInfo *) NULL; k = k->next)
    {
      /* add 'scale' at the kernel origin, then refresh the meta-data */
      k->values[k->x+k->y*k->width] += scale;
      CalcKernelMetaData(k);
    }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simply
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickPrivate void ZeroKernelNans(KernelInfo *kernel)
{
  register KernelInfo
    *k;

  /* Replace every NaN entry with 0.0 in each kernel of the list; an
  ** iterative walk replaces the original tail-first recursion (each
  ** kernel is updated independently, so the order is irrelevant). */
  for (k = kernel; k != (KernelInfo *) NULL; k = k->next)
    {
      register size_t
        i;
      for (i = 0; i < (k->width*k->height); i++)
        if (IsNaN(k->values[i]))
          k->values[i] = 0.0;
    }
}
|
target_data_array_extension.c | // --------------------------------------------------
// Check extends before
// --------------------------------------------------
// RUN: %libomptarget-compile-generic \
// RUN: -fopenmp-version=51 -DEXTENDS=BEFORE
// RUN: %libomptarget-run-fail-generic 2>&1 \
// RUN: | %fcheck-generic
// --------------------------------------------------
// Check extends after
// --------------------------------------------------
// RUN: %libomptarget-compile-generic \
// RUN: -fopenmp-version=51 -DEXTENDS=AFTER
// RUN: %libomptarget-run-fail-generic 2>&1 \
// RUN: | %fcheck-generic
// END.
#include <stdio.h>
#define BEFORE 0
#define AFTER 1
#define SIZE 100
#if EXTENDS == BEFORE
# define SMALL_BEG (SIZE-2)
# define SMALL_END SIZE
# define LARGE_BEG 0
# define LARGE_END SIZE
#elif EXTENDS == AFTER
# define SMALL_BEG 0
# define SMALL_END 2
# define LARGE_BEG 0
# define LARGE_END SIZE
#else
# error EXTENDS undefined
#endif
#define SMALL_SIZE (SMALL_END-SMALL_BEG)
#define LARGE_SIZE (LARGE_END-LARGE_BEG)
#define SMALL SMALL_BEG:SMALL_SIZE
#define LARGE LARGE_BEG:LARGE_SIZE
// Driver for the extension-check test: map part of 'arr', then attempt a
// 'present' mapping that extends the mapped region, which must fail.
// FIX: the size expressions (int * sizeof) have type size_t, so print them
// with %zu instead of %ld (format specifier / argument type mismatch; the
// printed digits are unchanged, so the CHECK patterns still match).
int main() {
  int arr[SIZE];

  // CHECK: addr=0x[[#%x,SMALL_ADDR:]], size=[[#%u,SMALL_BYTES:]]
  fprintf(stderr, "addr=%p, size=%zu\n", &arr[SMALL_BEG],
          SMALL_SIZE * sizeof arr[0]);

  // CHECK: addr=0x[[#%x,LARGE_ADDR:]], size=[[#%u,LARGE_BYTES:]]
  fprintf(stderr, "addr=%p, size=%zu\n", &arr[LARGE_BEG],
          LARGE_SIZE * sizeof arr[0]);

  // Mapping a subsection of an already-mapped region must succeed silently.
  // CHECK-NOT: Libomptarget
#pragma omp target data map(alloc: arr[LARGE])
  {
#pragma omp target data map(present, tofrom: arr[SMALL])
    ;
  }

  // CHECK: arr is present
  fprintf(stderr, "arr is present\n");

  // Extending a mapped region is an error; combined with 'present' it is
  // fatal, so the final fprintf below must never execute.
  // CHECK: Libomptarget message: explicit extension not allowed: host address specified is 0x{{0*}}[[#LARGE_ADDR]] ([[#LARGE_BYTES]] bytes), but device allocation maps to host at 0x{{0*}}[[#SMALL_ADDR]] ([[#SMALL_BYTES]] bytes)
  // CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#LARGE_ADDR]] ([[#LARGE_BYTES]] bytes)
  // CHECK: Libomptarget error: Call to getOrAllocTgtPtr returned null pointer ('present' map type modifier).
  // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory
#pragma omp target data map(alloc: arr[SMALL])
  {
#pragma omp target data map(present, tofrom: arr[LARGE])
    ;
  }

  // CHECK-NOT: arr is present
  fprintf(stderr, "arr is present\n");
  return 0;
}
|
GB_binop__rdiv_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_uint32)
// A*D function (colscale): GB (_AxD__rdiv_uint32)
// D*A function (rowscale): GB (_DxB__rdiv_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_uint32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_uint32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_uint32)
// C=scalar+B GB (_bind1st__rdiv_uint32)
// C=scalar+B' GB (_bind1st_tran__rdiv_uint32)
// C=A+scalar GB (_bind2nd__rdiv_uint32)
// C=A'+scalar GB (_bind2nd_tran__rdiv_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 32)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_UNSIGNED (y, x, 32) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_UINT32 || GxB_NO_RDIV_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where all three matrices are dense (accumulate in place).
// Note: unlike the other kernels in this file, this one has no GB_DISABLE
// guard; the cij update is defined by the GB_BINOP macro above.
void GB (_Cdense_ewise3_accum__rdiv_uint32)
(
GrB_Matrix C,           // input/output matrix, updated in place
const GrB_Matrix A,     // first input matrix (dense)
const GrB_Matrix B,     // second input matrix (dense)
const int nthreads      // # of OpenMP threads to use
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense (no accumulation).
GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_uint32)
(
GrB_Matrix C,           // output matrix (dense)
const GrB_Matrix A,     // first input matrix (dense)
const GrB_Matrix B,     // second input matrix (dense)
const int nthreads      // # of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;   // operator disabled at compile time
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
GrB_Info GB (_Cdense_accumB__rdiv_uint32)
(
GrB_Matrix C,           // input/output matrix (dense), updated in place
const GrB_Matrix B,     // sparse matrix accumulated into C
// parallel task partition of B's entries:
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;   // operator disabled at compile time
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
GrB_Info GB (_Cdense_accumb__rdiv_uint32)
(
GrB_Matrix C,           // input/output matrix (dense), updated in place
const GB_void *p_bwork, // pointer to the scalar b (type uint32_t)
const int nthreads      // # of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;   // operator disabled at compile time
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable (the block above already returned); a quirk of the
// code generator, kept as-is since this file is auto-generated
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column-scale A by the diagonal matrix D.
GrB_Info GB (_AxD__rdiv_uint32)
(
GrB_Matrix C,           // output matrix
const GrB_Matrix A, bool A_is_pattern,   // input; pattern-only if true
const GrB_Matrix D, bool D_is_pattern,   // diagonal scaling matrix
// parallel task partition of A's entries:
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;   // operator disabled at compile time
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;   // typed view of C's values
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row-scale B by the diagonal matrix D.
GrB_Info GB (_DxB__rdiv_uint32)
(
GrB_Matrix C,           // output matrix
const GrB_Matrix D, bool D_is_pattern,   // diagonal scaling matrix
const GrB_Matrix B, bool B_is_pattern,   // input; pattern-only if true
int nthreads            // # of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;   // operator disabled at compile time
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;   // typed view of C's values
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, optionally masked.
GrB_Info GB (_AaddB__rdiv_uint32)
(
GrB_Matrix C,                  // output matrix
const int C_sparsity,          // sparsity structure chosen for C
const GrB_Matrix M,            // optional mask (may be NULL)
const bool Mask_struct,        // mask is structural, not valued
const bool Mask_comp,          // mask is complemented
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,           // C's hyperlist equals M's hyperlist
// mappings from C's vectors to M, A, and B:
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,   // parallel task schedule
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;   // operator disabled at compile time
#else
// workspace for slicing M, A, and B; freed by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C=A.*B (optionally masked) where C is sparse/hyper.
GrB_Info GB (_AemultB_08__rdiv_uint32)
(
GrB_Matrix C,                  // output matrix (sparse or hypersparse)
const int C_sparsity,
const int ewise_method,        // which emult variant the caller selected
const GrB_Matrix M,            // optional mask (may be NULL)
const bool Mask_struct,        // mask is structural, not valued
const bool Mask_comp,          // mask is complemented
const GrB_Matrix A,
const GrB_Matrix B,
// mappings from C's vectors to M, A, and B:
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,   // parallel task schedule
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;   // operator disabled at compile time
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for rdiv (the flip was already handled
// by using rdiv in place of div), so only the non-flipped branch below is
// compiled in.
GrB_Info GB (_AemultB_02__rdiv_uint32)
(
GrB_Matrix C,                  // output matrix
const GrB_Matrix M,            // optional mask (may be NULL)
const bool Mask_struct,        // mask is structural, not valued
const bool Mask_comp,          // mask is complemented
const GrB_Matrix A,            // sparse/hypersparse input
const GrB_Matrix B,            // bitmap/full input
const bool flipxy,             // if true, apply fmult(y,x) instead of fmult(x,y)
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;   // operator disabled at compile time
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B, with M sparse/hyper and A, B
// bitmap/full.
GrB_Info GB (_AemultB_04__rdiv_uint32)
(
GrB_Matrix C,                  // output matrix
const GrB_Matrix M,            // mask (sparse or hypersparse)
const bool Mask_struct,        // mask is structural, not valued
const GrB_Matrix A,            // bitmap/full input
const GrB_Matrix B,            // bitmap/full input
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;   // operator disabled at compile time
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__rdiv_uint32)
(
GrB_Matrix C,                  // output matrix (bitmap)
const int ewise_method,        // which emult variant the caller selected
const GrB_Matrix M,            // optional mask (may be NULL)
const bool Mask_struct,        // mask is structural, not valued
const bool Mask_comp,          // mask is complemented
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;   // operator disabled at compile time
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = rdiv (x, Bx): bind the scalar as the first operand.  Since
// rdiv(x,y) = y/x, each entry becomes Cx [p] = Bx [p] / x, using the
// library's unsigned integer-divide macro GB_IDIV_UNSIGNED (which defines
// the divide-by-zero convention; see GB.h).  Entries not present in the
// bitmap Bb are skipped; Cx and Bx may be aliased (safe: element-wise,
// same index read then written).
GrB_Info GB (_bind1st__rdiv_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IDIV_UNSIGNED (bij, x, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = rdiv (Ax, y): bind the scalar as the second operand.  Since
// rdiv(x,y) = y/x, each entry becomes Cx [p] = y / Ax [p] via
// GB_IDIV_UNSIGNED.  Entries not present in the bitmap Ab are skipped;
// Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__rdiv_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IDIV_UNSIGNED (y, aij, 32) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// C = rdiv (x, A'): transpose A and apply the operator with the scalar bound
// as first operand.  GB_CAST_OP is redefined so the shared transpose
// template (GB_unop_transpose.c) computes Cx [pC] = aij / x per entry.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 32) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Restore GB_ATYPE for any code generated after this function.  (For this
// uint32/uint32 operator the restored definition happens to be identical
// to the one above; in the general generated file it differs.)
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// C = rdiv (A', y): transpose A and apply the operator with the scalar bound
// as second operand.  GB_CAST_OP is redefined so the shared transpose
// template (GB_unop_transpose.c) computes Cx [pC] = y / aij per entry.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 32) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_loop.h | // -*- C++ -*-
// Copyright (C) 2007-2016 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/omp_loop.h
* @brief Parallelization of embarrassingly parallel execution by
* means of an OpenMP for loop.
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Felix Putze.
#ifndef _GLIBCXX_PARALLEL_OMP_LOOP_H
#define _GLIBCXX_PARALLEL_OMP_LOOP_H 1
#include <omp.h>
#include <parallel/settings.h>
#include <parallel/basic_iterator.h>
#include <parallel/base.h>
namespace __gnu_parallel
{
/** @brief Embarrassingly parallel algorithm for random access
* iterators, using an OpenMP for loop.
*
* @param __begin Begin iterator of element sequence.
* @param __end End iterator of element sequence.
* @param __o User-supplied functor (comparator, predicate, adding
* functor, etc.).
* @param __f Functor to @a process an element with __op (depends on
* desired functionality, e. g. for std::for_each(), ...).
* @param __r Functor to @a add a single __result to the already
* processed elements (depends on functionality).
* @param __base Base value for reduction.
* @param __output Pointer to position where final result is written to
* @param __bound Maximum number of elements processed (e. g. for
* std::count_n()).
* @return User-supplied functor (that may contain a part of the result).
*/
template<typename _RAIter,
typename _Op,
typename _Fu,
typename _Red,
typename _Result>
_Op
__for_each_template_random_access_omp_loop(_RAIter __begin, _RAIter __end,
_Op __o, _Fu& __f, _Red __r,
_Result __base,
_Result& __output,
typename std::iterator_traits<_RAIter>::difference_type __bound)
{
typedef typename std::iterator_traits<_RAIter>::difference_type
_DifferenceType;
_DifferenceType __length = __end - __begin;
// Never request more threads than there are elements to process.
_ThreadIndex __num_threads = __gnu_parallel::min<_DifferenceType>
(__get_max_threads(), __length);
// One partial result per thread.  Allocated by a single thread inside the
// parallel region; the implicit barrier at the end of "omp single"
// publishes both the pointer and the refreshed __num_threads to the whole
// team before any thread reads them.
// NOTE(review): __base appears unused here - partials start from
// _Result() and __output keeps its incoming value; confirm against the
// other omp_loop variants before relying on __base.
_Result *__thread_results;
# pragma omp parallel num_threads(__num_threads)
{
# pragma omp single
{
__num_threads = omp_get_num_threads();
__thread_results = new _Result[__num_threads];
for (_ThreadIndex __i = 0; __i < __num_threads; ++__i)
__thread_results[__i] = _Result();
}
_ThreadIndex __iam = omp_get_thread_num();
// Each thread folds its chunks into its own slot, so the loop body needs
// no synchronization; dynamic scheduling balances uneven element costs.
#pragma omp for schedule(dynamic, _Settings::get().workstealing_chunk_size)
for (_DifferenceType __pos = 0; __pos < __length; ++__pos)
__thread_results[__iam] = __r(__thread_results[__iam],
__f(__o, __begin+__pos));
} //parallel
// Sequential reduction of the per-thread partials into __output.
for (_ThreadIndex __i = 0; __i < __num_threads; ++__i)
__output = __r(__output, __thread_results[__i]);
delete [] __thread_results;
// Points to last element processed (needed as return value for
// some algorithms like transform).
__f._M_finish_iterator = __begin + __length;
return __o;
}
} // end namespace
#endif /* _GLIBCXX_PARALLEL_OMP_LOOP_H */
|
parallel.c | //OpenMP version. Edit and submit only this file.
/* Enter your details below
* Name :
* UCLA ID :
* Email :
*/
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include "utils.h"
double work_it_par(long *old, long *new, long *super, long *simple, long *fibonacci) {
// Parallelized homework kernel.  Semantics match the serial reference (see
// the commented-out loops at the bottom).  Bug fix in this revision:
// `sum` and `moving_average` are OpenMP reduction variables, and a
// reduction folds the variable's *incoming* value into the final result -
// both were previously read uninitialized (undefined behavior).  They are
// now initialized to 0.
int i, j, k, kk, jj, ii;
int u, v, w, u1;
int ton = 0;
// moving_average and sum feed reduction clauses below - must start at 0.
long compute_it, i1, moving_average = 0;
double pi, pi2, x, y, sum = 0.0, step = 0.0;
long dot_product=0;
long nCirc=0;
long aggregate=1.0;
double r=1.0;
int was_smart = 16;
long temp = 0;
int iDim, jDim, iuDim, jDimPlus, jDimPlusPlus, simplified, uDim;
// Hoisted loop-invariant helper calls.
long wntf = we_need_the_func();
long gtf = gimmie_the_func();
int dimSquare = DIM*DIM;
int b = 10; // tile (block) size for the stencil loops
// Element-wise accumulate: iterations are independent.
#pragma omp parallel for private(i)
for(i=0; i<DIM-1;i++)
{
super[i] += simple[i];
}
// NOTE(review): moving_average is reset to 0 at every iteration, so the
// reduced value is the sum over threads of each thread's *last* window sum
// (matching the serial code's last-iteration-wins result only under the
// default static schedule with a fixed thread count).  Kept as-is to
// preserve the expected output.
#pragma omp parallel for private(i) reduction(+:dot_product,moving_average)
for(i=0; i<DIM-1;i++)
{
dot_product += super[i]*simple[i];
moving_average = 0;
for(ton=i;ton<DIM-1-WINDOW_SIZE;ton++)
{
moving_average += simple[ton];
}
}
int a_secret = 5;
// Fibonacci has a loop-carried dependence - must stay serial.
fibonacci[0] = 1;
fibonacci[1] = 1;
for(i=2; i<DIM-1;i++)
{
fibonacci[i]=fibonacci[i-1]+fibonacci[i-2];
if(i==3)
{
printf("\n A secret is: %d",obfuscate_obfuscate_obfuscate(a_secret));
}
}
// Riemann-sum estimate of pi (midpoint rule on 4/(1+x^2)).
step = 1.0 / NUM_STEPS;
#pragma omp parallel for private(x) reduction(+:sum)
for (i=0;i<NUM_STEPS; i++)
{
x = (i+0.5)*step;
sum = sum + 4.0/(1.0+x*x);
}
pi = step * sum;
printf("\n %d trials, Riemann flavored pi is %f \n",NUM_STEPS, pi);
// Monte-Carlo estimate of pi.
// NOTE(review): random() is not required to be thread-safe and serializes
// on an internal lock in glibc; rand_r() with per-thread seeds would scale
// better but changes the sampled sequence, so it is left unchanged.
#pragma omp parallel for private(x, y) reduction(+:nCirc)
for(i=0;i<NUM_TRIALS; i++)
{
x = (random()%10000000)/10000000.0;
y = (random()%10000000)/10000000.0;
if (( x*x + y*y) <= r*r)
{
nCirc++;
}
}
pi2 = 4.0 * ((double)nCirc/(double)NUM_TRIALS);
printf("\n %d trials, Monte-Carlo flavored pi is %f \n",NUM_TRIALS, pi2);
// 27-point box filter over the interior of the DIM^3 volume, tiled with
// block size b for cache locality.  temp is only per-element scratch; it
// sits in the reduction list so each thread gets a private copy (the
// combined value is never read afterwards).
#pragma omp parallel for num_threads(16) private (j, k, compute_it, iDim, jDim, iuDim, u, v, w, simplified, uDim, kk, jj, ii) reduction(+:aggregate, temp)
for (i=1; i<DIM-1-b; i+=b) {
for (j=1; j<DIM-1-b; j+=b) {
for (k=1; k<DIM-1-b; k+=b) {
for (ii = i; ii < i+b; ii++){
iDim = ii*dimSquare;
for (jj = j; jj < j+b; jj++){
jDim = jj*DIM;
for (kk = k; kk < k+b; kk++){
simplified = iDim + jDim + kk;
compute_it = old[simplified] * wntf;
aggregate += compute_it / gtf;
temp = 0;
// Sum the 3x3x3 neighborhood of old[] around this cell.
for (u=-1; u<=1; u++) {
uDim = u*dimSquare;
temp += old[simplified-DIM-1+uDim];
temp += old[simplified-DIM+uDim];
temp += old[simplified-DIM+1+uDim];
temp += old[simplified-1+uDim];
temp += old[simplified+uDim];
temp += old[simplified+1+uDim];
temp += old[simplified+DIM-1+uDim];
temp += old[simplified+DIM+uDim];
temp += old[simplified+DIM+1+uDim];
}
temp/=27;
new[simplified] = temp;
}
}
}
}
}
}
printf("AGGR:%ld\n",aggregate);
// Histogram of the filtered volume, with the j and k loops unrolled by 3.
// (new[x]*41)>>12 approximates new[x]/100; values clamped to buckets 0..9.
for (i=1; i<DIM-1; i++) {
iDim = i*dimSquare;
for (j=1; j<DIM-1; j+=3) {
jDim = j*DIM;
jDimPlus = (j+1)*DIM;
jDimPlusPlus = (j+2)*DIM;
for (k=1; k<DIM-1; k+=3) {
u= (new[iDim+jDim+k] * 41) >> 12;
if (u<=0) u=0;
if (u>=9) u=9;
histogrammy[u]++;
u= (new[iDim+jDim+k+1] * 41) >> 12;
if (u<=0) u=0;
if (u>=9) u=9;
histogrammy[u]++;
u= (new[iDim+jDim+k+2] * 41) >> 12;
if (u<=0) u=0;
if (u>=9) u=9;
histogrammy[u]++;
u= (new[iDim+jDimPlus+k]* 41) >> 12;
if (u<=0) u=0;
if (u>=9) u=9;
histogrammy[u]++;
u= (new[iDim+jDimPlus+k+1]* 41) >> 12;
if (u<=0) u=0;
if (u>=9) u=9;
histogrammy[u]++;
u= (new[iDim+jDimPlus+k+2]* 41) >> 12;
if (u<=0) u=0;
if (u>=9) u=9;
histogrammy[u]++;
u= (new[iDim+jDimPlusPlus+k]* 41) >> 12;
if (u<=0) u=0;
if (u>=9) u=9;
histogrammy[u]++;
u= (new[iDim+jDimPlusPlus+k+1]* 41) >> 12;
if (u<=0) u=0;
if (u>=9) u=9;
histogrammy[u]++;
u= (new[iDim+jDimPlusPlus+k+2]* 41) >> 12;
if (u<=0) u=0;
if (u>=9) u=9;
histogrammy[u]++;
}
}
}
/* Serial reference implementation of the filter + histogram, kept for
documentation of the intended semantics:
for (i=1; i<DIM-1; i++) {
for (j=1; j<DIM-1; j++) {
for (k=1; k<DIM-1; k++) {
new[i*DIM*DIM+j*DIM+k]=0;
for (u=-1; u<=1; u++) {
for (v=-1; v<=1; v++) {
for (w=-1; w<=1; w++) {
new[i*DIM*DIM+j*DIM+k]+=old[(i+u)*DIM*DIM+(j+v)*DIM+(k+w)];
}
}
}
new[i*DIM*DIM+j*DIM+k]/=27;
}
}
}
for (i=1; i<DIM-1; i++) {
for (j=1; j<DIM-1; j++) {
for (k=1; k<DIM-1; k++) {
u=(new[i*DIM*DIM+j*DIM+k]/100);
if (u<=0) u=0;
if (u>=9) u=9;
histogrammy[u]++;
}
}
}
*/
return (double) (dot_product+moving_average+pi+pi2);
}
|
10_single_thread.c | #include <stdio.h>
#include <omp.h>
int main(){
// Demo of the OpenMP "single" construct.  BUGFIX: the single region was
// previously placed *after* the parallel region (which applied only to the
// first printf), so it ran on the initial thread's team of one and
// demonstrated nothing.  It must be inside the parallel region so that
// exactly one thread of the team executes it.
#pragma omp parallel
{
printf("Parallel from thread %i\n", omp_get_thread_num());
#pragma omp single
{
printf("Single from thread %i\n", omp_get_thread_num());
}
}
return 0;
}
|
Vec.h | #ifndef VEC_H
#define VEC_H
/*
Szymon Rusinkiewicz
Princeton University
Vec.h
Class for a constant-length vector, meant to be generally useful for graphics.
Attempts to be similar to the union of <array>, <valarray>, and GLSL vectors,
where convenient.
Creation:
vec v1; // Initialized to (0, 0, 0)
vec v2(1.23f); // Initialized to (1.23f, 1.23f, 1.23f)
vec v3(1, 2, 3); // Initialized to (1, 2, 3)
vec v4(v3); // Copy constructor
float farray[3];
vec v5 = vec(farray); // Explicit: "vec v5 = farray" won't work
SomeOtherVectorType v_other;
vec v6 = vec(v_other); // Anything for which operator[] is defined
point p1, p2, p3; // Same as vec
Vec<3,double> vd; // The "vec" used above is Vec<3,float>
dvec4 vd42; // See typedefs below
v1 = vec::uniform_rnd(3); // Vector of random numbers in (0,3)
v1 = vec::normal_rnd(42); // Gaussian noise vector with sigma = 42
Assignment:
v1 = v2;
v2 = 42; // Assigns (42, 42, 42)
v3.fill(42); // Assigns (42, 42, 42)
v4.set(1,2,3);
v5 = farray;
Access:
float f = v1[0]; // Subscript - not range checked
f = get<2>(v1); // Subscript - range checked at compile time
f = v1.at(42); // Subscript - range checked at run time
f = v1.x + v2.y; // GLSL-like. Access to 1 component only
float *fp = v1; // Implicit conversion to float *
ivec3 iv(5,6,7);
int ind = iv.indexof(6); // Find index of component - returns -1 if none
Vec, Vec/Vec and Vec/scalar operators:
v1 = -v2; // Unary - and +
if (!v1) {} // Check for all components zero
v1 += v2; // Also -=, *=, /=
v1 *= 2; // Also /=, +=, -=
v1 = v2 + v3 - v4; // Also *, / (all componentwise)
v1 = 3.5f * v2 + v3 / 2; // Also +, - (scalar can come first or second)
if (v1 == v2) {} // Also !=, <, >, <=, >=
std::set<vec> // This is why we need operator <
Other Vec/Vec and Vec/scalar functions:
v1.min(v2); // Set v1 to min of v1 and v2 - also max, clamp
v1.clamp(-1, 1); // Also min, max
v1 = min(v2, v3); // Componentwise min - also max, clamp
v1 = clamp(v2, 0, 1); // Componentwise clamp - also min, max
v1.swap(v2); // Swap - atomic (OpenMP)
swap(v1, v2); // Swap - non-atomic
Dot product. There are four ways to write the dot product of v1 and v2:
f = v1 DOT v2
f = v1 ^ v2
f = dot(v1, v2)
f = v1.dot(v2)
SMR prefers the first of these, but all are implemented to allow for
the range of personal preferences.
Cross product - only in 3 dimensions. The possible spellings are:
v1 = v2 CROSS v3;
v1 = v2 % v3;
v1 = cross(v2, v3);
v1 = v2.cross(v3);
Make a vector unit-length. This operates in-place (not GLSL compatible!):
normalize(v1);
and this is the version that leaves the original alone, emulating GLSL:
vec n = normalized(v1);
Functions on vecs:
f = len(v1); // Length - also spelled length()
f = len2(v1); // Squared length - also length2()
f = dist(p1, p2); // Distance - also distance()
f = dist2(p1, p2); // Squared distance - also distance2()
f = angle(v1, v2); // Angle between vectors
f = v1.sum(); // From valarrays - see other functions below
v1 = sin(v2); // Componentwise - see list of functions below
v1 = v2.apply(sin); // Componentwise - any one-argument function
v1 = reflect(v2, n); // Reflected vector - n must be unit-length
v1 = refract(v2, n, 1.5f); // Refracted vector - n must be unit-length
v1 = faceforward(v2,v3,v4); // v2 if (v3 DOT v4) > 0, -v2 otherwise
v1 = trinorm(p1,p2,p3); // Normal of triangle (area-weighted)
Input/output:
cout << v1 << endl; // iostream output in the form (1, 2, 3)
cin >> v2; // iostream input - see below for input format
*/
#include "mathutil.h"
#include <iterator>
#include <stdexcept>
#include <iostream>
#define inline TRIMESH_INLINE
namespace trimesh {
// Storage for Vecs.
// Generic case: D components stored as a plain array.
template <size_t D, class T>
struct Vec_data {
T v[D];
};
// Specializations for D = 1..4 wrap the array in an anonymous union so the
// same storage is also reachable through GLSL-style named members:
// x/y/z/w (position), r/g/b/a (color), s/t/p/q (texture coordinates).
// NOTE(review): anonymous struct members inside a union are standard C11 but
// technically a (universally supported) extension in C++.
template <class T>
struct Vec_data<1,T> {
union {
T v[1];
struct { T x; };
struct { T r; };
struct { T s; };
};
};
template <class T>
struct Vec_data<2,T> {
union {
T v[2];
struct { T x, y; };
struct { T r, g; };
struct { T s, t; };
};
};
template <class T>
struct Vec_data<3,T> {
union {
T v[3];
struct { T x, y, z; };
struct { T r, g, b; };
struct { T s, t, p; };
};
};
template <class T>
struct Vec_data<4,T> {
union {
T v[4];
struct { T x, y, z, w; };
struct { T r, g, b, a; };
struct { T s, t, p, q; };
};
};
// Tag type for the uninitialized constructor: passing the null pointer
// VEC_UNINITIALIZED selects the Vec constructor that skips the default
// zero-initialization (used internally to avoid writing elements twice).
struct Vec_uninitialized {};
#define VEC_UNINITIALIZED ((::trimesh::Vec_uninitialized *) 0)
// Vec class declaration
// Fixed-length vector of D components of type T (see the usage overview at
// the top of this file).  Inherits its storage - and the named x/y/z... views
// - from Vec_data<D,T>.
// BUGFIX in this revision: cshift() now does its modulo arithmetic in int;
// previously a negative shift was converted to the unsigned size_t D inside
// "n % D", wrapping to a huge value and producing the wrong (often zero)
// rotation.  No other behavior changed.
template <size_t D, class T = float>
class Vec : public Vec_data<D,T> {
protected:
// Force dependent name lookup for inherited v
using Vec_data<D,T>::v;
public:
// Types
typedef T value_type;
typedef value_type *pointer;
typedef const value_type *const_pointer;
typedef value_type &reference;
typedef const value_type &const_reference;
typedef value_type *iterator;
typedef const value_type *const_iterator;
typedef ::std::reverse_iterator<iterator> reverse_iterator;
typedef ::std::reverse_iterator<const_iterator> const_reverse_iterator;
typedef ::std::size_t size_type;
typedef ::std::ptrdiff_t difference_type;
// A type giving the result of any operation (e.g. length) that
// must produce a floating-point result. This is double for
// integral types, else just T itself.
typedef typename ::std::conditional< ::std::is_integral<T>::value,
double, T >::type float_type;
public:
// Constructor for no arguments - everything initialized to zero
inline Vec() : Vec_data<D,T>() {}
// Uninitialized constructor - meant mostly for internal use
inline explicit Vec(Vec_uninitialized *) {}
// Constructors for 2 - 4 arguments
inline Vec(const T &x_, const T &y_)
{ TRIMESH_STATIC_CHECK(D == 2); v[0] = x_; v[1] = y_; }
inline Vec(const T &x_, const T &y_, const T &z_)
{ TRIMESH_STATIC_CHECK(D == 3); v[0] = x_; v[1] = y_; v[2] = z_; }
inline Vec(const T &x_, const T &y_, const T &z_, const T &w_)
{ TRIMESH_STATIC_CHECK(D == 4); v[0] = x_; v[1] = y_; v[2] = z_; v[3] = w_; }
// Constructor for 1 scalar argument, which is duplicated into
// all components. Explicit.
template <class S>
inline explicit Vec(S x_,
typename ::std::enable_if< ::std::is_arithmetic<S>::value, void >::type * = 0)
{
for (size_type i = 0; i < D; i++)
v[i] = x_;
}
// Constructor for 1 argument that's a pointer, array, or
// anything else that can be accessed using []. Explicit.
template <class S>
inline explicit Vec(const S &v_,
typename ::std::enable_if< !::std::is_arithmetic<S>::value, void >::type * = 0)
{
for (size_type i = 0; i < D; i++)
v[i] = v_[i];
}
// Return a Vec full of uniformly-distributed random numbers
static inline Vec<D,T> uniform_rnd(T sigma = 1)
{
Vec result(VEC_UNINITIALIZED);
for (size_type i = 0; i < D; i++)
result[i] = ::trimesh::uniform_rnd(sigma);
return result;
}
// Return a Vec full of normally-distributed random numbers
static inline Vec<D,T> normal_rnd(T sigma = 1)
{
Vec result(VEC_UNINITIALIZED);
for (size_type i = 0; i < D; i++)
result[i] = ::trimesh::normal_rnd(sigma);
return result;
}
// Assignment operator equivalents of the one-parameter constructors
template <class S>
inline typename ::std::enable_if< ::std::is_arithmetic<S>::value, Vec & >::type
operator = (S x_)
{
for (size_type i = 0; i < D; i++)
v[i] = x_;
return *this;
}
template <class S>
inline typename ::std::enable_if< !::std::is_arithmetic<S>::value, Vec & >::type
operator = (const S &v_)
{
for (size_type i = 0; i < D; i++)
v[i] = v_[i];
return *this;
}
// Using default copy constructor, assignment operator, and destructor
// Array reference - no bounds checking
inline reference operator [] (size_type i)
{ return v[i]; }
inline reference operator [] (int i)
{ return v[i]; }
inline const_reference operator [] (size_type i) const
{ return v[i]; }
inline const_reference operator [] (int i) const
{ return v[i]; }
// Array reference with run-time bounds checking
inline reference at(size_type i)
{
if (i >= D)
throw ::std::out_of_range("Vec::at");
return v[i];
}
inline const_reference at(size_type i) const
{
if (i >= D)
throw ::std::out_of_range("Vec::at");
return v[i];
}
// Other accessors, for compatibility with std::array
inline reference front()
{ return v[0]; }
inline const_reference front() const
{ return v[0]; }
inline reference back()
{ return v[D-1]; }
inline const_reference back() const
{ return v[D-1]; }
// Conversion to pointer
// NOTE(review): the non-const "operator const T *" duplicates the const
// overload; kept for source compatibility with existing callers.
inline operator T * ()
{ return v; }
inline operator const T * ()
{ return v; }
inline operator const T * () const
{ return v; }
inline pointer data()
{ return v; }
inline const_pointer data() const
{ return v; }
// Iterators
inline iterator begin()
{ return v; }
inline const_iterator begin() const
{ return v; }
inline const_iterator cbegin() const
{ return v; }
inline iterator end()
{ return begin() + D; }
inline const_iterator end() const
{ return begin() + D; }
inline const_iterator cend() const
{ return begin() + D; }
inline reverse_iterator rbegin()
{ return reverse_iterator(end()); }
inline const_reverse_iterator rbegin() const
{ return const_reverse_iterator(end()); }
inline const_reverse_iterator crbegin() const
{ return const_reverse_iterator(end()); }
inline reverse_iterator rend()
{ return reverse_iterator(begin()); }
inline const_reverse_iterator rend() const
{ return const_reverse_iterator(begin()); }
inline const_reverse_iterator crend() const
{ return const_reverse_iterator(begin()); }
// Capacity
inline size_type size() const
{ return D; }
inline size_type max_size() const
{ return D; }
// empty() - check for all components zero. Note that this definition
// of empty() is different from std::array, so it's marked deprecated.
TRIMESH_DEPRECATED
inline bool empty() const
{ return !(*this); }
// Set all components to zero
inline void clear()
{ for (size_type i = 0; i < D; i++) v[i] = 0; }
// Set all elements to some constant
inline void fill(const value_type &x_)
{
for (size_type i = 0; i < D; i++)
v[i] = x_;
}
inline void set(const value_type &x_)
{
for (size_type i = 0; i < D; i++)
v[i] = x_;
}
// Set elements to explicit values (only for dimensions 2-4)
inline void set(const T &x_, const T &y_)
{ TRIMESH_STATIC_CHECK(D == 2); v[0] = x_; v[1] = y_; }
inline void set(const T &x_, const T &y_, const T &z_)
{ TRIMESH_STATIC_CHECK(D == 3); v[0] = x_; v[1] = y_; v[2] = z_; }
inline void set(const T &x_, const T &y_, const T &z_, const T &w_)
{ TRIMESH_STATIC_CHECK(D == 4); v[0] = x_; v[1] = y_; v[2] = z_; v[3] = w_; }
// Componentwise Vec/Vec member operators.
// (*= and /= included, since some people actually want to do that...)
// Each element update is an "omp atomic", so concurrent threads may safely
// accumulate into the same Vec (per-element atomicity only, not per-Vec).
inline Vec &operator += (const Vec &v_)
{
for (size_type i = 0; i < D; i++)
#pragma omp atomic
v[i] += v_[i];
return *this;
}
inline Vec &operator -= (const Vec &v_)
{
for (size_type i = 0; i < D; i++)
#pragma omp atomic
v[i] -= v_[i];
return *this;
}
inline Vec &operator *= (const Vec &v_)
{
for (size_type i = 0; i < D; i++)
#pragma omp atomic
v[i] *= v_[i];
return *this;
}
inline Vec &operator /= (const Vec &v_)
{
for (size_type i = 0; i < D; i++)
#pragma omp atomic
v[i] /= v_[i];
return *this;
}
// Vec/scalar member operators.
// (+= and -= included, since some people actually want to do that...)
inline Vec &operator += (const T &x_)
{
for (size_type i = 0; i < D; i++)
#pragma omp atomic
v[i] += x_;
return *this;
}
inline Vec &operator -= (const T &x_)
{
for (size_type i = 0; i < D; i++)
#pragma omp atomic
v[i] -= x_;
return *this;
}
inline Vec &operator *= (const T &x_)
{
for (size_type i = 0; i < D; i++)
#pragma omp atomic
v[i] *= x_;
return *this;
}
inline Vec &operator /= (const T &x_)
{
for (size_type i = 0; i < D; i++)
#pragma omp atomic
v[i] /= x_;
return *this;
}
// Vec/scalar operators - these are friends so that implicit casting
// can happen on the scalar
inline friend const Vec operator + (const T &x, const Vec &v)
{
using namespace ::std;
Vec result(VEC_UNINITIALIZED);
for (size_t i = 0; i < D; i++)
result[i] = x + v[i];
return result;
}
inline friend const Vec operator + (const Vec &v, const T &x)
{
using namespace ::std;
Vec result(VEC_UNINITIALIZED);
for (size_t i = 0; i < D; i++)
result[i] = v[i] + x;
return result;
}
inline friend const Vec operator - (const T &x, const Vec &v)
{
using namespace ::std;
Vec result(VEC_UNINITIALIZED);
for (size_t i = 0; i < D; i++)
result[i] = x - v[i];
return result;
}
inline friend const Vec operator - (const Vec &v, const T &x)
{
using namespace ::std;
Vec result(VEC_UNINITIALIZED);
for (size_t i = 0; i < D; i++)
result[i] = v[i] - x;
return result;
}
inline friend const Vec operator * (const T &x, const Vec &v)
{
using namespace ::std;
Vec result(VEC_UNINITIALIZED);
for (size_t i = 0; i < D; i++)
result[i] = x * v[i];
return result;
}
inline friend const Vec operator * (const Vec &v, const T &x)
{
using namespace ::std;
Vec result(VEC_UNINITIALIZED);
for (size_t i = 0; i < D; i++)
result[i] = v[i] * x;
return result;
}
inline friend const Vec operator / (const T &x, const Vec &v)
{
using namespace ::std;
Vec result(VEC_UNINITIALIZED);
for (size_t i = 0; i < D; i++)
result[i] = x / v[i];
return result;
}
inline friend const Vec operator / (const Vec &v, const T &x)
{
using namespace ::std;
Vec result(VEC_UNINITIALIZED);
for (size_t i = 0; i < D; i++)
result[i] = v[i] / x;
return result;
}
// Comparing Vecs and scalars shouldn't work - too easy to make
// mistakes. The TRIMESH_STATIC_CHECKs below must depend on D to make
// clang not attempt to instantiate them when this file is parsed.
inline friend void operator == (const Vec &, const T &)
{ TRIMESH_STATIC_CHECK(!D); }
inline friend void operator == (const T &, const Vec &)
{ TRIMESH_STATIC_CHECK(!D); }
inline friend void operator != (const Vec &, const T &)
{ TRIMESH_STATIC_CHECK(!D); }
inline friend void operator != (const T &, const Vec &)
{ TRIMESH_STATIC_CHECK(!D); }
inline friend void operator > (const Vec &, const T &)
{ TRIMESH_STATIC_CHECK(!D); }
inline friend void operator > (const T &, const Vec &)
{ TRIMESH_STATIC_CHECK(!D); }
inline friend void operator >= (const Vec &, const T &)
{ TRIMESH_STATIC_CHECK(!D); }
inline friend void operator >= (const T &, const Vec &)
{ TRIMESH_STATIC_CHECK(!D); }
inline friend void operator < (const Vec &, const T &)
{ TRIMESH_STATIC_CHECK(!D); }
inline friend void operator < (const T &, const Vec &)
{ TRIMESH_STATIC_CHECK(!D); }
inline friend void operator <= (const Vec &, const T &)
{ TRIMESH_STATIC_CHECK(!D); }
inline friend void operator <= (const T &, const Vec &)
{ TRIMESH_STATIC_CHECK(!D); }
// Outside of class: Vec/Vec operators + - * / % ^ << >> == != < > <= >=
// Vec/Vec in-place min, max, and clamp
// ("omp critical" guards each compare-and-assign against concurrent use.)
inline Vec &min(const Vec &v_)
{
for (size_type i = 0; i < D; i++)
#pragma omp critical
if (v[i] > v_[i]) v[i] = v_[i];
return *this;
}
inline Vec &max(const Vec &v_)
{
for (size_type i = 0; i < D; i++)
#pragma omp critical
if (v[i] < v_[i]) v[i] = v_[i];
return *this;
}
inline Vec &clamp(const Vec &a, const Vec &b)
{
for (size_type i = 0; i < D; i++) {
#pragma omp critical
if (v[i] > b[i])
v[i] = b[i];
else if (!(v[i] >= a[i]))
v[i] = a[i];
}
return *this;
}
// Vec/scalar in-place min, max, and clamp
inline Vec &min(const T &x_)
{
for (size_type i = 0; i < D; i++)
#pragma omp critical
if (v[i] > x_) v[i] = x_;
return *this;
}
inline Vec &max(const T &x_)
{
for (size_type i = 0; i < D; i++)
#pragma omp critical
if (v[i] < x_) v[i] = x_;
return *this;
}
inline Vec &clamp(const T &a, const T &b)
{
for (size_type i = 0; i < D; i++) {
#pragma omp critical
if (v[i] > b)
v[i] = b;
else if (!(v[i] >= a))
v[i] = a;
}
return *this;
}
// Swap with another vector. (Also exists as a global function.)
inline void swap(Vec &v_)
{
using namespace ::std;
#pragma omp critical
for (size_type i = 0; i < D; i++) swap(v[i], v_[i]);
}
// Dot product with another vector (also exists as operator ^)
inline value_type dot(const Vec &v_) const
{
value_type total = v[0] * v_[0];
for (size_type i = 1; i < D; i++)
total += v[i] * v_[i];
return total;
}
// Cross product with another vector (also exists as operator %)
inline Vec<3,T> cross(const Vec<3,T> &v_) const
{
TRIMESH_STATIC_CHECK(D == 3);
return Vec<3,T>(v[1] * v_[2] - v[2] * v_[1],
v[2] * v_[0] - v[0] * v_[2],
v[0] * v_[1] - v[1] * v_[0]);
}
// Some partial compatibility with std::valarray, plus generalizations
inline value_type sum() const
{
value_type total = v[0];
for (size_type i = 1; i < D; i++)
total += v[i];
return total;
}
inline value_type sumabs() const
{
using namespace ::std;
value_type total = abs(v[0]);
for (size_type i = 1; i < D; i++)
total += abs(v[i]);
return total;
}
inline value_type sumsqr() const
{
value_type total = sqr(v[0]);
for (size_type i = 1; i < D; i++)
total += sqr(v[i]);
return total;
}
inline float_type avg() const
{ return float_type(sum()) / D; }
inline float_type avgabs() const
{ return float_type(sumabs()) / D; }
inline float_type mean() const
{ return float_type(sum()) / D; }
inline float_type meanabs() const
{ return float_type(sumabs()) / D; }
inline float_type rms() const
{ using namespace ::std;
return sqrt(float_type(sumsqr()) / D); }
inline value_type product() const
{
value_type total = v[0];
for (size_type i = 1; i < D; i++)
total *= v[i];
return total;
}
inline value_type min() const
{
value_type m = v[0];
for (size_type i = 1; i < D; i++)
if (v[i] < m)
m = v[i];
return m;
}
inline value_type minabs() const
{
using namespace ::std;
value_type m = abs(v[0]);
for (size_type i = 1; i < D; i++) {
value_type absvi = abs(v[i]);
if (absvi < m)
m = absvi;
}
return m;
}
inline value_type max() const
{
value_type m = v[0];
for (size_type i = 1; i < D; i++)
if (v[i] > m)
m = v[i];
return m;
}
inline value_type maxabs() const
{
using namespace ::std;
value_type m = abs(v[0]);
for (size_type i = 1; i < D; i++) {
value_type absvi = abs(v[i]);
if (absvi > m)
m = absvi;
}
return m;
}
inline Vec apply(value_type func(value_type)) const
{
Vec result(VEC_UNINITIALIZED);
for (size_type i = 0; i < D; i++)
result[i] = func(v[i]);
return result;
}
inline Vec apply(value_type func(const value_type&)) const
{
Vec result(VEC_UNINITIALIZED);
for (size_type i = 0; i < D; i++)
result[i] = func(v[i]);
return result;
}
// Circular (rotating) shift by n positions; negative n rotates the
// other way.
// BUGFIX: the modulo is now done in int. Previously "n % D" converted
// the signed n to the unsigned size_t D, so a negative n wrapped to a
// huge value and yielded the wrong rotation (e.g. n = -1, D = 3 on a
// 64-bit size_t produced no shift at all instead of a shift by 2).
inline Vec cshift(int n) const
{
Vec result(VEC_UNINITIALIZED);
if (n < 0)
n = (n % (int) D) + (int) D;
for (size_type i = 0; i < D; i++)
result[i] = v[(i+n)%D];
return result;
}
inline Vec shift(int n) const
{
using namespace ::std;
if (unlikely(abs(n) >= D))
return Vec();
Vec result; // Must start as zero, so no VEC_UNINITIALIZED
size_type start = n < 0 ? -n : 0;
size_type stop = n > 0 ? D - n : D;
for (size_type i = start; i < stop; i++)
result[i] = v[i+n];
return result;
}
// Returns index of first element of the Vec that matches the
// given value exactly. Returns -1 if not found.
inline int indexof(const T &x_) const
{
for (size_t i = 0; i < D; i++) {
if (v[i] == x_)
return i;
}
return -1;
}
}; // class Vec
// Shorthands for particular flavors of Vecs.
// "vec" and "point" are the workhorse 3-float types; the suffixed forms
// give explicit dimension (2-4), with i* = int and d* = double elements.
typedef Vec<3,float> vec;
typedef Vec<3,float> point;
typedef Vec<2,float> vec2;
typedef Vec<3,float> vec3;
typedef Vec<4,float> vec4;
typedef Vec<2,float> point2;
typedef Vec<3,float> point3;
typedef Vec<4,float> point4;
typedef Vec<2,int> ivec2;
typedef Vec<3,int> ivec3;
typedef Vec<4,int> ivec4;
typedef Vec<2,double> dvec2;
typedef Vec<3,double> dvec3;
typedef Vec<4,double> dvec4;
// Nonmember operators that take two Vecs
// Componentwise sum of two Vecs.
template <size_t D, class T>
static inline const Vec<D,T> operator + (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
Vec<D,T> out(VEC_UNINITIALIZED);
for (size_t k = 0; k != D; ++k)
out[k] = v1[k] + v2[k];
return out;
}
// Componentwise difference of two Vecs.
template <size_t D, class T>
static inline const Vec<D,T> operator - (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
Vec<D,T> out(VEC_UNINITIALIZED);
for (size_t k = 0; k != D; ++k)
out[k] = v1[k] - v2[k];
return out;
}
// Componentwise (Hadamard) product of two Vecs.
template <size_t D, class T>
static inline const Vec<D,T> operator * (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
Vec<D,T> out(VEC_UNINITIALIZED);
for (size_t k = 0; k != D; ++k)
out[k] = v1[k] * v2[k];
return out;
}
// Componentwise quotient of two Vecs.
template <size_t D, class T>
static inline const Vec<D,T> operator / (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
Vec<D,T> out(VEC_UNINITIALIZED);
for (size_t k = 0; k != D; ++k)
out[k] = v1[k] / v2[k];
return out;
}
// Dot product
// Dot product, spelled as operator ^ (see the DOT macro below).
// Accumulation order matches the original (component 0 first), so
// floating-point results are bit-identical.
template <size_t D, class T>
static inline const T operator ^ (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
T acc = v1[0] * v2[0];
for (size_t k = 1; k != D; ++k)
acc += v1[k] * v2[k];
return acc;
}
#define DOT ^
// Named-function spelling of the dot product; identical to v1 DOT v2.
template <size_t D, class T>
static inline const T dot(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
return v1 ^ v2;
}
// Cross product (3-vectors only), spelled as operator% (also cross() below).
template <class T>
static inline const Vec<3,T> operator % (const Vec<3,T> &a, const Vec<3,T> &b)
{
	return Vec<3,T>(a[1]*b[2] - a[2]*b[1],
	                a[2]*b[0] - a[0]*b[2],
	                a[0]*b[1] - a[1]*b[0]);
}
#define CROSS %
// GLSL-compatible spelling of the cross product.
template <class T>
static inline const Vec<3,T> cross(const Vec<3,T> &v1, const Vec<3,T> &v2)
{
	return v1 CROSS v2;
}
// Component-wise equality and inequality.  These return a single bool,
// unlike valarrays, which return one bool per component.
// (#include the usual caveats about comparing floats for equality...)
template <size_t D, class T>
static inline bool operator == (const Vec<D,T> &a, const Vec<D,T> &b)
{
	for (size_t k = 0; k < D; k++) {
		if (a[k] != b[k])
			return false;
	}
	return true;
}
template <size_t D, class T>
static inline bool operator != (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	// Defined as the logical negation of operator==.
	return !(v1 == v2);
}
// Comparison by lexicographical ordering - not necessarily useful on its own,
// but necessary in order to put Vecs in sets, maps, etc.
template <size_t D, class T>
static inline bool operator < (const Vec<D,T> &a, const Vec<D,T> &b)
{
	for (size_t k = 0; k < D; k++) {
		if (a[k] < b[k])
			return true;
		if (a[k] != b[k]) // covers a[k] > b[k], and also catches NaN
			return false;
	}
	return false; // all components equal
}
template <size_t D, class T>
static inline bool operator > (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	return v2 < v1;
}
template <size_t D, class T>
static inline bool operator <= (const Vec<D,T> &a, const Vec<D,T> &b)
{
	for (size_t k = 0; k < D; k++) {
		if (a[k] < b[k])
			return true;
		if (a[k] != b[k]) // covers a[k] > b[k], and also catches NaN
			return false;
	}
	return true; // equal vectors compare <=
}
template <size_t D, class T>
static inline bool operator >= (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	return v2 <= v1;
}
// Unary + and -
template <size_t D, class T>
static inline const Vec<D,T> &operator + (const Vec<D,T> &v)
{
	// Unary plus is the identity: hand back the same reference.
	return v;
}
template <size_t D, class T>
static inline const Vec<D,T> operator - (const Vec<D,T> &v)
{
	Vec<D,T> neg(VEC_UNINITIALIZED);
	for (size_t k = 0; k < D; k++)
		neg[k] = -v[k];
	return neg;
}
// Unary ! - true iff every component is zero
template <size_t D, class T>
static inline bool operator ! (const Vec<D,T> &v)
{
	for (size_t k = 0; k < D; k++) {
		if (v[k] != 0)
			return false;
	}
	return true;
}
// iostream output.  Formats result as "(1, 2, 3)"
template <size_t D, class T>
static inline ::std::ostream &operator << (::std::ostream &os, const Vec<D,T> &v)
{
	os << "(";
	for (size_t k = 0; k + 1 < D; k++)
		os << v[k] << ", ";
	return os << v[D-1] << ")";
}
// iostream input.  Accepts the vec surrounded by (), [], or nothing,
// with components separated by comma, semicolon, or whitespace
template <size_t D, class T>
static inline ::std::istream &operator >> (::std::istream &is, Vec<D,T> &v)
{
	using namespace ::std;
	char c1 = 0, c2 = 0;

	// Optional opening bracket.  Remember which one (if any) in c1 so the
	// matching closer can be required at the end; push back anything else.
	is >> c1;
	if (c1 != '(' && c1 != '[') {
		c1 = 0;
		is.unget();
	}
	is >> v[0];
	for (size_t i = 1; i < D; i++) {
		// Optional separator before each subsequent component.
		is >> ws >> c2;
		if (c2 != ',' && c2 != ';')
			is.unget();
		is >> v[i];
	}
	// If an opening bracket was seen, insist on the matching closer.
	if (c1) {
		is >> ws >> c2;
		if (c1 == '(' && c2 != ')')
			is.setstate(ios::failbit);
		else if (c1 == '[' && c2 != ']')
			is.setstate(ios::failbit);
	}
	// On any parse failure, leave v in a defined (default-constructed) state.
	if (!is.good())
		v = Vec<D,T>();
	return is;
}
// Vec functions based on GLSL - the scalar ones are in mathutil.h
// Returns N oriented to face against the incident vector I,
// using Nref as the reference normal (as in GLSL faceforward()).
template <size_t D, class T>
static inline Vec<D,T> faceforward(const Vec<D,T> &N, const Vec<D,T> &I,
	const Vec<D,T> &Nref)
{
	if ((Nref DOT I) < 0)
		return N;
	return -N;
}
// Reflects I about the plane with normal N (N assumed unit length).
template <size_t D, class T>
static inline Vec<D,T> reflect(const Vec<D,T> &I, const Vec<D,T> &N)
{
	const T twice_ndoti = 2 * (N DOT I);
	return I - twice_ndoti * N;
}
// Refraction of incident vector I at a surface with (unit) normal N, for
// relative index of refraction eta, following the GLSL refract() spec.
// Returns the zero vector on total internal reflection (k < 0).
template <size_t D, class T>
static inline Vec<D,T> refract(const Vec<D,T> &I, const Vec<D,T> &N,
	const T &eta)
{
	using namespace ::std;
	T NdotI = N DOT I;
	T k = 1 - sqr(eta) * (1 - sqr(NdotI));
	if (unlikely(k < 0))
		return Vec<D,T>();
	else
		// Fixed: GLSL defines this as eta*I - (eta*dot(N,I) + sqrt(k))*N;
		// the previous code multiplied eta*NdotI by sqrt(k) instead of
		// adding sqrt(k), producing a non-unit, incorrectly-bent vector.
		return eta * I - (eta * NdotI + sqrt(k)) * N;
}
// Squared length of a Vec (sum of squared components).
template <size_t D, class T>
static inline const T len2(const Vec<D,T> &v)
{
	T total = sqr(v[0]);
	for (size_t k = 1; k < D; k++)
		total += sqr(v[k]);
	return total;
}
// Euclidean length.
template <size_t D, class T>
static inline const typename Vec<D,T>::float_type
len(const Vec<D,T> &v)
{
	using namespace ::std;
	return sqrt(len2(v));
}
// Alternate, GLSL-compatible spelling of len2() and len()
template <size_t D, class T>
static inline const T length2(const Vec<D,T> &v)
{
	return len2(v);
}
template <size_t D, class T>
static inline const typename Vec<D,T>::float_type
length(const Vec<D,T> &v)
{
	return len(v);
}
// Squared Euclidean distance between two Vecs.
template <size_t D, class T>
static inline const T dist2(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	T total = sqr(v2[0] - v1[0]);
	for (size_t k = 1; k < D; k++)
		total += sqr(v2[k] - v1[k]);
	return total;
}
// Euclidean distance.
template <size_t D, class T>
static inline const typename Vec<D,T>::float_type
dist(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	return sqrt(dist2(v1, v2));
}
// Alternate, GLSL-compatible spelling of dist2() and dist()
template <size_t D, class T>
static inline const T distance2(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	return dist2(v1, v2);
}
template <size_t D, class T>
static inline const typename Vec<D,T>::float_type
distance(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	return dist(v1, v2);
}
// In-place normalization to unit length.  For historical reasons, this is
// incompatible with the GLSL normalize() - that's implemented as normalized()
template <size_t D, class T>
static inline void normalize(Vec<D,T> &v)
{
	T l = len(v);
	if (unlikely(!(l > 0))) {
		// Degenerate input (zero length or NaN): emit a sane unit vector
		// pointing along the last axis.
		for (size_t k = 0; k < D - 1; k++)
			v[k] = 0;
		v[D-1] = 1;
		return;
	}
	const T inv = 1 / l;
	for (size_t k = 0; k < D; k++)
		v[k] *= inv;
}
// Returns a normalized copy, leaving the original alone (GLSL normalize()).
template <size_t D, class T>
static inline Vec<D,T> normalized(const Vec<D,T> &v)
{
	Vec<D,T> unit(v);
	normalize(unit);
	return unit;
}
// Area-weighted triangle face normal: half the cross product of two edge
// vectors, so the result's length equals the triangle's area.
template <class T>
static inline Vec<3,T> trinorm(const Vec<3,T> &v0, const Vec<3,T> &v1, const Vec<3,T> &v2)
{
	const Vec<3,T> e1 = v1 - v0, e2 = v2 - v0;
	return T(0.5) * (e1 CROSS e2);
}
// Angle between two vectors
template <size_t D, class T>
static inline const typename Vec<D,T>::float_type
angle(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	using namespace ::std;
	typedef typename Vec<D,T>::float_type FT;
	// Formula from section 12 of
	// http://www.cs.berkeley.edu/~wkahan/Mindless.pdf
	// Numerically stable for nearly-parallel and nearly-opposite vectors,
	// unlike the naive acos(dot / (len*len)) formulation.
	Vec<D,FT> x(v1), y(v2);
	// Cross-scale so x and y have equal lengths (|v1||v2| each), then the
	// angle follows from the lengths of their sum and difference.
	x *= len(v2);
	y *= len(v1);
	return 2 * atan2(len(x-y), len(x+y));
}
} // namespace trimesh
// Generic macros for declaring 1-, 2-, and 3- argument
// componentwise functions on Vecs.
// Applies a one-argument scalar function to each component.
#define VEC_DECLARE_ONEARG(name) \
 template < ::std::size_t D, class T > \
 static inline trimesh::Vec<D,T> name(const trimesh::Vec<D,T> &v) \
 { \
 using namespace ::std; \
 using namespace ::trimesh; \
 Vec<D,T> result(VEC_UNINITIALIZED); \
 for (size_t i = 0; i < D; i++) \
 result[i] = name(v[i]); \
 return result; \
 }
// Vector-scalar, scalar-vector, and componentwise vector-vector versions
#define VEC_DECLARE_TWOARG_VS(name) \
 template < ::std::size_t D, class T > \
 static inline trimesh::Vec<D,T> name(const trimesh::Vec<D,T> &v, const T &a) \
 { \
 using namespace ::std; \
 using namespace ::trimesh; \
 Vec<D,T> result(VEC_UNINITIALIZED); \
 for (size_t i = 0; i < D; i++) \
 result[i] = name(v[i], a); \
 return result; \
 }
#define VEC_DECLARE_TWOARG_SV(name) \
 template < ::std::size_t D, class T > \
 static inline trimesh::Vec<D,T> name(const T &a, const trimesh::Vec<D,T> &v) \
 { \
 using namespace ::std; \
 using namespace ::trimesh; \
 Vec<D,T> result(VEC_UNINITIALIZED); \
 for (size_t i = 0; i < D; i++) \
 result[i] = name(a, v[i]); \
 return result; \
 }
#define VEC_DECLARE_TWOARG_VV(name) \
 template < ::std::size_t D, class T > \
 static inline trimesh::Vec<D,T> name(const trimesh::Vec<D,T> &v, const trimesh::Vec<D,T> &w) \
 { \
 using namespace ::std; \
 using namespace ::trimesh; \
 Vec<D,T> result(VEC_UNINITIALIZED); \
 for (size_t i = 0; i < D; i++) \
 result[i] = name(v[i], w[i]); \
 return result; \
 }
// Three-argument variants: vector-scalar-scalar, scalar-scalar-vector,
// and fully componentwise vector-vector-vector.
#define VEC_DECLARE_THREEARG_VSS(name) \
 template < ::std::size_t D, class T > \
 static inline trimesh::Vec<D,T> name(const trimesh::Vec<D,T> &v, const T &a, const T &b) \
 { \
 using namespace ::std; \
 using namespace ::trimesh; \
 Vec<D,T> result(VEC_UNINITIALIZED); \
 for (size_t i = 0; i < D; i++) \
 result[i] = name(v[i], a, b); \
 return result; \
 }
#define VEC_DECLARE_THREEARG_SSV(name) \
 template < ::std::size_t D, class T > \
 static inline trimesh::Vec<D,T> name(const T &a, const T &b, const trimesh::Vec<D,T> &v) \
 { \
 using namespace ::std; \
 using namespace ::trimesh; \
 Vec<D,T> result(VEC_UNINITIALIZED); \
 for (size_t i = 0; i < D; i++) \
 result[i] = name(a, b, v[i]); \
 return result; \
 }
#define VEC_DECLARE_THREEARG_VVV(name) \
 template < ::std::size_t D, class T > \
 static inline trimesh::Vec<D,T> name(const trimesh::Vec<D,T> &v, const trimesh::Vec<D,T> &w, const trimesh::Vec<D,T> &x) \
 { \
 using namespace ::std; \
 using namespace ::trimesh; \
 Vec<D,T> result(VEC_UNINITIALIZED); \
 for (size_t i = 0; i < D; i++) \
 result[i] = name(v[i], w[i], x[i]); \
 return result; \
 }
// The following is the list of functions in C89 and C++98,
// PLUS the ones in mathcompat.h (which are POSIX / C99 / C++11)
// MINUS frexp, ldexp, and modf (which have irregular calling conventions).
// They are supposed to be in namespace std, but Visual Studio and some
// older compilers also declare them in the global namespace.
// In the name of compatibility, we put them in both global and std.
// Each invocation below instantiates the corresponding componentwise wrapper.
VEC_DECLARE_ONEARG(abs)
VEC_DECLARE_ONEARG(acos)
VEC_DECLARE_ONEARG(acosh)
VEC_DECLARE_ONEARG(asin)
VEC_DECLARE_ONEARG(asinh)
VEC_DECLARE_ONEARG(atan)
VEC_DECLARE_ONEARG(atanh)
VEC_DECLARE_TWOARG_VV(atan2)
VEC_DECLARE_ONEARG(cbrt)
VEC_DECLARE_ONEARG(ceil)
VEC_DECLARE_ONEARG(cos)
VEC_DECLARE_ONEARG(cosh)
VEC_DECLARE_ONEARG(exp)
VEC_DECLARE_ONEARG(exp2)
VEC_DECLARE_ONEARG(expm1)
VEC_DECLARE_ONEARG(fabs)
VEC_DECLARE_TWOARG_VS(fdim)
VEC_DECLARE_TWOARG_SV(fdim)
VEC_DECLARE_TWOARG_VV(fdim)
VEC_DECLARE_ONEARG(floor)
VEC_DECLARE_TWOARG_VS(fmod)
VEC_DECLARE_TWOARG_VV(fmod)
VEC_DECLARE_TWOARG_VS(hypot)
VEC_DECLARE_TWOARG_SV(hypot)
VEC_DECLARE_TWOARG_VV(hypot)
VEC_DECLARE_ONEARG(log)
VEC_DECLARE_ONEARG(log10)
VEC_DECLARE_ONEARG(log1p)
VEC_DECLARE_ONEARG(log2)
VEC_DECLARE_TWOARG_VS(pow)
VEC_DECLARE_TWOARG_SV(pow)
VEC_DECLARE_TWOARG_VV(pow)
VEC_DECLARE_ONEARG(round)
VEC_DECLARE_ONEARG(sin)
VEC_DECLARE_ONEARG(sinh)
VEC_DECLARE_ONEARG(sqrt)
VEC_DECLARE_ONEARG(tan)
VEC_DECLARE_ONEARG(tanh)
VEC_DECLARE_ONEARG(trunc)
// Inject into namespace std
namespace std {
using ::abs;
using ::acos;
using ::asin;
using ::atan;
using ::atan2;
using ::cbrt;
using ::ceil;
using ::cos;
using ::cosh;
using ::exp;
using ::fabs;
using ::floor;
using ::fmod;
using ::hypot;
using ::log;
using ::log10;
using ::pow;
using ::round;
using ::sin;
using ::sinh;
using ::sqrt;
using ::tan;
using ::tanh;
using ::trunc;
// These are only in namespace std.
// Componentwise min/max overloads mirroring std::min/std::max for scalars.
VEC_DECLARE_TWOARG_VS(min)
VEC_DECLARE_TWOARG_SV(min)
VEC_DECLARE_TWOARG_VV(min)
VEC_DECLARE_TWOARG_VS(max)
VEC_DECLARE_TWOARG_SV(max)
VEC_DECLARE_TWOARG_VV(max)
// Swap two Vecs. Not atomic, unlike class method.
template <size_t D, class T>
static inline void swap(const ::trimesh::Vec<D,T> &v1, const ::trimesh::Vec<D,T> &v2)
{
for (size_t i = 0; i < D; i++)
swap(v1[i], v2[i]);
}
// Get an element with compile-time bounds checking
// (mirrors std::get on tuples/arrays; I is the component index).
template <size_t I, size_t D, class T>
static inline T &get(::trimesh::Vec<D,T> &v)
{
	using namespace ::trimesh;
	TRIMESH_STATIC_CHECK(I < D); // compile error if I is out of range
	return v[I];
}
template <size_t I, size_t D, class T>
static inline const T &get(const ::trimesh::Vec<D,T> &v)
{
	using namespace ::trimesh;
	TRIMESH_STATIC_CHECK(I < D); // compile error if I is out of range
	return v[I];
}
} // namespace std
// These are new functions declared in namespace trimesh (in mathutil.h)
// Componentwise wrappers for the scalar GLSL-style helpers.
namespace trimesh {
VEC_DECLARE_ONEARG(sqr)
VEC_DECLARE_ONEARG(cube)
VEC_DECLARE_ONEARG(sgn)
VEC_DECLARE_ONEARG(radians)
VEC_DECLARE_ONEARG(degrees)
VEC_DECLARE_ONEARG(fract)
VEC_DECLARE_THREEARG_VSS(clamp)
VEC_DECLARE_THREEARG_VVV(clamp)
VEC_DECLARE_TWOARG_SV(step)
VEC_DECLARE_TWOARG_VV(step)
VEC_DECLARE_THREEARG_SSV(smoothstep)
VEC_DECLARE_THREEARG_VVV(smoothstep)
} // namespace trimesh
#undef VEC_DECLARE_ONEARG
#undef VEC_DECLARE_TWOARG_VS
#undef VEC_DECLARE_TWOARG_SV
#undef VEC_DECLARE_TWOARG_VV
#undef VEC_DECLARE_THREEARG_VSS
#undef VEC_DECLARE_THREEARG_SSV
#undef VEC_DECLARE_THREEARG_VVV
#undef inline
#endif
|
ccl_power.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <gsl/gsl_integration.h>
#include <gsl/gsl_interp.h>
#include <gsl/gsl_spline.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_sf_bessel.h>
#include "ccl.h"
#include "ccl_f2d.h"
#include "ccl_emu17.h"
#include "ccl_emu17_params.h"
// helper functions for BBKS and EH98
// Adapters matching the pk(params, payload, k) callback signature that
// ccl_compute_linpower_analytic() expects.
static double bbks_power(ccl_parameters *params, void *p, double k) {
  return ccl_bbks_power(params, k);  // payload p is unused for BBKS
}
static double eh_power(ccl_parameters *params, void *p, double k) {
  return ccl_eh_power(params, (eh_struct*)p, k);  // payload is an eh_struct
}
/*------ ROUTINE: ccl_compute_linpower_analytic -----
INPUT: cosmology, opaque payload for the pk callback, the pk callback itself
TASK: build a log-spaced P(k,a) spline for an analytic linear power spectrum,
      scaled by the linear growth factor in a and normalized to sigma8.
      Returns NULL (with *status set) on any failure.
*/
static ccl_f2d_t *ccl_compute_linpower_analytic(ccl_cosmology* cosmo, void* par,
                                                double (*pk)(ccl_parameters* params,
                                                             void* p, double k),
                                                int* status) {
  ccl_f2d_t *psp_out = NULL;
  double sigma8,log_sigma8;
  //These are the limits of the splining range
  double kmin = cosmo->spline_params.K_MIN;
  double kmax = cosmo->spline_params.K_MAX;
  //Compute nk from number of decades and N_K = # k per decade
  double ndecades = log10(kmax) - log10(kmin);
  int nk = (int)ceil(ndecades*cosmo->spline_params.N_K);
  // Compute na using predefined spline spacing
  double amin = cosmo->spline_params.A_SPLINE_MINLOG_PK;
  double amax = cosmo->spline_params.A_SPLINE_MAX;
  // The log- and linear-spaced a ranges share one endpoint, hence the -1.
  int na = cosmo->spline_params.A_SPLINE_NA_PK+cosmo->spline_params.A_SPLINE_NLOG_PK-1;

  // Exit if sigma8 wasn't specified
  if (isnan(cosmo->params.sigma8)) {
    *status = CCL_ERROR_INCONSISTENT;
    ccl_cosmology_set_status_message(cosmo,
      "ccl_power.c: ccl_compute_linpower_analytic(): "
      "sigma8 not set, required for analytic power spectra\n");
    return NULL;
  }

  // The x array is initially k, but will later
  // be overwritten with log(k)
  double *x=NULL, *y=NULL, *z=NULL, *y2d=NULL;
  x=ccl_log_spacing(kmin, kmax, nk);
  if(x==NULL) {
    *status = CCL_ERROR_MEMORY;
    ccl_cosmology_set_status_message(cosmo,
      "ccl_power.c: ccl_compute_linpower_analytic(): "
      "memory allocation\n");
  }
  if(*status==0) {
    // y holds log(P(k)) at a single epoch before growth scaling
    y=malloc(sizeof(double)*nk);
    if(y==NULL) {
      *status = CCL_ERROR_MEMORY;
      ccl_cosmology_set_status_message(cosmo,
        "ccl_power.c: ccl_compute_linpower_analytic(): "
        "memory allocation\n");
    }
  }
  if(*status==0) {
    // z is the scale-factor grid: log-spaced at low a, linear at high a
    z=ccl_linlog_spacing(amin, cosmo->spline_params.A_SPLINE_MIN_PK,
                         amax, cosmo->spline_params.A_SPLINE_NLOG_PK,
                         cosmo->spline_params.A_SPLINE_NA_PK);
    if(z==NULL) {
      *status = CCL_ERROR_MEMORY;
      ccl_cosmology_set_status_message(cosmo,
        "ccl_power.c: ccl_compute_linpower_analytic(): "
        "memory allocation\n");
    }
  }
  if(*status==0) {
    // 2D table of log(P) with a as the slow (row) index and k as the fast one
    y2d = malloc(nk * na * sizeof(double));
    if(y2d==NULL) {
      *status = CCL_ERROR_MEMORY;
      ccl_cosmology_set_status_message(cosmo,
        "ccl_power.c: ccl_compute_linpower_analytic(): "
        "memory allocation\n");
    }
  }
  if(*status==0) {
    // Calculate P(k) on k grid. After this loop, x will contain log(k) and y
    // will contain log(pk) [which has not yet been normalized]
    // After this loop x will contain log(k)
    for (int i=0; i<nk; i++) {
      y[i] = log((*pk)(&cosmo->params, par, x[i]));
      x[i] = log(x[i]);
    }
  }
  if(*status==0) {
    // Scale each epoch by the squared linear growth factor (additive in log)
    for (int j = 0; j < na; j++) {
      double gfac = ccl_growth_factor(cosmo,z[j], status);
      double g2 = 2.*log(gfac);
      for (int i=0; i<nk; i++) {
        y2d[j*nk+i] = y[i]+g2;
      }
    }
  }
  if(*status==0) {
    // Provisional spline, used only to measure the current sigma8
    psp_out=ccl_f2d_t_new(na,z,nk,x,y2d,NULL,NULL,0,
                          1,2,ccl_f2d_cclgrowth,1,0,2,
                          ccl_f2d_3,status);
  }
  if(*status==0) {
    sigma8 = ccl_sigma8(cosmo, psp_out, status);
  }
  if(*status==0) {
    // Calculate normalization factor using computed value of sigma8, then
    // recompute P(k, a) using this normalization
    log_sigma8 = 2*(log(cosmo->params.sigma8) - log(sigma8));
    for(int j=0;j<na*nk;j++)
      y2d[j] += log_sigma8;
  }
  if(*status==0) {
    // Free the previous P(k,a) spline, and allocate a new one to store the
    // properly-normalized P(k,a)
    ccl_f2d_t_free(psp_out);
    psp_out = ccl_f2d_t_new(na,z,nk,x,y2d,NULL,NULL,0,
                            1,2,ccl_f2d_cclgrowth,1,0,2,
                            ccl_f2d_3,status);
  }
  // ccl_f2d_t_new copies its inputs, so the scratch arrays are always freed
  free(x);
  free(y);
  free(z);
  free(y2d);
  return psp_out;
}
// Linear power spectrum from the BBKS transfer function.
// Thin wrapper: BBKS needs no extra parameters, hence the NULL payload.
ccl_f2d_t *ccl_compute_linpower_bbks(ccl_cosmology *cosmo, int *status)
{
  return ccl_compute_linpower_analytic(cosmo, NULL, bbks_power, status);
}
// Linear power spectrum from the Eisenstein & Hu transfer function.
// `wiggled` selects the version with/without BAO wiggles.
// Returns NULL with *status set if the EH workspace cannot be allocated.
ccl_f2d_t *ccl_compute_linpower_eh(ccl_cosmology *cosmo, int wiggled, int *status)
{
  ccl_f2d_t *psp = NULL;
  eh_struct *eh = ccl_eh_struct_new(&(cosmo->params), wiggled);
  if (eh == NULL) {
    *status = CCL_ERROR_MEMORY;
  }
  else {
    psp = ccl_compute_linpower_analytic(cosmo, eh, eh_power, status);
  }
  free(eh);  // free(NULL) is a no-op, so this is safe on the error path
  return psp;
}
/*------ ROUTINE: ccl_compute_power_emu -----
INPUT: cosmology
TASK: provide spline for the emulated power spectrum from Cosmic EMU
*/
ccl_f2d_t *ccl_compute_power_emu(ccl_cosmology * cosmo, int * status)
{
  // Only assigned (and later read) when N_nu_mass>0 with the
  // ccl_emu_equalize option - both guarded by the same condition.
  double Omeganuh2_eq;
  ccl_f2d_t *psp_out=NULL;

  // Check ranges to see if the cosmology is valid
  if(*status==0) {
    // The emulator was only trained inside this h range
    if((cosmo->params.h<0.55) || (cosmo->params.h>0.85)){
      *status=CCL_ERROR_INCONSISTENT;
      ccl_cosmology_set_status_message(cosmo,
        "ccl_power.c: ccl_compute_power_emu(): "
        "h is outside allowed range\n");
    }
  }
  if(*status==0) {
    // Check if the cosmology has been set up with equal neutrino masses for the emulator
    // If not, check if the user has forced redistribution of masses and if so do this.
    if(cosmo->params.N_nu_mass>0) {
      if (cosmo->config.emulator_neutrinos_method == ccl_emu_strict){
        // Strict mode: require exactly 3 equal masses
        if (cosmo->params.N_nu_mass==3){
          if (cosmo->params.m_nu[0] != cosmo->params.m_nu[1] ||
              cosmo->params.m_nu[0] != cosmo->params.m_nu[2] ||
              cosmo->params.m_nu[1] != cosmo->params.m_nu[2]){
            *status = CCL_ERROR_INCONSISTENT;
            ccl_cosmology_set_status_message(cosmo,
              "ccl_power.c: ccl_compute_power_emu(): "
              "In the default configuration, you must pass a list of 3 "
              "equal neutrino masses or pass a sum and set "
              "m_nu_type = 'equal'. If you wish to over-ride this, "
              "set config->emulator_neutrinos_method = "
              "'ccl_emu_equalize'. This will force the neutrinos to "
              "be of equal mass but will result in "
              "internal inconsistencies.\n");
          }
        }else if (cosmo->params.N_nu_mass!=3){
          *status = CCL_ERROR_INCONSISTENT;
          ccl_cosmology_set_status_message(cosmo,
            "ccl_power.c: ccl_compute_power_emu(): "
            "In the default configuration, you must pass a list of 3 "
            "equal neutrino masses or pass a sum and set "
            "m_nu_type = 'equal'. If you wish to over-ride this, "
            "set config->emulator_neutrinos_method = "
            "'ccl_emu_equalize'. This will force the neutrinos to "
            "be of equal mass but will result in "
            "internal inconsistencies.\n");
        }
      }else if (cosmo->config.emulator_neutrinos_method == ccl_emu_equalize){
        // Reset the masses to equal
        double mnu_eq[3] = {cosmo->params.sum_nu_masses / 3.,
                            cosmo->params.sum_nu_masses / 3.,
                            cosmo->params.sum_nu_masses / 3.};
        Omeganuh2_eq = ccl_Omeganuh2(1.0, 3, mnu_eq, cosmo->params.T_CMB, status);
      }
    } else {
      // Massless-neutrino cosmologies must match the emulator's Neff
      if(fabs(cosmo->params.N_nu_rel - 3.04)>1.e-6){
        *status=CCL_ERROR_INCONSISTENT;
        ccl_cosmology_set_status_message(cosmo,
          "ccl_power.c: ccl_compute_power_emu(): "
          "Set Neff = 3.04 for cosmic emulator predictions in "
          "absence of massive neutrinos.\n");
      }
    }
  }
  if(*status==0) {
    // Dark-energy bound from the emulator training range
    double w0wacomb = -cosmo->params.w0 - cosmo->params.wa;
    if(w0wacomb<8.1e-3){ //0.3^4
      *status=CCL_ERROR_INCONSISTENT;
      ccl_cosmology_set_status_message(cosmo,
        "ccl_power.c: ccl_compute_power_emu(): "
        "w0 and wa do not satisfy the emulator bound\n");
    }
  }
  if(*status==0) {
    // Omega_nu h^2 bound from the emulator training range
    if(cosmo->params.Omega_nu_mass*cosmo->params.h*cosmo->params.h>0.01){
      *status=CCL_ERROR_INCONSISTENT;
      ccl_cosmology_set_status_message(cosmo,
        "ccl_power.c: ccl_compute_power_emu(): "
        "Omega_nu does not satisfy the emulator bound\n");
    }
  }
  if(*status==0) {
    // Check to see if sigma8 was defined
    if(isnan(cosmo->params.sigma8)){
      *status=CCL_ERROR_INCONSISTENT;
      ccl_cosmology_set_status_message(cosmo,
        "ccl_power.c: ccl_compute_power_emu(): "
        "sigma8 is not defined; specify sigma8 instead of A_s\n");
    }
  }

  int na=cosmo->spline_params.A_SPLINE_NA_PK;
  double *lpk_1a=NULL,*lk=NULL,*aemu=NULL,*lpk_nl=NULL;
  if (*status == 0) {
    //Now start the NL computation with the emulator
    //These are the limits of the splining range
    aemu = ccl_linear_spacing(A_MIN_EMU,cosmo->spline_params.A_SPLINE_MAX, na);
    if(aemu==NULL) {
      *status=CCL_ERROR_MEMORY;
      ccl_cosmology_set_status_message(cosmo,
        "ccl_power.c: ccl_compute_power_emu(): "
        "memory allocation error\n");
    }
  }
  if (*status == 0) {
    lk=malloc(NK_EMU*sizeof(double));
    if(lk==NULL) {
      *status=CCL_ERROR_MEMORY;
      ccl_cosmology_set_status_message(cosmo,
        "ccl_power.c: ccl_compute_power_emu(): "
        "memory allocation error\n");
    }
  }
  if (*status == 0) {
    //The emulator only computes power spectra at fixed nodes in k,
    //given by the global variable "mode"
    for (int i=0; i<NK_EMU; i++)
      lk[i] = log(mode[i]);
  }
  if (*status == 0) {
    // 2D table of log(P_NL) with a as the slow index and k as the fast one
    lpk_nl = malloc(NK_EMU * na * sizeof(double));
    if(lpk_nl==NULL) {
      *status=CCL_ERROR_MEMORY;
      ccl_cosmology_set_status_message(cosmo,
        "ccl_power.c: ccl_compute_power_emu(): "
        "memory allocation error\n");
    }
  }
  if (*status == 0) {
    // Scratch buffer for one epoch's emulator output
    lpk_1a=malloc(NK_EMU*sizeof(double));
    if(lpk_1a==NULL) {
      *status=CCL_ERROR_MEMORY;
      ccl_cosmology_set_status_message(cosmo,
        "ccl_power.c: ccl_compute_power_emu(): "
        "memory allocation error\n");
    }
  }
  if (*status == 0) {
    double emu_par[9];
    //For each redshift:
    for (int j = 0; j < na; j++){
      //Turn cosmology into emu_par:
      emu_par[0] = (cosmo->params.Omega_c+cosmo->params.Omega_b)*cosmo->params.h*cosmo->params.h;
      emu_par[1] = cosmo->params.Omega_b*cosmo->params.h*cosmo->params.h;
      emu_par[2] = cosmo->params.sigma8;
      emu_par[3] = cosmo->params.h;
      emu_par[4] = cosmo->params.n_s;
      emu_par[5] = cosmo->params.w0;
      emu_par[6] = cosmo->params.wa;
      if ((cosmo->params.N_nu_mass>0) &&
          (cosmo->config.emulator_neutrinos_method == ccl_emu_equalize)){
        emu_par[7] = Omeganuh2_eq;
      }else{
        emu_par[7] = cosmo->params.Omega_nu_mass*cosmo->params.h*cosmo->params.h;
      }
      emu_par[8] = 1./aemu[j]-1;
      //Need to have this here because otherwise overwritten by emu in each loop
      //Call emulator at this redshift
      ccl_pkemu(emu_par,NK_EMU,lpk_1a, status, cosmo);
      if (*status) {
        *status=CCL_ERROR_MEMORY;
        ccl_cosmology_set_status_message(cosmo,
          "ccl_power.c: ccl_compute_power_emu(): "
          "memory allocation error\n");
        break;
      }
      for (int i=0; i<NK_EMU; i++)
        lpk_nl[j*NK_EMU+i] = log(lpk_1a[i]);
    }
  }
  if(*status==0) {
    // No extrapolation outside the emulator's k range
    psp_out=ccl_f2d_t_new(na,aemu,NK_EMU,lk,lpk_nl,NULL,NULL,0,
                          1,2,ccl_f2d_no_extrapol,
                          1,0,2,ccl_f2d_3,status);
  }
  free(lpk_1a);
  free(lk);
  free(aemu);
  free(lpk_nl);
  return psp_out;
}
// Applies the HALOFIT non-linear correction to a linear power spectrum
// spline `plin`, returning a new ccl_f2d_t with log(P_NL(k,a)) sampled on
// the same (log k, a) grid as the input.  Returns NULL on error.
ccl_f2d_t *ccl_apply_halofit(ccl_cosmology* cosmo, ccl_f2d_t *plin, int *status)
{
  ccl_f2d_t *psp_out=NULL;
  size_t nk, na;
  double *x, *z, *y2d=NULL;

  //Halofit structure
  halofit_struct *hf=NULL;
  hf = ccl_halofit_struct_new(cosmo, plin, status);

  if(*status == 0) {
    //Find lk array
    // The input spline may be 1D in k (fk), or full 2D (fka); the
    // grids are borrowed from whichever representation exists.
    if(plin->fk != NULL) {
      nk = plin->fk->size;
      x = plin->fk->x;
    }
    else {
      nk = plin->fka->interp_object.xsize;
      x = plin->fka->xarr;
    }
    //Find a array
    if(plin->fa != NULL) {
      na = plin->fa->size;
      z = plin->fa->x;
    }
    else {
      na = plin->fka->interp_object.ysize;
      z = plin->fka->yarr;
    }
    //Allocate pka array
    y2d = malloc(nk * na * sizeof(double));
    if (y2d == NULL) {
      *status = CCL_ERROR_MEMORY;
      ccl_cosmology_set_status_message(cosmo,
        "ccl_power.c: ccl_apply_halofit(): memory allocation\n");
    }
  }
  if (*status == 0) {
    // Evaluate halofit P_NL on the (a, k) grid; statuses from individual
    // evaluations short-circuit the remaining work.
    for (int j = 0; j<na; j++) {
      for (int i=0; i<nk; i++) {
        if (*status == 0) {
          double pk = ccl_halofit_power(cosmo, plin, x[i], z[j], hf, status);
          y2d[j*nk + i] = log(pk);
        }
      }
    }
  }
  if(*status == 0)
    psp_out = ccl_f2d_t_new(na, z, nk, x, y2d, NULL, NULL, 0,
                            1, 2, ccl_f2d_cclgrowth, 1,
                            0, 2, ccl_f2d_3, status);
  // y2d is copied by ccl_f2d_t_new, so it is always freed here
  free(y2d);
  ccl_halofit_struct_free(hf);
  return psp_out;
}
// Optionally rescales a linear power spectrum spline, either for
// modified-gravity (mu/Sigma) parameters or to renormalize to sigma8.
void ccl_rescale_linpower(ccl_cosmology* cosmo, ccl_f2d_t *psp,
                          int rescale_mg, int rescale_norm,
                          int *status)
{
  // Nothing to do unless at least one rescaling was requested.
  if (!(rescale_mg || rescale_norm))
    return;
  ccl_rescale_musigma_s8(cosmo, psp, rescale_mg, status);
}
// Params for sigma(R) integrand
typedef struct {
  ccl_cosmology *cosmo;
  double R;        // comoving smoothing radius
  double a;        // scale factor at which P(k) is evaluated
  ccl_f2d_t *psp;  // power spectrum spline
  int* status;
} SigmaR_pars;

// Params for sigma(V) integrand (same layout as SigmaR_pars; kept as a
// distinct type so each integrand documents which quantity it serves)
typedef struct {
  ccl_cosmology *cosmo;
  double R;        // comoving smoothing radius
  double a;        // scale factor at which P(k) is evaluated
  ccl_f2d_t *psp;  // power spectrum spline
  int* status;
} SigmaV_pars;

// Params for k_NL integrand
typedef struct {
  ccl_cosmology *cosmo;
  double a;        // scale factor at which P(k) is evaluated
  ccl_f2d_t *psp;  // power spectrum spline
  int* status;
} KNL_pars;
/* --------- ROUTINE: w_tophat_2d ---------
INPUT: kR, usually a wavenumber multiplied by a smoothing radius
TASK: Output the 2D tophat window function W(x) = 2 J1(x)/x
*/
// 2D tophat window W(x) = 2 J1(x)/x, evaluated at x = kR.
static double w_tophat_2d(double kR) {
  double kR2 = kR*kR;

  // This is the Maclaurin expansion of W(x)=2 J1(x)/x to O(x^10), with x=kR.
  // Necessary numerically because at low x W(x) relies on the fine cancellation of two terms
  if (kR < 0.1) {
    return 1. + kR2*(-1.0/8.0 + kR2*(1.0/192.0 +
                     kR2*(-1.0/9216.0 + kR2*(1.0/737280.0 +
                     kR2*(-1.0/88473600.0)))));
  }
  return 2 * gsl_sf_bessel_J1(kR) / kR;
}
// Integrand for sigmaB integral (used for the SSC covariance calculation)
// lk is log10(k); the integrand is P(k) k^2 W^2(kR) in d(log10 k).
static double sigma2B_integrand(double lk,void *params) {
  SigmaR_pars *par=(SigmaR_pars *)params;

  double k=pow(10.,lk);
  // ccl_f2d_t_eval expects ln(k), so convert from log10(k)
  double pk=ccl_f2d_t_eval(par->psp, lk * M_LN10, par->a,
                           par->cosmo, par->status);
  double kR=k*par->R;
  double w = w_tophat_2d(kR);
  return pk*k*k*w*w;
}
/* --------- ROUTINE: ccl_sigma2B ---------
INPUT: cosmology, comoving smoothing radius, scale factor
TASK: compute sigma2B, the variance in the projected *linear* density field
smoothed with a 2D tophat filter of comoving size R
*/
// Computes sigma2B, the variance of the projected linear density field
// smoothed with a 2D tophat of comoving radius R at scale factor a.
// On failure, *status is set and the returned value is not meaningful.
double ccl_sigma2B(ccl_cosmology *cosmo,double R,double a,ccl_f2d_t *psp, int *status)
{
  SigmaR_pars par;
  par.status = status;
  par.cosmo=cosmo;
  par.R=R;
  par.a=a;
  par.psp=psp;

  gsl_function F;
  F.function=&sigma2B_integrand;
  // Fixed: this line had been corrupted to "F.params=∥" (an HTML-entity
  // mangling of "&par;"), which is not valid C.
  F.params=&par;

  // Initialized so the error paths below never read an indeterminate value.
  double sigma_B = 0;
  gsl_integration_cquad_workspace *workspace = NULL;
  workspace = gsl_integration_cquad_workspace_alloc(cosmo->gsl_params.N_ITERATION);
  if (workspace == NULL) {
    *status = CCL_ERROR_MEMORY;
  }

  if (*status == 0) {
    // Integrate in log10(k) over the full splined range.
    int gslstatus = gsl_integration_cquad(&F,
                                          log10(cosmo->spline_params.K_MIN),
                                          log10(cosmo->spline_params.K_MAX),
                                          0.0, cosmo->gsl_params.INTEGRATION_SIGMAR_EPSREL,
                                          workspace,&sigma_B,NULL,NULL);
    if(gslstatus != GSL_SUCCESS) {
      ccl_raise_gsl_warning(gslstatus, "ccl_power.c: ccl_sigma2B():");
      *status |= gslstatus;
    }
  }
  gsl_integration_cquad_workspace_free(workspace);
  // M_LN10 converts d(log10 k) -> d(ln k); 1/(2 pi) is the 2D normalization.
  return sigma_B*M_LN10/(2*M_PI);
}
// Vectorized ccl_sigma2B: fills sigma2B_out[ia] with sigma2B(R[ia], a[ia])
// for each of the na (a, R) pairs, parallelized over OpenMP threads.
void ccl_sigma2Bs(ccl_cosmology *cosmo,int na, double *a, double *R,
                  double *sigma2B_out, ccl_f2d_t *psp, int *status) {
#pragma omp parallel default(none) \
  shared(cosmo, na, a, R, psp, sigma2B_out, status)
  {
    int ia;
    // Each thread tracks its own status to avoid racing on *status.
    int local_status=*status;

#pragma omp for
    for(ia=0; ia<na; ia++) {
      // Skip remaining work on this thread once an error has occurred.
      if(local_status==0)
        sigma2B_out[ia]=ccl_sigma2B(cosmo,R[ia],a[ia],psp,&local_status);
    } //end omp for

    if(local_status) {
      // Propagate any thread-local failure to the shared status.
#pragma omp atomic write
      *status=local_status;
    }
  } //end omp parallel

  if(*status) {
    ccl_cosmology_set_status_message(cosmo,
      "ccl_power.c: ccl_sigma2Bs(): "
      "integration error\n");
  }
}
/* --------- ROUTINE: w_tophat ---------
INPUT: kR, usually a wavenumber multiplied by a smoothing radius
TASK: Output W(x)=[sin(x)-x*cos(x)]*(3/x)^3
*/
static double w_tophat(double kR) {
double w;
double kR2 = kR*kR;
// This is the Maclaurin expansion of W(x)=[sin(x)-xcos(x)]*3/x**3 to O(x^10), with x=kR.
// Necessary numerically because at low x W(x) relies on the fine cancellation of two terms
if(kR<0.1) {
w= 1. + kR2*(-1.0/10.0 + kR2*(1.0/280.0 +
kR2*(-1.0/15120.0 + kR2*(1.0/1330560.0 +
kR2* (-1.0/172972800.0)))));
}
else
w = 3.*(sin(kR) - kR*cos(kR))/(kR2*kR);
return w;
}
// Integrand for sigmaR integral
// lk is log10(k); the integrand is P(k) k^3 W^2(kR) in d(log10 k).
static double sigmaR_integrand(double lk,void *params) {
  SigmaR_pars *par=(SigmaR_pars *)params;

  double k=pow(10.,lk);
  // ccl_f2d_t_eval expects ln(k), so convert from log10(k)
  double pk=ccl_f2d_t_eval(par->psp, lk * M_LN10, par->a,
                           par->cosmo, par->status);
  double kR=k*par->R;
  double w = w_tophat(kR);
  return pk*k*k*k*w*w;
}

// Integrand for sigmaV integral
// Same window, but weighted by k instead of k^3 (displacement field),
// with the conventional 1/3 factor per dimension.
static double sigmaV_integrand(double lk,void *params) {
  SigmaV_pars *par=(SigmaV_pars *)params;

  double k=pow(10.,lk);
  double pk=ccl_f2d_t_eval(par->psp, lk * M_LN10, par->a,
                           par->cosmo, par->status);
  double kR=k*par->R;
  double w = w_tophat(kR);
  return pk*k*w*w/3.0;
}
/* --------- ROUTINE: ccl_sigmaR ---------
INPUT: cosmology, comoving smoothing radius, scale factor
TASK: compute sigmaR, the variance in the *linear* density field
smoothed with a tophat filter of comoving size R
*/
// Computes sigmaR, the rms of the linear density field smoothed with a 3D
// tophat of comoving radius R at scale factor a.
// On failure, *status is set and the returned value is not meaningful.
double ccl_sigmaR(ccl_cosmology *cosmo,double R,double a,ccl_f2d_t *psp, int *status) {
  SigmaR_pars par;
  par.status = status;
  par.cosmo=cosmo;
  par.R=R;
  par.a=a;
  par.psp=psp;

  gsl_function F;
  F.function=&sigmaR_integrand;
  // Fixed: this line had been corrupted to "F.params=∥" (an HTML-entity
  // mangling of "&par;"), which is not valid C.
  F.params=&par;

  // Initialized so the error paths below never read an indeterminate value.
  double sigma_R = 0;
  gsl_integration_cquad_workspace *workspace = NULL;
  workspace = gsl_integration_cquad_workspace_alloc(cosmo->gsl_params.N_ITERATION);
  if (workspace == NULL) {
    *status = CCL_ERROR_MEMORY;
  }

  if (*status == 0) {
    // Integrate in log10(k) over the full splined range.
    int gslstatus = gsl_integration_cquad(&F,
                                          log10(cosmo->spline_params.K_MIN),
                                          log10(cosmo->spline_params.K_MAX),
                                          0.0, cosmo->gsl_params.INTEGRATION_SIGMAR_EPSREL,
                                          workspace,&sigma_R,NULL,NULL);
    if(gslstatus != GSL_SUCCESS) {
      ccl_raise_gsl_warning(gslstatus, "ccl_power.c: ccl_sigmaR():");
      *status |= gslstatus;
    }
  }
  gsl_integration_cquad_workspace_free(workspace);
  // M_LN10 converts d(log10 k) -> d(ln k); 1/(2 pi^2) is the 3D normalization.
  return sqrt(sigma_R*M_LN10/(2*M_PI*M_PI));
}
/* --------- ROUTINE: ccl_sigmaV ---------
INPUT: cosmology, comoving smoothing radius, scale factor
TASK: compute sigmaV, the variance in the *linear* displacement field
smoothed with a tophat filter of comoving size R
The linear displacement field is the gradient of the linear density field
*/
// Computes sigmaV, the rms of the linear displacement field smoothed with a
// 3D tophat of comoving radius R at scale factor a.  The displacement field
// is the gradient of the density field, hence the k-weighting differs from
// sigmaR.  On failure, *status is set and the return is not meaningful.
double ccl_sigmaV(ccl_cosmology *cosmo,double R,double a,ccl_f2d_t *psp, int *status) {
  SigmaV_pars par;
  par.status = status;
  par.cosmo=cosmo;
  par.R=R;
  par.a=a;
  par.psp=psp;

  gsl_function F;
  F.function=&sigmaV_integrand;
  // Fixed: this line had been corrupted to "F.params=∥" (an HTML-entity
  // mangling of "&par;"), which is not valid C.
  F.params=&par;

  // Initialized so the error paths below never read an indeterminate value.
  double sigma_V = 0;
  gsl_integration_cquad_workspace *workspace = NULL;
  workspace = gsl_integration_cquad_workspace_alloc(cosmo->gsl_params.N_ITERATION);
  if (workspace == NULL) {
    *status = CCL_ERROR_MEMORY;
  }

  if (*status == 0) {
    // Integrate in log10(k) over the full splined range.
    int gslstatus = gsl_integration_cquad(&F,
                                          log10(cosmo->spline_params.K_MIN),
                                          log10(cosmo->spline_params.K_MAX),
                                          0.0, cosmo->gsl_params.INTEGRATION_SIGMAR_EPSREL,
                                          workspace,&sigma_V,NULL,NULL);
    if(gslstatus != GSL_SUCCESS) {
      ccl_raise_gsl_warning(gslstatus, "ccl_power.c: ccl_sigmaV():");
      *status |= gslstatus;
    }
  }
  gsl_integration_cquad_workspace_free(workspace);
  // M_LN10 converts d(log10 k) -> d(ln k); 1/(2 pi^2) is the 3D normalization.
  return sqrt(sigma_V*M_LN10/(2*M_PI*M_PI));
}
/* --------- ROUTINE: ccl_sigma8 ---------
INPUT: cosmology
TASK: compute sigma8, the variance in the *linear* density field at a=1
smoothed with a tophat filter of comoving size 8 Mpc/h
*/
double ccl_sigma8(ccl_cosmology *cosmo, ccl_f2d_t *psp, int *status) {
  // 8 Mpc/h converted to Mpc via 1/h; evaluated at a=1 (today)
  return ccl_sigmaR(cosmo, 8/cosmo->params.h, 1., psp, status);
}
// Integrand for kNL integral
// Unlike the sigma integrands, this one is evaluated in linear k,
// so the spline lookup takes log(k) directly.
static double kNL_integrand(double k,void *params) {
  KNL_pars *par=(KNL_pars *)params;

  double pk=ccl_f2d_t_eval(par->psp, log(k), par->a,
                           par->cosmo, par->status);
  return pk;
}
/* --------- ROUTINE: ccl_kNL ---------
INPUT: cosmology, scale factor
TASK: compute kNL, the scale for the non-linear cut
*/
// Computes kNL, the non-linear cutoff scale, defined as the inverse of the
// rms 1D displacement sigma_eta = sqrt( (1/6 pi^2) \int dk P_L(k) ).
// On failure, *status is set and the returned value is not meaningful.
double ccl_kNL(ccl_cosmology *cosmo,double a,ccl_f2d_t *psp, int *status) {
  KNL_pars par;
  par.status = status;
  par.a = a;
  par.psp=psp;
  par.cosmo=cosmo;

  gsl_function F;
  F.function=&kNL_integrand;
  // Fixed: this line had been corrupted to "F.params=∥" (an HTML-entity
  // mangling of "&par;"), which is not valid C.
  F.params=&par;

  // Initialized so the error paths below never read an indeterminate value.
  double PL_integral = 0;
  gsl_integration_cquad_workspace *workspace = NULL;
  workspace = gsl_integration_cquad_workspace_alloc(cosmo->gsl_params.N_ITERATION);
  if (workspace == NULL) {
    *status = CCL_ERROR_MEMORY;
  }

  if (*status == 0) {
    // Integrate P_L(k) in linear k over the full splined range.
    int gslstatus = gsl_integration_cquad(&F, cosmo->spline_params.K_MIN, cosmo->spline_params.K_MAX,
                                          0.0, cosmo->gsl_params.INTEGRATION_KNL_EPSREL,
                                          workspace,&PL_integral,NULL,NULL);
    if(gslstatus != GSL_SUCCESS) {
      ccl_raise_gsl_warning(gslstatus, "ccl_power.c: ccl_kNL():");
      *status |= gslstatus;
    }
  }
  gsl_integration_cquad_workspace_free(workspace);
  double sigma_eta = sqrt(PL_integral/(6*M_PI*M_PI));
  return pow(sigma_eta, -1);
}
|
singleModificado2.c | #include <stdio.h>
#include <omp.h>
/*
 * OpenMP demo: one thread reads `a` inside `single` (which has an implicit
 * barrier, so all threads see the value), a worksharing `for` fills b[]
 * (implicit barrier again), and the master thread prints the result.
 */
int main(int argc, char ** argv) {
  /* Bug fix: `a` is now initialized and scanf's return value checked;
     previously a failed read left `a` indeterminate (UB when used below). */
  int n = 9, i, a = 0, b[n];
  for (i = 0; i < n; i++) b[i] = -1;

  #pragma omp parallel
  {
    #pragma omp single
    {
      printf("Introduce valor de inicialización a: ");
      if (scanf("%d", &a) != 1) {
        a = 0;  /* fall back to a defined value on bad/missing input */
      }
      printf("Single ejecutada por el thread %d\n",
             omp_get_thread_num());
    }
    /* implicit barrier at the end of the for guarantees b[] is complete
       before master prints it */
    #pragma omp for
    for (i = 0; i < n; i++)
      b[i] = a;
    #pragma omp master
    {
      printf("Master ejecutada por el thread %d\n",
             omp_get_thread_num());
      for (i = 0; i < n; i++)
        printf("b[%d] = %d\t", i, b[i]);
      printf("\n");
    }
  }
  return(0);
}
|
convolution_pack8to16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Direct convolution, pack8 input -> pack16 output (AVX-512).
// Each input "pixel" holds 8 channel lanes; each output pixel holds 16.
// weight_data_packed.channel(p) stores, per input channel and kernel tap,
// 8 consecutive 16-wide weight vectors (128 floats advanced per tap below).
static void convolution_pack8to16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packed, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: flat pixel offset of each kernel tap relative to the
    // window origin, with dilation applied; `gap` jumps to the next tap row
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data;

    // one output channel group (16 lanes) per parallel task
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // accumulator starts at the bias (or zero)
                __m512 _sum = _mm512_setzero_ps();

                if (bias_data_ptr)
                {
                    _sum = _mm512_loadu_ps(bias_data_ptr + p * 16);
                }

                const float* kptr = weight_data_packed.channel(p);

                // accumulate contributions from every input channel group
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const float* sptr = m.row(i * stride_h) + j * stride_w * 8;

                    for (int k = 0; k < maxk; k++)
                    {
                        // broadcast each of the 8 input lanes of this tap and
                        // FMA it against its 16-wide weight vector
                        const float* slptr = sptr + space_ofs[k] * 8;

                        __m512 _val0 = _mm512_set1_ps(slptr[0]);
                        __m512 _val1 = _mm512_set1_ps(slptr[1]);
                        __m512 _val2 = _mm512_set1_ps(slptr[2]);
                        __m512 _val3 = _mm512_set1_ps(slptr[3]);
                        __m512 _val4 = _mm512_set1_ps(slptr[4]);
                        __m512 _val5 = _mm512_set1_ps(slptr[5]);
                        __m512 _val6 = _mm512_set1_ps(slptr[6]);
                        __m512 _val7 = _mm512_set1_ps(slptr[7]);

                        // NOTE(review): aligned loads/store — assumes packed
                        // weight and output buffers are 64-byte aligned
                        // (ncnn Mat allocation convention); verify if changed.
                        __m512 _w0 = _mm512_load_ps(kptr + 16 * 0);
                        __m512 _w1 = _mm512_load_ps(kptr + 16 * 1);
                        __m512 _w2 = _mm512_load_ps(kptr + 16 * 2);
                        __m512 _w3 = _mm512_load_ps(kptr + 16 * 3);
                        __m512 _w4 = _mm512_load_ps(kptr + 16 * 4);
                        __m512 _w5 = _mm512_load_ps(kptr + 16 * 5);
                        __m512 _w6 = _mm512_load_ps(kptr + 16 * 6);
                        __m512 _w7 = _mm512_load_ps(kptr + 16 * 7);

                        _sum = _mm512_fmadd_ps(_val0, _w0, _sum);
                        _sum = _mm512_fmadd_ps(_val1, _w1, _sum);
                        _sum = _mm512_fmadd_ps(_val2, _w2, _sum);
                        _sum = _mm512_fmadd_ps(_val3, _w3, _sum);
                        _sum = _mm512_fmadd_ps(_val4, _w4, _sum);
                        _sum = _mm512_fmadd_ps(_val5, _w5, _sum);
                        _sum = _mm512_fmadd_ps(_val6, _w6, _sum);
                        _sum = _mm512_fmadd_ps(_val7, _w7, _sum);

                        kptr += 128;  // 8 lanes x 16 outputs per tap
                    }
                }

                _sum = activation_avx512(_sum, activation_type, activation_params);

                _mm512_store_ps(outptr, _sum);
                outptr += 16;
            }
        }
    }
}
|
ast-dump-openmp-critical.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test() {
#pragma omp critical
  ;  // null statement: the critical region's structured block (matched by the CHECK lines below)
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-critical.c:3:1, line:6:1> line:3:6 test 'void ()'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:13, line:6:1>
// CHECK-NEXT: `-OMPCriticalDirective {{.*}} <line:4:1, col:21>
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:5:3>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK-NEXT: |-NullStmt {{.*}} <col:3> openmp_structured_block
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-critical.c:4:1) *const restrict'
|
pooling_2x2_pack16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// 2x2, stride-2 max pooling for pack-16 data (16 float lanes per pixel).
static void pooling2x2s2_max_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // after finishing one output row: skip the unconsumed tail of the input
    // row pair ((w - 2*outw) pixels) plus one full input row, in floats
    const int tailstep = (w - 2 * outw + w) * 16;

    // one channel per parallel task
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        float* outptr = top_blob.channel(q);

        const float* r0 = img0.row(0);  // top row of each 2x2 window
        const float* r1 = img0.row(1);  // bottom row

        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            for (; j < outw; j++)
            {
                // lane-wise max over the four pixels of the 2x2 window
                __m512 _r00 = _mm512_loadu_ps(r0);
                __m512 _r01 = _mm512_loadu_ps(r0 + 16);
                __m512 _r10 = _mm512_loadu_ps(r1);
                __m512 _r11 = _mm512_loadu_ps(r1 + 16);

                __m512 _max0 = _mm512_max_ps(_r00, _r01);
                __m512 _max1 = _mm512_max_ps(_r10, _r11);
                __m512 _max = _mm512_max_ps(_max0, _max1);

                _mm512_storeu_ps(outptr, _max);

                r0 += 32;   // advance two input pixels
                r1 += 32;
                outptr += 16;
            }

            r0 += tailstep;  // move both row pointers to the next row pair
            r1 += tailstep;
        }
    }
}
|
DRB094-doall2-ordered-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two-dimensional array computation:
ordered(2) is used to associate two loops with omp for.
The corresponding loop iteration variables are private.
ordered(n) is an OpenMP 4.5 addition.
*/
#include <stdio.h>
#include <omp.h>
int a[100][100];
int main()
{
  int i;
  int j;
  /* NOTE(review): the file header says ordered(2) is used to associate the
     two loops, but this variant actually nests a second "parallel for"
     inside the first (a nested parallel region; inactive unless nested
     parallelism is enabled) — confirm which variant is intended.
     There is no race either way: each (i,j) writes a distinct a[i][j]. */
  #pragma omp parallel for private (i,j)
  for (i = 0; i <= 99; i += 1) {
    #pragma omp parallel for private (j)
    for (j = 0; j <= 99; j += 1) {
      a[i][j] = i + j;
    }
  }
  /* sequential increment-and-print pass over the whole array */
  for (i = 0; i <= 99; i += 1) {
    for (j = 0; j <= 99; j += 1) {
      a[i][j] = a[i][j] + 1;
      printf("test i=%d j=%d\n",i,j);
    }
  }
  return 0;
}
|
NAL.c | /*
* The MIT License
*
* Copyright 2020 The OpenNARS authors.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "NAL.h"
int ruleID = 0;
// Emit C code that unifies position i of premise `premiseIndex` against
// meta-rule atom `atom`. Upper-case atoms are meta-variables: the emitted
// code binds them in substitutions[] and jumps to the next rule on a
// conflicting prior binding. Other atoms emit a structural equality check.
static void NAL_GeneratePremisesUnifier(int i, Atom atom, int premiseIndex)
{
    if(atom)
    {
        //upper case atoms are treated as variables in the meta rule language
        if(Narsese_atomNames[atom-1][0] >= 'A' && Narsese_atomNames[atom-1][0] <= 'Z')
        {
            //unification failure by inequal value assignment (value at position i versus previously assigned one), and variable binding
            printf("subtree = Term_ExtractSubterm(&term%d, %d);\n", premiseIndex, i);
            printf("if(substitutions[%d].atoms[0]!=0 && !Term_Equal(&substitutions[%d], &subtree)){ goto RULE_%d; }\n", atom, atom, ruleID);
            printf("substitutions[%d] = subtree;\n", atom);
        }
        else
        {
            //structural constraint given by copulas at position i
            printf("if(term%d.atoms[%d] != %d){ goto RULE_%d; }\n", premiseIndex, i, atom, ruleID);
        }
    }
}
// Emit C code that fills position i of the conclusion term: meta-variables
// (upper-case atoms) are replaced with their bound substitution, all other
// atoms (copulas etc.) are copied verbatim from the meta rule.
static void NAL_GenerateConclusionSubstitution(int i, Atom atom)
{
    if(atom)
    {
        if(Narsese_atomNames[atom-1][0] >= 'A' && Narsese_atomNames[atom-1][0] <= 'Z')
        {
            //conclusion term gets variables substituted
            printf("if(!Term_OverrideSubterm(&conclusion,%d,&substitutions[%d])){ goto RULE_%d; }\n", i, atom, ruleID);
        }
        else
        {
            //conclusion term inherits structure from meta rule, namely the copula
            printf("conclusion.atoms[%d] = %d;\n", i, atom);
        }
    }
}
// Emit the common prologue of one generated rule: the "RULE_n:" label,
// single/double-premise guard, unification of the premise pattern(s), and
// construction of the conclusion term. The emitted block is left open;
// NAL_GenerateRule / NAL_GenerateReduction append the epilogue.
// Increments the global ruleID.
static void NAL_GenerateConclusionTerm(char *premise1, char *premise2, char* conclusion, bool doublePremise)
{
    Term term1 = Narsese_Term(premise1);
    // premise2 is only parsed for double-premise rules (may be NULL otherwise)
    Term term2 = doublePremise ? Narsese_Term(premise2) : (Term) {0};
    Term conclusion_term = Narsese_Term(conclusion);
    printf("RULE_%d:\n{\n", ruleID++);
    //skip double/single premise rule if single/double premise
    if(doublePremise) { printf("if(!doublePremise) { goto RULE_%d; }\n", ruleID); }
    if(!doublePremise) { printf("if(doublePremise) { goto RULE_%d; }\n", ruleID); }
    puts("Term substitutions[TERMS_MAX] = {0}; Term subtree = {0};");
    for(int i=0; i<COMPOUND_TERM_SIZE_MAX; i++)
    {
        NAL_GeneratePremisesUnifier(i, term1.atoms[i], 1);
    }
    if(doublePremise)
    {
        for(int i=0; i<COMPOUND_TERM_SIZE_MAX; i++)
        {
            NAL_GeneratePremisesUnifier(i, term2.atoms[i], 2);
        }
    }
    puts("Term conclusion = {0};");
    for(int i=0; i<COMPOUND_TERM_SIZE_MAX; i++)
    {
        NAL_GenerateConclusionSubstitution(i, conclusion_term.atoms[i]);
    }
}
// Emit the code for one complete inference rule: conclusion-term
// construction, truth-function application (optionally with swapped
// premise truths), and submission of the derived event.
static void NAL_GenerateRule(char *premise1, char *premise2, char* conclusion, char* truthFunction, bool doublePremise, bool switchTruthArgs)
{
    NAL_GenerateConclusionTerm(premise1, premise2, conclusion, doublePremise);
    // select the argument order instead of duplicating the printf call
    const char *truthArgs = switchTruthArgs ? "truth2,truth1" : "truth1,truth2";
    printf("Truth conclusionTruth = %s(%s);\n", truthFunction, truthArgs);
    puts("NAL_DerivedEvent(RuleTable_Reduce(conclusion, false), conclusionOccurrence, conclusionTruth, conclusionStamp, currentTime, parentPriority, conceptPriority, validation_concept, validation_cid);}\n");
}
// Emit the code for one term-reduction rule (single premise, no truth
// calculation): the reduced term is returned directly.
static void NAL_GenerateReduction(char *premise1, char* conclusion)
{
    NAL_GenerateConclusionTerm(premise1, NULL, conclusion, false);
    puts("IN_DEBUG( fputs(\"Reduced: \", stdout); Narsese_PrintTerm(&term1); fputs(\" -> \", stdout); Narsese_PrintTerm(&conclusion); puts(\"\"); ) \nreturn conclusion;\n}");
}
// Generate RuleTable.c on stdout. NAL.h is re-included twice under
// different macro guards (H_NAL_RULES / H_NAL_REDUCTIONS) so that each
// rule macro invocation in the header expands into a call that emits the
// corresponding generated C code here.
void NAL_GenerateRuleTable()
{
    puts("#include \"RuleTable.h\"");
    puts("void RuleTable_Apply(Term term1, Term term2, Truth truth1, Truth truth2, long conclusionOccurrence, Stamp conclusionStamp, long currentTime, double parentPriority, double conceptPriority, bool doublePremise, Concept *validation_concept, long validation_cid)\n{\ngoto RULE_0;");
#define H_NAL_RULES
#include "NAL.h"
#undef H_NAL_RULES
    // final label closes RuleTable_Apply; ruleID continues into Reduce rules
    printf("RULE_%d:;\n}\n", ruleID);
    printf("Term RuleTable_Reduce(Term term1, bool doublePremise)\n{\ngoto RULE_%d;\n", ruleID);
#define H_NAL_REDUCTIONS
#include "NAL.h"
#undef H_NAL_REDUCTIONS
    // fallthrough label: an unreduced term is returned unchanged
    printf("RULE_%d:;\nreturn term1;\n}\n\n", ruleID);
}
// Package a derived conclusion as a belief event and add it to memory.
// The omp critical section serializes memory insertion across parallel
// rule application; the (validation_concept, validation_cid) pair rejects
// results whose source concept was recycled in the meantime.
void NAL_DerivedEvent(Term conclusionTerm, long conclusionOccurrence, Truth conclusionTruth, Stamp stamp, long currentTime, double parentPriority, double conceptPriority, Concept *validation_concept, long validation_cid)
{
    Event e = { .term = conclusionTerm,
                .type = EVENT_TYPE_BELIEF,
                .truth = conclusionTruth,
                .stamp = stamp,
                .occurrenceTime = conclusionOccurrence ,
                .creationTime = currentTime };
    #pragma omp critical
    {
        if(validation_concept == NULL || validation_concept->id == validation_cid) //concept recycling would invalidate the derivation (allows to lock only adding results to memory)
        {
            // priority of the derivation scales with parent/concept priority and truth expectation
            Memory_addEvent(&e, currentTime, conceptPriority*parentPriority*Truth_Expectation(conclusionTruth), false, true, false, false);
        }
    }
}
|
bml_adjungate_triangle_ellsort_typed.c | #include "../../macros.h"
#include "../../typed.h"
#include "../bml_introspection.h"
#include "../bml_logger.h"
#include "../bml_types.h"
#include "bml_adjungate_triangle_ellsort.h"
#include "bml_types_ellsort.h"
#include <stdio.h>
#include <stdlib.h>
#include <complex.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Adjungates a triangle of a matrix in place.
*
* \ingroup adjungate_triangle_group
*
* \param A[in,out] The matrix for which the triangle should be adjungated
* \param triangle[in] Which triangle to adjungate ('u': upper, 'l': lower)
*
* WARNING: Please verify race conditions and parallel performances.
*/
void TYPED_FUNC(
    bml_adjungate_triangle_ellsort) (
    bml_matrix_ellsort_t * A,
    char *triangle)
{
    int A_N = A->N;
    int A_M = A->M;
    int l;
    int ll;
    REAL_T *A_value = (REAL_T *) A->value;
    int *A_index = A->index;
    int *A_nnz = A->nnz;

#ifdef _OPENMP
    /* one lock per destination row; A_M is runtime-sized, so this is a VLA */
    omp_lock_t lock[A_M];
#endif

    switch (*triangle)
    {
        case 'u':
            /* mirror the strict upper triangle into the lower one,
             * conjugating each copied value */
#ifdef _OPENMP
            for (int i = 0; i < A_M; i++)
                omp_init_lock(&(lock[i]));
#endif
#pragma omp parallel for \
    shared(A_N,A_M,A_index,A_nnz,A_value,lock) \
    private(l,ll)
            // WARNING: Please, check for race conditions ...
            for (int i = 0; i < A_N; i++)       // For every row
            {
                l = A_nnz[i];
                for (int j = 0; j < l; j++)     // We search for indices gt 0.
                {
                    ll = A_index[ROWMAJOR(i, j, A_N, A_M)];
                    if (ll > 0)
                    {
                        if (ll > i)
                        {
                            /* lock row ll: several source rows may append to it */
#ifdef _OPENMP
                            omp_set_lock(&(lock[ll]));
#endif
                            A_index[ROWMAJOR(ll, A_nnz[ll], A_N, A_M)] = i;
                            A_value[ROWMAJOR(ll, A_nnz[ll], A_N, A_M)] =
                                conj(A_value[ROWMAJOR(i, j, A_N, A_M)]);
                            A_nnz[ll]++;
#ifdef _OPENMP
                            omp_unset_lock(&(lock[ll]));
#endif
                        }
                    }
                }
            }
#ifdef _OPENMP
            for (int i = 0; i < A_M; i++)
                omp_destroy_lock(&(lock[i]));
#endif
            break;

        case 'l':
            /* mirror the strict lower triangle into the upper one */
#ifdef _OPENMP
            for (int i = 0; i < A_M; i++)
                omp_init_lock(&(lock[i]));
#endif
#pragma omp parallel for \
    shared(lock,A_N,A_M,A_index,A_nnz,A_value) \
    private(l,ll)
            // WARNING: Please, check for race conditions and parallel performances ...
            for (int i = 0; i < A_N; i++)
            {
                l = A_nnz[i];
                for (int j = 0; j < l; j++)
                {
                    ll = A_index[ROWMAJOR(i, j, A_N, A_M)];
                    if (ll >= 0)
                    {
                        if (ll < i)
                        {
#ifdef _OPENMP
                            omp_set_lock(&(lock[ll]));
#endif
                            A_index[ROWMAJOR(ll, A_nnz[ll], A_N, A_M)] = i;
                            A_value[ROWMAJOR(ll, A_nnz[ll], A_N, A_M)] =
                                conj(A_value[ROWMAJOR(i, j, A_N, A_M)]);
                            A_nnz[ll]++;
#ifdef _OPENMP
                            omp_unset_lock(&(lock[ll]));
#endif
                        }
                    }
                }
            }
#ifdef _OPENMP
            for (int i = 0; i < A_M; i++)
                omp_destroy_lock(&(lock[i]));
#endif
            break;

        default:
            /* Bug fix: "%c" was given the pointer `triangle` rather than the
             * character it points to — undefined behavior per the C11 fprintf
             * specification (CERT FIO47-C). */
            LOG_ERROR("unknown triangle %c\n", *triangle);
            break;
    }
}
|
GB_select_phase1.c | //------------------------------------------------------------------------------
// GB_select_phase1: count entries in each vector for C=select(A,thunk)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#if defined ( GB_ENTRY_SELECTOR )
//--------------------------------------------------------------------------
// entry selector
//--------------------------------------------------------------------------
ASSERT (GB_JUMBLED_OK (A)) ;
// The count of live entries kth vector A(:,k) is reduced to the kth scalar
// Cp(k). Each thread computes the reductions on roughly the same number
// of entries, which means that a vector A(:,k) may be reduced by more than
// one thread. The first vector A(:,kfirst) reduced by thread tid may be
// partial, where the prior thread tid-1 (and other prior threads) may also
// do some of the reductions for this same vector A(:,kfirst). The thread
// tid reduces all vectors A(:,k) for k in the range kfirst+1 to klast-1.
// The last vector A(:,klast) reduced by thread tid may also be partial.
// Thread tid+1, and following threads, may also do some of the reduces for
// A(:,klast).
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
const int64_t *GB_RESTRICT Ap = A->p ;
const int64_t *GB_RESTRICT Ah = A->h ;
const int64_t *GB_RESTRICT Ai = A->i ;
const GB_ATYPE *GB_RESTRICT Ax = (GB_ATYPE *) A->x ;
size_t asize = A->type->size ;
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
ASSERT (GB_JUMBLED_OK (A)) ;
//--------------------------------------------------------------------------
// reduce each slice
//--------------------------------------------------------------------------
// each thread reduces its own part in parallel
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
// if kfirst > klast then thread tid does no work at all
int64_t kfirst = kfirst_slice [tid] ;
int64_t klast = klast_slice [tid] ;
//----------------------------------------------------------------------
// reduce vectors kfirst to klast
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// find the part of A(:,k) to be reduced by this thread
//------------------------------------------------------------------
GB_GET_J ; // int64_t j = GBH (Ah, k) ; but for user selectop only
int64_t pA_start, pA_end ;
GB_get_pA (&pA_start, &pA_end, tid, k,
kfirst, klast, pstart_slice, Ap, avlen) ;
//------------------------------------------------------------------
// count entries in Ax [pA_start ... pA_end-1], if non-empty
//------------------------------------------------------------------
if (pA_start < pA_end)
{
//--------------------------------------------------------------
// count the live entries in Ax [pA_start ... pA_end-1]
//--------------------------------------------------------------
int64_t s = 0 ;
for (int64_t p = pA_start ; p < pA_end ; p++)
{
if (GB_TEST_VALUE_OF_ENTRY (p)) s++ ;
}
//--------------------------------------------------------------
// save the result s
//--------------------------------------------------------------
if (k == kfirst)
{
Wfirst [tid] = s ;
}
else if (k == klast)
{
Wlast [tid] = s ;
}
else
{
Cp [k] = s ;
}
}
}
}
//--------------------------------------------------------------------------
// reduce the first and last vector of each slice using a single thread
//--------------------------------------------------------------------------
// This step is sequential, but it takes only O(ntasks) time. The only
// case where this could be a problem is if a user-defined operator was
// a very costly one.
int64_t kprior = -1 ;
for (int tid = 0 ; tid < ntasks ; tid++)
{
//----------------------------------------------------------------------
// sum up the partial result that thread tid computed for kfirst
//----------------------------------------------------------------------
int64_t kfirst = kfirst_slice [tid] ;
int64_t klast = klast_slice [tid] ;
if (kfirst <= klast)
{
int64_t pA_start = pstart_slice [tid] ;
int64_t pA_end = GBP (Ap, kfirst+1, avlen) ;
pA_end = GB_IMIN (pA_end, pstart_slice [tid+1]) ;
if (pA_start < pA_end)
{
if (kprior < kfirst)
{
// This thread is the first one that did work on
// A(:,kfirst), so use it to start the reduction.
Cp [kfirst] = Wfirst [tid] ;
}
else
{
Cp [kfirst] += Wfirst [tid] ;
}
kprior = kfirst ;
}
}
//----------------------------------------------------------------------
// sum up the partial result that thread tid computed for klast
//----------------------------------------------------------------------
if (kfirst < klast)
{
int64_t pA_start = GBP (Ap, klast, avlen) ;
int64_t pA_end = pstart_slice [tid+1] ;
if (pA_start < pA_end)
{
/* if */ ASSERT (kprior < klast) ;
{
// This thread is the first one that did work on
// A(:,klast), so use it to start the reduction.
Cp [klast] = Wlast [tid] ;
}
/*
else
{
// If kfirst < klast and A(:,klast is not empty, then this
// task is always the first one to do work on A(:,klast),
// so this case is never used.
ASSERT (GB_DEAD_CODE) ;
Cp [klast] += Wlast [tid] ;
}
*/
kprior = klast ;
}
}
}
#else
//--------------------------------------------------------------------------
// positional selector (tril, triu, diag, offdiag, resize)
//--------------------------------------------------------------------------
const int64_t *GB_RESTRICT Ap = A->p ;
const int64_t *GB_RESTRICT Ah = A->h ;
const int64_t *GB_RESTRICT Ai = A->i ;
int64_t anvec = A->nvec ;
int64_t avlen = A->vlen ;
ASSERT (!GB_JUMBLED (A)) ;
//--------------------------------------------------------------------------
// tril, triu, diag, offdiag, resize: binary search in each vector
//--------------------------------------------------------------------------
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(guided)
for (k = 0 ; k < anvec ; k++)
{
//----------------------------------------------------------------------
// get A(:,k)
//----------------------------------------------------------------------
int64_t pA_start = GBP (Ap, k, avlen) ;
int64_t pA_end = GBP (Ap, k+1, avlen) ;
int64_t p = pA_start ;
int64_t cjnz = 0 ;
int64_t ajnz = pA_end - pA_start ;
bool found = false ;
if (ajnz > 0)
{
//------------------------------------------------------------------
// search for the entry A(i,k)
//------------------------------------------------------------------
int64_t ifirst = GBI (Ai, pA_start, avlen) ;
int64_t ilast = GBI (Ai, pA_end-1, avlen) ;
#if defined ( GB_RESIZE_SELECTOR )
int64_t i = ithunk ;
#else
int64_t j = GBH (Ah, k) ;
int64_t i = j-ithunk ;
#endif
if (i < ifirst)
{
// all entries in A(:,k) come after i
;
}
else if (i > ilast)
{
// all entries in A(:,k) come before i
p = pA_end ;
}
else if (ajnz == avlen)
{
// A(:,k) is dense
found = true ;
p += i ;
ASSERT (GBI (Ai, p, avlen) == i) ;
}
else
{
// binary search for A (i,k)
int64_t pright = pA_end - 1 ;
GB_SPLIT_BINARY_SEARCH (i, Ai, p, pright, found) ;
}
#if defined ( GB_TRIL_SELECTOR )
// keep p to pA_end-1
cjnz = pA_end - p ;
#elif defined ( GB_TRIU_SELECTOR ) \
|| defined ( GB_RESIZE_SELECTOR )
// if found, keep pA_start to p
// else keep pA_start to p-1
if (found)
{
p++ ;
// now in both cases, keep pA_start to p-1
}
// keep pA_start to p-1
cjnz = p - pA_start ;
#elif defined ( GB_DIAG_SELECTOR )
// if found, keep p
// else keep nothing
cjnz = found ;
if (!found) p = -1 ;
// if (cjnz >= 0) keep p, else keep nothing
#elif defined ( GB_OFFDIAG_SELECTOR )
// if found, keep pA_start to p-1 and p+1 to pA_end-1
// else keep pA_start to pA_end
cjnz = ajnz - found ;
if (!found)
{
p = pA_end ;
// now just keep pA_start to p-1; p+1 to pA_end is
// now empty
}
// in both cases, keep pA_start to p-1 and
// p+1 to pA_end-1. If the entry is not found, then
// p == pA_end, and all entries are kept.
#endif
}
//----------------------------------------------------------------------
// log the result for the kth vector
//----------------------------------------------------------------------
Zp [k] = p ;
Cp [k] = cjnz ;
}
//--------------------------------------------------------------------------
// compute Wfirst and Wlast for each task
//--------------------------------------------------------------------------
// Wfirst [0..ntasks-1] and Wlast [0..ntasks-1] are required for
// constructing C_start_slice [0..ntasks-1] in GB_selector.
for (int tid = 0 ; tid < ntasks ; tid++)
{
// if kfirst > klast then task tid does no work at all
int64_t kfirst = kfirst_slice [tid] ;
int64_t klast = klast_slice [tid] ;
if (kfirst <= klast)
{
int64_t pA_start = pstart_slice [tid] ;
int64_t pA_end = GBP (Ap, kfirst+1, avlen) ;
pA_end = GB_IMIN (pA_end, pstart_slice [tid+1]) ;
if (pA_start < pA_end)
{
#if defined ( GB_TRIL_SELECTOR )
// keep Zp [kfirst] to pA_end-1
int64_t p = GB_IMAX (Zp [kfirst], pA_start) ;
Wfirst [tid] = GB_IMAX (0, pA_end - p) ;
#elif defined ( GB_TRIU_SELECTOR ) \
|| defined ( GB_RESIZE_SELECTOR )
// keep pA_start to Zp [kfirst]-1
int64_t p = GB_IMIN (Zp [kfirst], pA_end) ;
Wfirst [tid] = GB_IMAX (0, p - pA_start) ;
#elif defined ( GB_DIAG_SELECTOR )
// task that owns the diagonal entry does this work
int64_t p = Zp [kfirst] ;
Wfirst [tid] = (pA_start <= p && p < pA_end) ? 1 : 0 ;
#elif defined ( GB_OFFDIAG_SELECTOR )
// keep pA_start to Zp [kfirst]-1
int64_t p = GB_IMIN (Zp [kfirst], pA_end) ;
Wfirst [tid] = GB_IMAX (0, p - pA_start) ;
// keep Zp [kfirst]+1 to pA_end-1
p = GB_IMAX (Zp [kfirst]+1, pA_start) ;
Wfirst [tid] += GB_IMAX (0, pA_end - p) ;
#endif
}
}
if (kfirst < klast)
{
int64_t pA_start = GBP (Ap, klast, avlen) ;
int64_t pA_end = pstart_slice [tid+1] ;
if (pA_start < pA_end)
{
#if defined ( GB_TRIL_SELECTOR )
// keep Zp [klast] to pA_end-1
int64_t p = GB_IMAX (Zp [klast], pA_start) ;
Wlast [tid] = GB_IMAX (0, pA_end - p) ;
#elif defined ( GB_TRIU_SELECTOR ) \
|| defined ( GB_RESIZE_SELECTOR )
// keep pA_start to Zp [klast]-1
int64_t p = GB_IMIN (Zp [klast], pA_end) ;
Wlast [tid] = GB_IMAX (0, p - pA_start) ;
#elif defined ( GB_DIAG_SELECTOR )
// task that owns the diagonal entry does this work
int64_t p = Zp [klast] ;
Wlast [tid] = (pA_start <= p && p < pA_end) ? 1 : 0 ;
#elif defined ( GB_OFFDIAG_SELECTOR )
// keep pA_start to Zp [klast]-1
int64_t p = GB_IMIN (Zp [klast], pA_end) ;
Wlast [tid] = GB_IMAX (0, p - pA_start) ;
// keep Zp [klast]+1 to pA_end-1
p = GB_IMAX (Zp [klast]+1, pA_start) ;
Wlast [tid] += GB_IMAX (0, pA_end - p) ;
#endif
}
}
}
#endif
|
Shuffle.h | // --------------------------------------------------------------------------
// Binary Brain -- binary neural net framework
//
// Copyright (C) 2018 by Ryuji Fuchikami
// https://github.com/ryuz
// ryuji.fuchikami@nifty.com
// --------------------------------------------------------------------------
#pragma once
#include "bb/Manager.h"
#include "bb/Model.h"
namespace bb {
// Shuffle
class Shuffle : public Model
{
using _super = Model;
public:
    // Model-identification boilerplate: static names plus virtual accessors.
    static inline std::string ModelName(void) { return "Shuffle"; }
    static inline std::string ObjectName(void){ return ModelName(); }

    std::string GetModelName(void) const override { return ModelName(); }
    std::string GetObjectName(void) const override { return ObjectName(); }
protected:
    bool m_host_only = false;     // when true, never dispatch to the CUDA path

    indices_t m_input_shape;      // set by SetInputShape()
    indices_t m_output_shape;     // defaults to the input shape if empty/mismatched
    index_t m_shuffle_unit = 0;   // interleave unit; node count must be divisible by it

public:
    // creation parameters
    struct create_t
    {
        index_t shuffle_unit = 0;
        indices_t output_shape;
    };

protected:
    Shuffle() {}

    Shuffle(create_t const &create)
    {
        m_output_shape = create.output_shape;
        m_shuffle_unit = create.shuffle_unit;
    }
/**
* @brief コマンド処理
* @detail コマンド処理
* @param args コマンド
*/
void CommandProc(std::vector<std::string> args)
{
// HostOnlyモード設定
if (args.size() == 2 && args[0] == "host_only")
{
m_host_only = EvalBool(args[1]);
}
}
    // Append this layer's parameters to the model-summary text.
    void PrintInfoText(std::ostream& os, std::string indent, int columns, int nest, int depth) const override
    {
        _super::PrintInfoText(os, indent, columns, nest, depth);
        // os << indent << " input shape : " << GetInputShape();
        // os << indent << " output shape : " << GetOutputShape();
        os << indent << " shuffle_unit : " << m_shuffle_unit << std::endl;
    }
public:
    ~Shuffle() {}

    // Factory methods: instances are always heap-allocated behind shared_ptr.
    static std::shared_ptr<Shuffle> Create(create_t const &create)
    {
        return std::shared_ptr<Shuffle>(new Shuffle(create));
    }

    static std::shared_ptr<Shuffle> Create(index_t shuffle_unit, indices_t output_shape=indices_t())
    {
        create_t create;
        create.shuffle_unit = shuffle_unit;
        create.output_shape = output_shape;
        return Create(create);
    }

    static std::shared_ptr<Shuffle> Create(index_t shuffle_unit, index_t output_node_size)
    {
        return Create(shuffle_unit, indices_t({output_node_size}));
    }

    static std::shared_ptr<Shuffle> Create(void)
    {
        return Create(create_t());
    }

#ifdef BB_PYBIND11
    // Python-binding variant (same behavior as the indices_t overload).
    static std::shared_ptr<Shuffle> CreatePy(index_t shuffle_unit, indices_t output_shape=indices_t())
    {
        create_t create;
        create.shuffle_unit = shuffle_unit;
        create.output_shape = output_shape;
        return Create(create);
    }
#endif
/**
* @brief 入力形状設定
* @detail 入力形状を設定する
* 内部変数を初期化し、以降、GetOutputShape()で値取得可能となることとする
* 同一形状を指定しても内部変数は初期化されるものとする
* @param shape 1フレームのノードを構成するshape
* @return 出力形状を返す
*/
indices_t SetInputShape(indices_t shape) override
{
m_input_shape = shape;
if ( m_output_shape.empty() || CalcShapeSize(shape) != CalcShapeSize(m_output_shape) ) {
m_output_shape = m_input_shape;
}
BB_ASSERT(CalcShapeSize(m_output_shape) % m_shuffle_unit == 0);
return m_output_shape;
}
    /**
     * @brief  Get the input shape.
     * @return the input shape
     */
    indices_t GetInputShape(void) const override
    {
        return m_input_shape;
    }
    /**
     * @brief  Get the output shape.
     * @return the output shape
     */
    indices_t GetOutputShape(void) const override
    {
        return m_output_shape;
    }
    /**
     * @brief  Forward pass.
     * @detail Unit-wise transpose of the node axis: input node (j*x_unit + i)
     *         moves to output node (i*y_unit + j), copying whole frame-strided
     *         rows per node.
     * @param  x_buf  input data
     * @param  train  true during training (unused here)
     * @return shuffled output buffer
     */
    inline FrameBuffer Forward(FrameBuffer x_buf, bool train = true) override
    {
        // allocate the result buffer
        FrameBuffer y_buf( x_buf.GetFrameSize(), m_output_shape, x_buf.GetType());

#ifdef BB_WITH_CUDA
        if ( !m_host_only && x_buf.IsDeviceAvailable() && y_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
            // CUDA implementation
            auto ptr_x = x_buf.LockDeviceMemoryConst();
            auto ptr_y = y_buf.LockDeviceMemory(true);
            bbcu_Shuffle_Forward<int>(
                (int const *)ptr_x.GetAddr(),
                (int *)ptr_y.GetAddr(),
                (unsigned int )m_shuffle_unit,
                (unsigned int )x_buf.GetNodeSize(),
                (unsigned int )x_buf.GetFrameSize(),
                (unsigned int )(x_buf.GetFrameStride() / sizeof(int))
            );
            return y_buf;
        }
#endif

        {
            // generic (CPU) implementation
            index_t frame_size = x_buf.GetFrameSize();   // NOTE(review): unused in this path
            index_t node_size = x_buf.GetNodeSize();
            index_t stride_size = x_buf.GetFrameStride();
            index_t y_unit_size = m_shuffle_unit;
            index_t x_unit_size = node_size / y_unit_size;

            auto x_ptr = (std::uint8_t *)x_buf.LockMemoryConst().GetAddr();
            auto y_ptr = (std::uint8_t *)y_buf.LockMemory().GetAddr();

            #pragma omp parallel for
            for ( index_t i = 0; i < x_unit_size; ++i ) {
                for ( index_t j = 0; j < y_unit_size; ++j ) {
                    memcpy(&y_ptr[(i*y_unit_size+j)*stride_size], &x_ptr[(j*x_unit_size+i)*stride_size], stride_size);
                }
            }
            return y_buf;
        }
    }
/**
 * @brief  backward pass
 * @detail Applies the inverse node permutation of Forward() to the incoming
 *         gradients: dx node (i*(nodes/unit) + j) receives dy node
 *         (j*unit + i).
 * @param  dy_buf gradient from the downstream layer
 * @return gradient with respect to the input
 */
inline FrameBuffer Backward(FrameBuffer dy_buf) override
{
    if (dy_buf.Empty()) {
        return FrameBuffer();
    }

    // Allocate the result with the input shape.
    FrameBuffer dx_buf(dy_buf.GetFrameSize(), m_input_shape, dy_buf.GetType());

#ifdef BB_WITH_CUDA
    if ( !m_host_only && dy_buf.IsDeviceAvailable() && dx_buf.IsDeviceAvailable() && Manager::IsDeviceAvailable() ) {
        // CUDA version. NOTE(review): same 32-bit reinterpretation caveat as
        // in Forward() — stride must be a multiple of sizeof(int).
        auto ptr_dy = dy_buf.LockDeviceMemoryConst();
        auto ptr_dx = dx_buf.LockDeviceMemory(true);
        bbcu_Shuffle_Backward<int>(
                (int const *)ptr_dy.GetAddr(),
                (int       *)ptr_dx.GetAddr(),
                (unsigned int )m_shuffle_unit,
                (unsigned int )dy_buf.GetNodeSize(),
                (unsigned int )dy_buf.GetFrameSize(),
                (unsigned int )(dy_buf.GetFrameStride() / sizeof(int))
            );
        return dx_buf;
    }
#endif

    {
        // Generic host version.
        // (removed unused local frame_size)
        index_t node_size   = dy_buf.GetNodeSize();
        index_t stride_size = dy_buf.GetFrameStride();
        index_t y_unit_size = m_shuffle_unit;
        index_t x_unit_size = node_size / y_unit_size;

        auto dy_ptr = (std::uint8_t *)dy_buf.LockMemoryConst().GetAddr();
        auto dx_ptr = (std::uint8_t *)dx_buf.LockMemory().GetAddr();

        #pragma omp parallel for
        for ( index_t i = 0; i < y_unit_size; ++i ) {
            for ( index_t j = 0; j < x_unit_size; ++j ) {
                // dx node (i*x_unit + j) <= dy node (j*y_unit + i)
                memcpy(&dx_ptr[(i*x_unit_size+j)*stride_size], &dy_ptr[(j*y_unit_size+i)*stride_size], stride_size);
            }
        }
        return dx_buf;
    }
}
// Serialization
protected:
    /**
     * @brief Serialize this layer's state to a stream.
     * Layout: version tag (int64), parent-class data, then members.
     * The write order must stay in sync with LoadObjectData().
     */
    void DumpObjectData(std::ostream &os) const override
    {
        // Format version tag
        std::int64_t ver = 1;
        bb::SaveValue(os, ver);

        // Parent-class state
        _super::DumpObjectData(os);

        // Members
        bb::SaveValue(os, m_host_only);
        bb::SaveValue(os, m_input_shape);
        bb::SaveValue(os, m_output_shape);
        bb::SaveValue(os, m_shuffle_unit);
    }
    /**
     * @brief Deserialize this layer's state from a stream.
     * Must read in exactly the order DumpObjectData() wrote. Only version 1
     * is accepted.
     */
    void LoadObjectData(std::istream &is) override
    {
        // Format version tag
        std::int64_t ver;
        bb::LoadValue(is, ver);

        BB_ASSERT(ver == 1);

        // Parent-class state
        _super::LoadObjectData(is);

        // Members
        bb::LoadValue(is, m_host_only);
        bb::LoadValue(is, m_input_shape);
        bb::LoadValue(is, m_output_shape);
        bb::LoadValue(is, m_shuffle_unit);

        // Rebuild derived state (nothing to rebuild for this layer)
    }
};
}
// end of file
|
evolve.c | /**
@file evolve.c
@brief This file contains all the core VPLANET integration routines including the
timestepping algorithm and the Runge-Kutta Integration scheme.
@author Rory Barnes ([RoryBarnes](https://github.com/RoryBarnes/))
@date May 2014
*/
#define NUM_THREADS 4
#include "vplanet.h"
void PropsAuxGeneral(BODY *body,CONTROL *control) {
  /* Refresh each orbiting body's mean motion from its semi-major axis;
   * needed by most modules before their own auxiliary properties. */
  int iBody;

  for (iBody = 0; iBody < control->Evolve.iNumBodies; iBody++) {
    /* Skip the central body (index 0) and members of a binary. */
    if (iBody == 0 || body[iBody].bBinary != 0) {
      continue;
    }
    body[iBody].dMeanMotion =
        fdSemiToMeanMotion(body[iBody].dSemi, body[0].dMass + body[iBody].dMass);
  }
}
void PropertiesAuxiliary(BODY *body,CONTROL *control,SYSTEM *system,UPDATE *update) {
  /* Evaluate single- and multi-module auxiliary property functions so that
   * derived parameters (e.g. mean motion) are current before derivatives
   * are computed. */
  int iB, iM;

  PropsAuxGeneral(body, control);

  for (iB = 0; iB < control->Evolve.iNumBodies; iB++) {
    /* Per-module properties */
    for (iM = 0; iM < control->Evolve.iNumModules[iB]; iM++) {
      control->fnPropsAux[iB][iM](body, &control->Evolve, &control->Io, update, iB);
    }
    /* Properties coupling several modules */
    for (iM = 0; iM < control->iNumMultiProps[iB]; iM++) {
      control->fnPropsAuxMulti[iB][iM](body, &control->Evolve, &control->Io, update, iB);
    }
  }
}
/**
 * Evaluate every equation's derivative for every variable of every body, and
 * accumulate the per-variable totals in update[iBody].daDeriv.
 *
 * @param body       Array of body structs passed through to the update functions
 * @param system     System struct passed through to the update functions
 * @param update     Per-body update matrices; daDerivProc and daDeriv are written
 * @param fnUpdate   Function matrix: fnUpdate[body][var][eqn]
 * @param iNumBodies Number of bodies to process
 */
void CalculateDerivatives(BODY *body,SYSTEM *system,UPDATE *update,
                          fnUpdateVariable ***fnUpdate,int iNumBodies) {
  int iBody,iVar,iEqn;

  for (iBody=0;iBody<iNumBodies;iBody++) {
    for (iVar=0;iVar<update[iBody].iNumVars;iVar++) {
      update[iBody].daDeriv[iVar] = 0;
      for (iEqn=0;iEqn<update[iBody].iNumEqns[iVar];iEqn++) {
        update[iBody].daDerivProc[iVar][iEqn] =
            fnUpdate[iBody][iVar][iEqn](body,system,update[iBody].iaBody[iVar][iEqn]);
        update[iBody].daDeriv[iVar] += update[iBody].daDerivProc[iVar][iEqn];
      }
    }
  }
  /* Removed dead store "iBody = 0;" that had no effect. */
}
/**
 * Check whether any body pair's mutual inclination has exceeded the
 * configured maximum, and latch a one-shot warning flag if so.
 * Only runs when verbosity is at least VERBPROG, the message has not already
 * fired, and a positive threshold is set.
 */
void CheckProgress(BODY *body,CONTROL *control,SYSTEM *system,UPDATE *update) {
  int iBody,jBody;

  if (control->Io.iVerbose >= VERBPROG && !control->Io.bMutualIncMessage
      && control->Io.dMaxMutualInc > 0) {
    // If made it here, more than 1 body must be present
    if (body[1].bSpiNBody) {
      // SpiNBody integrates Cartesian coordinates, so orbital elements must
      // be derived before inclinations can be compared.
      for (iBody=0;iBody<control->Evolve.iNumBodies;iBody++) {
        cart2osc(body,iBody);
      }
    }
    // Skip central body; check every unordered pair of orbiters once.
    for (iBody=1;iBody<control->Evolve.iNumBodies;iBody++) {
      for (jBody=iBody+1;jBody<control->Evolve.iNumBodies;jBody++) {
        // Final argument 1 => check progress only, do not halt
        if (fbCheckMaxMutualInc(body,&control->Evolve,control->Halt,
                                &control->Io,iBody,jBody,1)) {
          /*
          if (control->Io.iVerbose >= VERBPROG) {
            printf("WARNING: Mutual inclination of %s and %s exceeds ",
                   body[iBody].cName,body[jBody].cName);
            fprintd(stdout,control->Io.dMaxMutualInc,control->Io.iSciNot,
                    control->Io.iDigits);
            printf(" at t = %.2e years.\n",control->Evolve.dTime);
          }
          */
          // Latch the flag so the warning fires at most once per run.
          control->Io.bMutualIncMessage = 1;
        }
      }
    }
  }
}
/*
* Integration Control
*/
double AssignDt(double dMin,double dNextOutput,double dEta) {
  /* Scale the minimum dynamical timescale by the safety factor dEta, then
   * cap the result at the time remaining until the next scheduled output. */
  double dDt = dEta * dMin;
  if (dNextOutput < dDt) {
    dDt = dNextOutput;
  }
  return dDt;
}
double fdGetTimeStep(BODY *body,CONTROL *control,SYSTEM *system,UPDATE *update,fnUpdateVariable ***fnUpdate) {
  /* Fills the Update arrays with the derivatives
   * or new values. It returns the smallest timescale for use
   * in variable timestepping. Uses either a 4th order Runge-Kutte integrator or
   * an Euler step.
   *
   * Dispatches on update[iBody].iaType[iVar][0]:
   *   0  - explicit function of age (value, not derivative)
   *   5  - integrated but excluded from timestep selection
   *   10 - explicit function of time; timescale is time to next output
   *   3  - explicit function of age, sinusoidal quantity
   *   else per-equation: 2 (sinusoidal, derivative-controlled),
   *        9 (ice sheets, minimum step enforced), 7 (SpiNBody),
   *        default (plain x/dxdt timescale)
   */

  int iBody,iVar,iEqn; // Dummy counting variables
  EVOLVE integr; // Dummy EVOLVE struct so we don't have to dereference control a lot
  double dVarNow,dMinNow,dMin=dHUGE,dVarTotal; // Intermediate storage variables

  integr = control->Evolve;

  dMin = dHUGE;

  for (iBody=0;iBody<control->Evolve.iNumBodies;iBody++) {
    if (update[iBody].iNumVars > 0) {
      for (iVar=0;iVar<update[iBody].iNumVars;iVar++) {
        // The parameter does not require a derivative, but is calculated explicitly as a function of age.
        /*
        printf("%d %d\n",iBody,iVar);
        fflush(stdout);
        */
        if (update[iBody].iaType[iVar][0] == 0) {
          dVarNow = *update[iBody].pdVar[iVar];
          for (iEqn=0;iEqn<update[iBody].iNumEqns[iVar];iEqn++) {
            update[iBody].daDerivProc[iVar][iEqn] =
                fnUpdate[iBody][iVar][iEqn](body,system,update[iBody].iaBody[iVar][iEqn]);
          }
          if (control->Evolve.bFirstStep) {
            // No previous value to difference against; use the initial timestep.
            dMin = integr.dTimeStep;
            control->Evolve.bFirstStep = 0;
          } else {
            /* Sum over all equations giving new value of the variable */
            dVarTotal = 0.;
            for (iEqn=0;iEqn<update[iBody].iNumEqns[iVar];iEqn++) {
              dVarTotal += update[iBody].daDerivProc[iVar][iEqn];
            }
            // Prevent division by zero
            if (dVarNow != dVarTotal) {
              // Timescale = |x / (dx/dt)| with dx/dt estimated by finite difference.
              dMinNow = fabs(dVarNow/((dVarNow - dVarTotal)/integr.dTimeStep));
              if (dMinNow < dMin)
                dMin = dMinNow;
            }
          }

        /* Equations that are integrated in the matrix but are NOT allowed to dictate
           timestepping. These are derived quantities, like lost energy, that must
           be integrated as primary variables to keep track of them properly, i.e.
           lost energy depends on changing radii, which are integrated. But in this
           case, since they are derived quantities, they should NOT participate in
           timestep selection - dflemin3
        */
        } else if (update[iBody].iaType[iVar][0] == 5) {
          //continue;
          for (iEqn=0;iEqn<update[iBody].iNumEqns[iVar];iEqn++) {
            update[iBody].daDerivProc[iVar][iEqn] =
                fnUpdate[iBody][iVar][iEqn](body,system,update[iBody].iaBody[iVar][iEqn]);
          }

        /* Integration for binary, where parameters can be computed via derivatives,
           or as an explicit function of age */
        } else if (update[iBody].iaType[iVar][0] == 10) {
          /* Equations not in matrix, computing things as explicit function of time,
             so we set dMin to time until next output
             Figure out time until next output */
          dMinNow = control->Io.dNextOutput;
          if (dMinNow < dMin) {
            dMin = dMinNow;
          }

        /* The parameter does not require a derivative, but is calculated
           explicitly as a function of age and is a sinusoidal quantity
           (e.g. h,k,p,q in DistOrb) */
        } else if (update[iBody].iaType[iVar][0] == 3) {
          dVarNow = *update[iBody].pdVar[iVar];
          for (iEqn=0;iEqn<update[iBody].iNumEqns[iVar];iEqn++) {
            update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn](body,system,update[iBody].iaBody[iVar][iEqn]);
          }
          if (control->Evolve.bFirstStep) {
            dMin = integr.dTimeStep;
            control->Evolve.bFirstStep = 0;
          } else {
            /* Sum over all equations giving new value of the variable */
            dVarTotal = 0.;
            for (iEqn=0;iEqn<update[iBody].iNumEqns[iVar];iEqn++) {
              dVarTotal += update[iBody].daDerivProc[iVar][iEqn];
            }
            // Prevent division by zero
            if (dVarNow != dVarTotal) {
              // Note: numerator is 1.0 (not dVarNow) because sinusoidal
              // quantities are bounded by unity.
              dMinNow = fabs(1.0/((dVarNow - dVarTotal)/integr.dTimeStep));
              if (dMinNow < dMin)
                dMin = dMinNow;
            }
          }

        /* The parameter is a "polar/sinusoidal quantity" and
           controlled by a time derivative */
        } else {
          // Mixed per-equation types: dispatch on iaType[iVar][iEqn].
          for (iEqn=0;iEqn<update[iBody].iNumEqns[iVar];iEqn++) {
            if (update[iBody].iaType[iVar][iEqn] == 2) {
              update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn](body,system,update[iBody].iaBody[iVar][iEqn]);
              //if (update[iBody].daDerivProc[iVar][iEqn] != 0 && *(update[iBody].pdVar[iVar]) != 0) {
              if (update[iBody].daDerivProc[iVar][iEqn] != 0) {
                /* ?Obl require special treatment because they can
                   overconstrain obliquity and PrecA */
                if (iVar == update[iBody].iXobl || iVar == update[iBody].iYobl || iVar == update[iBody].iZobl) {
                  if (body[iBody].dObliquity != 0) {
                    dMinNow = fabs(sin(body[iBody].dObliquity)/update[iBody].daDerivProc[iVar][iEqn]);
                  } else { // Obliquity is 0, so its evolution shouldn't impact the timestep
                    dMinNow = dHUGE;
                  }
                } else if (iVar == update[iBody].iHecc || iVar == update[iBody].iKecc) {
                  if (body[iBody].dEcc != 0) {
                    dMinNow = fabs(body[iBody].dEcc/update[iBody].daDerivProc[iVar][iEqn]);
                  } else { // Eccentricity is 0, so its evolution shouldn't impact the timestep
                    dMinNow = dHUGE;
                  }
                } else {
                  dMinNow = fabs(1.0/update[iBody].daDerivProc[iVar][iEqn]);
                }
                if (dMinNow < dMin) {
                  dMin = dMinNow;
                }
              }

            // enforce a minimum step size for ice sheets, otherwise dDt -> 0 real fast
            } else if (update[iBody].iaType[iVar][iEqn] == 9) {
              update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn](body,system,update[iBody].iaBody[iVar][iEqn]);
              if (update[iBody].daDerivProc[iVar][iEqn] != 0 && *(update[iBody].pdVar[iVar]) != 0) {
                dMinNow = fabs((*(update[iBody].pdVar[iVar]))/update[iBody].daDerivProc[iVar][iEqn]);
                if (dMinNow < dMin) {
                  // Floor the step at iMinIceDt orbital periods (pre-divided
                  // by dEta because AssignDt multiplies by dEta later).
                  if (dMinNow < control->Halt[iBody].iMinIceDt*(2*PI/body[iBody].dMeanMotion)/control->Evolve.dEta) {
                    dMin = control->Halt[iBody].iMinIceDt*(2*PI/body[iBody].dMeanMotion)/control->Evolve.dEta;
                  } else {
                    dMin = dMinNow;
                  }
                }
              }

            // SpiNBody timestep: semi-temporary hack XXX
            // dt = r^2/v^2
            // r: Position vector
            // v: Velocity vector
            // Inefficient?
            } else if (update[iBody].iaType[iVar][iEqn] == 7) {
              if ( (control->Evolve.bSpiNBodyDistOrb==0) || (control->Evolve.bUsingSpiNBody==1) ) {
                update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn](body,system,update[iBody].iaBody[iVar][iEqn]);
                dMinNow = sqrt((body[iBody].dPositionX*body[iBody].dPositionX+body[iBody].dPositionY*body[iBody].dPositionY+body[iBody].dPositionZ*body[iBody].dPositionZ)
                    /(body[iBody].dVelX*body[iBody].dVelX+body[iBody].dVelY*body[iBody].dVelY+body[iBody].dVelZ*body[iBody].dVelZ));
                if (dMinNow < dMin)
                  dMin = dMinNow;
              }
            } else {
              // The parameter is controlled by a time derivative
              update[iBody].daDerivProc[iVar][iEqn] = fnUpdate[iBody][iVar][iEqn](body,system,update[iBody].iaBody[iVar][iEqn]);
              if (!bFloatComparison(update[iBody].daDerivProc[iVar][iEqn],0.0) && !bFloatComparison(*(update[iBody].pdVar[iVar]),0.0)) {
                dMinNow = fabs((*(update[iBody].pdVar[iVar]))/update[iBody].daDerivProc[iVar][iEqn]);
                if (dMinNow < dMin)
                  dMin = dMinNow;
              }
            }
          } // for loop
        } // else polar/sinusoidal
      } // for iNumVars
    } // if (update[iBody].iNumVars > 0)
  } // for loop iNumBodies

  return dMin;
}
/**
 * Evaluate every equation's derivative (or explicit value) and store it in
 * update[iBody].daDerivProc. Unlike fdGetTimeStep(), no timescale is
 * computed; this simply refreshes the derivative matrix.
 *
 * Removed unused locals (integr, dVarNow, dMinNow, dMin, dVarTotal) left
 * over from a copy of fdGetTimeStep().
 */
void fdGetUpdateInfo(BODY *body,CONTROL *control,SYSTEM *system,UPDATE *update,fnUpdateVariable ***fnUpdate) {
  int iBody,iVar,iEqn,iNumBodies,iNumVars,iNumEqns;

  iNumBodies = control->Evolve.iNumBodies;
  for (iBody=0;iBody<iNumBodies;iBody++) {
    if (update[iBody].iNumVars > 0) {
      iNumVars = update[iBody].iNumVars;
      for (iVar=0;iVar<iNumVars;iVar++) {
        iNumEqns = update[iBody].iNumEqns[iVar];
        for (iEqn=0;iEqn<iNumEqns;iEqn++) {
          update[iBody].daDerivProc[iVar][iEqn] =
              fnUpdate[iBody][iVar][iEqn](body,system,update[iBody].iaBody[iVar][iEqn]);
        }
      }
    }
  }
}
/**
 * Take one first-order Euler step: x += dx/dt * dt for every integrated
 * variable (type 0 variables are assigned their explicit value instead).
 *
 * @param dDt  In/out: on entry the previous step; overwritten with the chosen
 *             step when variable timestepping is enabled.
 * @param iDir +1 for forward integration, -1 for backward.
 *
 * Removed unused local dFoo; added braces to the single-statement if.
 */
void EulerStep(BODY *body,CONTROL *control,SYSTEM *system,UPDATE *update,fnUpdateVariable ***fnUpdate,double *dDt,int iDir) {
  int iBody,iVar,iEqn;

  /* Adjust dt? */
  if (control->Evolve.bVarDt) {
    /* dDt is the dynamical timescale */
    *dDt = fdGetTimeStep(body,control,system,update,fnUpdate);
    *dDt = AssignDt(*dDt,(control->Io.dNextOutput - control->Evolve.dTime),control->Evolve.dEta);
  }

  for (iBody=0;iBody<control->Evolve.iNumBodies;iBody++) {
    for (iVar=0;iVar<update[iBody].iNumVars;iVar++) {
      for (iEqn=0;iEqn<update[iBody].iNumEqns[iVar];iEqn++) {
        if (update[iBody].iaType[iVar][iEqn] == 0) {
          /* Explicit value, not a derivative.
           * NOTE(review): original carried an "XXX This looks broken" remark —
           * with multiple equations only the last assignment survives. */
          *(update[iBody].pdVar[iVar]) = update[iBody].daDerivProc[iVar][iEqn];
        } else {
          /* Update the parameter in the BODY struct! Be careful! */
          *(update[iBody].pdVar[iVar]) += iDir*update[iBody].daDerivProc[iVar][iEqn]*(*dDt);
        }
      }
    }
  }
}
/**
 * Take one classical 4th-order Runge-Kutta step.
 *
 * Stages: k1 at the current state, k2 and k3 at midpoints, k4 at the full
 * step; the state is then advanced by (k1 + 2*k2 + 2*k3 + k4)/6 * dt.
 * Variables of type 0/3/10 are explicit functions of age/time and carry
 * VALUES (not derivatives) through the stage arrays.
 *
 * Each stage-summation loop is parallelized over bodies with OpenMP; the
 * stage arrays are indexed by iBody so iterations are independent.
 */
void RungeKutta4Step(BODY *body,CONTROL *control,SYSTEM *system,UPDATE *update,fnUpdateVariable ***fnUpdate,double *dDt,int iDir) {
  /* Compute and apply a 4th order Runge-Kutta update step a given parameter. */
  int iBody,iVar,iEqn,iSubStep,iNumBodies,iNumVars,iNumEqns;
  double dFoo,dDelta;
  EVOLVE *evolve = &(control->Evolve); // Save Evolve as a variable for speed and legibility

  /* Create a copy of BODY array */
  BodyCopy(evolve->tmpBody,body,&control->Evolve);

  /* Derivatives at start.
   * NOTE(review): derivatives are written into tmpUpdate while reading from
   * the real body array — confirm this is the intended pairing. */
  *dDt = fdGetTimeStep(body,control,system,control->Evolve.tmpUpdate,fnUpdate);

  /* Adjust dt? */
  if (evolve->bVarDt) {
    /* This is minimum dynamical timescale */
    *dDt = AssignDt(*dDt,(control->Io.dNextOutput - evolve->dTime),evolve->dEta);
  } else {
    *dDt = evolve->dTimeStep;
  }

  evolve->dCurrentDt = *dDt;

  iNumBodies = evolve->iNumBodies;

  /* Stage 1: sum per-variable derivatives (k1) into daDeriv[0]. */
  #pragma omp parallel for num_threads(NUM_THREADS) private(iNumVars,iNumEqns,iVar,iEqn)
  for (iBody=0;iBody<iNumBodies;iBody++) {
    //int thread_num = omp_get_thread_num();
    //int cpu_num = sched_getcpu();
    //printf("Thread %3d is running on CPU %3d\n", thread_num, cpu_num);
    double daDerivVar;
    iNumVars = update[iBody].iNumVars;
    for (iVar=0;iVar<iNumVars;iVar++) {
      daDerivVar = 0;
      iNumEqns = update[iBody].iNumEqns[iVar];
      for (iEqn=0;iEqn<iNumEqns;iEqn++) {
        // XXX Set update.dDxDtModule here?
        daDerivVar += iDir*evolve->tmpUpdate[iBody].daDerivProc[iVar][iEqn];
        //evolve->daTmpVal[0][iBody][iVar] += (*dDt)*iDir*evolve->tmpUpdate[iBody].daDeriv[iVar][iEqn];
      }
      evolve->daDeriv[0][iBody][iVar] = daDerivVar;
    }
  }

  /* Move the temporary state to the first midpoint (t + dt/2). */
  for (iBody=0;iBody<iNumBodies;iBody++) {
    iNumVars = update[iBody].iNumVars;
    for (iVar=0;iVar<iNumVars;iVar++) {
      if (update[iBody].iaType[iVar][0] == 0 || update[iBody].iaType[iVar][0] == 3 || update[iBody].iaType[iVar][0] == 10){
        // LUGER: Note that this is the VALUE of the variable getting passed, contrary to what the names suggest
        // These values are updated in the tmpUpdate struct so that equations which are dependent upon them will be
        // evaluated with higher accuracy
        *(evolve->tmpUpdate[iBody].pdVar[iVar]) = evolve->daDeriv[0][iBody][iVar];
      } else {
        /* While we're in this loop, move each parameter to the midpoint of the timestep */
        *(evolve->tmpUpdate[iBody].pdVar[iVar]) = *(update[iBody].pdVar[iVar]) + 0.5*(*dDt)*evolve->daDeriv[0][iBody][iVar];
      }
    }
  }

  /* First midpoint derivative (k2). */
  PropertiesAuxiliary(evolve->tmpBody,control,system,update);
  fdGetUpdateInfo(evolve->tmpBody,control,system,evolve->tmpUpdate,fnUpdate);

  #pragma omp parallel for num_threads(NUM_THREADS) private(iNumVars,iNumEqns,iVar,iEqn)
  for (iBody=0;iBody<iNumBodies;iBody++) {
    iNumVars = update[iBody].iNumVars;
    double daDerivVar;
    for (iVar=0;iVar<iNumVars;iVar++) {
      daDerivVar = 0;
      iNumEqns = update[iBody].iNumEqns[iVar];
      for (iEqn=0;iEqn<iNumEqns;iEqn++) {
        daDerivVar += iDir*evolve->tmpUpdate[iBody].daDerivProc[iVar][iEqn];
        //evolve->daTmpVal[0][iBody][iVar] += (*dDt)*iDir*evolve->tmpUpdate[iBody].daDeriv[iVar][iEqn];
      }
      evolve->daDeriv[1][iBody][iVar] = daDerivVar;
    }
  }

  /* Move the temporary state to the second midpoint using k2. */
  for (iBody=0;iBody<iNumBodies;iBody++) {
    iNumVars = update[iBody].iNumVars;
    for (iVar=0;iVar<iNumVars;iVar++) {
      if (update[iBody].iaType[iVar][0] == 0 || update[iBody].iaType[iVar][0] == 3 || update[iBody].iaType[iVar][0] == 10){
        // LUGER: Note that this is the VALUE of the variable getting passed, contrary to what the names suggest
        // These values are updated in the tmpUpdate struct so that equations which are dependent upon them will be
        // evaluated with higher accuracy
        *(evolve->tmpUpdate[iBody].pdVar[iVar]) = evolve->daDeriv[1][iBody][iVar];
      } else {
        /* While we're in this loop, move each parameter to the midpoint
           of the timestep based on the midpoint derivative. */
        *(evolve->tmpUpdate[iBody].pdVar[iVar]) = *(update[iBody].pdVar[iVar]) + 0.5*(*dDt)*evolve->daDeriv[1][iBody][iVar];
      }
    }
  }

  /* Second midpoint derivative (k3). */
  PropertiesAuxiliary(evolve->tmpBody,control,system,update);
  fdGetUpdateInfo(evolve->tmpBody,control,system,evolve->tmpUpdate,fnUpdate);

  #pragma omp parallel for num_threads(NUM_THREADS) private(iNumVars,iNumEqns,iVar,iEqn)
  for (iBody=0;iBody<iNumBodies;iBody++) {
    iNumVars = update[iBody].iNumVars;
    double daDerivVar;
    for (iVar=0;iVar<iNumVars;iVar++) {
      daDerivVar = 0;
      iNumEqns = update[iBody].iNumEqns[iVar];
      for (iEqn=0;iEqn<iNumEqns;iEqn++) {
        daDerivVar += iDir*evolve->tmpUpdate[iBody].daDerivProc[iVar][iEqn];
        //evolve->daTmpVal[0][iBody][iVar] += (*dDt)*iDir*evolve->tmpUpdate[iBody].daDeriv[iVar][iEqn];
      }
      evolve->daDeriv[2][iBody][iVar] = daDerivVar;
    }
  }

  /* Move the temporary state to the full step using k3. */
  for (iBody=0;iBody<iNumBodies;iBody++) {
    iNumVars = update[iBody].iNumVars;
    for (iVar=0;iVar<iNumVars;iVar++) {
      if (update[iBody].iaType[iVar][0] == 0 || update[iBody].iaType[iVar][0] == 3 || update[iBody].iaType[iVar][0] == 10){
        // LUGER: Note that this is the VALUE of the variable getting passed, contrary to what the names suggest
        // These values are updated in the tmpUpdate struct so that equations which are dependent upon them will be
        // evaluated with higher accuracy
        *(evolve->tmpUpdate[iBody].pdVar[iVar]) = evolve->daDeriv[2][iBody][iVar];
      } else {
        /* While we're in this loop, move each parameter to the end of
           the timestep based on the second midpoint derivative. */
        *(evolve->tmpUpdate[iBody].pdVar[iVar]) = *(update[iBody].pdVar[iVar]) + *dDt*evolve->daDeriv[2][iBody][iVar];
      }
    }
  }

  /* Full step derivative (k4). */
  PropertiesAuxiliary(evolve->tmpBody,control,system,update);
  fdGetUpdateInfo(evolve->tmpBody,control,system,evolve->tmpUpdate,fnUpdate);

  #pragma omp parallel for num_threads(NUM_THREADS) private(iNumVars,iNumEqns,iVar,iEqn)
  for (iBody=0;iBody<iNumBodies;iBody++) {
    double daDerivVar;
    iNumVars = update[iBody].iNumVars;
    for (iVar=0;iVar<iNumVars;iVar++) {
      daDerivVar = 0;
      if (update[iBody].iaType[iVar][0] == 0 || update[iBody].iaType[iVar][0] == 3 || update[iBody].iaType[iVar][0] == 10){
        // NOTHING!
        // NOTE(review): daDeriv[3] is never written for type-0/3/10 variables
        // here, but is read in the weighted sum below — presumably that array
        // is zero-initialized elsewhere and the sum is discarded for these
        // types anyway; verify.
      } else {
        evolve->daDeriv[3][iBody][iVar] = 0;
        iNumEqns = update[iBody].iNumEqns[iVar];
        for (iEqn=0;iEqn<iNumEqns;iEqn++) {
          daDerivVar += iDir*evolve->tmpUpdate[iBody].daDerivProc[iVar][iEqn];
        }
        evolve->daDeriv[3][iBody][iVar] = daDerivVar;
      }
    }
  }

  /* Now do the update -- Note the pointer to the home of the actual variables!!! */
  for (iBody=0;iBody<iNumBodies;iBody++) {
    iNumVars = update[iBody].iNumVars;
    for (iVar=0;iVar<iNumVars;iVar++) {
      // RK4 weighted average: (k1 + 2*k2 + 2*k3 + k4)/6
      update[iBody].daDeriv[iVar] = 1./6*(evolve->daDeriv[0][iBody][iVar] + 2*evolve->daDeriv[1][iBody][iVar] +
          2*evolve->daDeriv[2][iBody][iVar] + evolve->daDeriv[3][iBody][iVar]);

      if (update[iBody].iaType[iVar][0] == 0 || update[iBody].iaType[iVar][0] == 3 || update[iBody].iaType[iVar][0] == 10){
        // LUGER: Note that this is the VALUE of the variable getting passed, contrary to what the names suggest
        *(update[iBody].pdVar[iVar]) = evolve->daDeriv[0][iBody][iVar];
      } else {
        *(update[iBody].pdVar[iVar]) += update[iBody].daDeriv[iVar]*(*dDt);
      }
    }
  }
}
/*
 * Evolution Subroutine
 */

/**
 * Master evolution loop: advances the system from dTime to dStopTime,
 * applying forced behaviors, halt checks, and periodic output after each
 * step taken by fnOneStep (Euler or RK4).
 *
 * Removed unused locals dFoo and dEqSpinRate; braced single-statement
 * conditionals; dropped commented-out debug print.
 */
void Evolve(BODY *body,CONTROL *control,FILES *files,MODULE *module,OUTPUT *output,SYSTEM *system,UPDATE *update,fnUpdateVariable ***fnUpdate,fnWriteOutput *fnWrite,fnIntegrate fnOneStep) {
  int iDir,iBody,iModule,nSteps; // Dummy counting variables
  double dDt;                    // Next timestep

  nSteps = 0;

  if (control->Evolve.bDoForward) {
    iDir = 1;
  } else {
    iDir = -1;
  }

  PropertiesAuxiliary(body,control,system,update);

  control->Io.dNextOutput = control->Evolve.dTime + control->Io.dOutputTime;

  // Get derivatives at start, useful for logging
  dDt = fdGetTimeStep(body,control,system,update,fnUpdate);

  /* Adjust dt? */
  if (control->Evolve.bVarDt) {
    /* Now choose the correct timestep */
    dDt = AssignDt(dDt,(control->Io.dNextOutput - control->Evolve.dTime),control->Evolve.dEta);
  } else {
    dDt = control->Evolve.dTimeStep;
  }

  /* Write out initial conditions */
  WriteOutput(body,control,files,output,system,update,fnWrite,control->Evolve.dTime,dDt);

  /* If Runge-Kutta need to copy actual update to that in
     control->Evolve. This transfers all the meta-data about the struct. */
  UpdateCopy(control->Evolve.tmpUpdate,update,control->Evolve.iNumBodies);

  /*
   * Main loop begins here
   */
  while (control->Evolve.dTime < control->Evolve.dStopTime) {
    /* Take one step */
    fnOneStep(body,control,system,update,fnUpdate,&dDt,iDir);

    /* Apply per-module and multi-module forced behaviors */
    for (iBody=0;iBody<control->Evolve.iNumBodies;iBody++) {
      for (iModule=0;iModule<control->Evolve.iNumModules[iBody];iModule++) {
        control->fnForceBehavior[iBody][iModule](body,module,&control->Evolve,&control->Io,system,update,fnUpdate,iBody,iModule);
      }
      for (iModule=0;iModule<control->iNumMultiForce[iBody];iModule++) {
        control->fnForceBehaviorMulti[iBody][iModule](body,module,&control->Evolve,&control->Io,system,update,fnUpdate,iBody,iModule);
      }
    }

    fdGetUpdateInfo(body,control,system,update,fnUpdate);

    /* Halt? */
    if (fbCheckHalt(body,control,update,fnUpdate)) {
      /* Refresh derivatives and write final output before stopping. */
      fdGetUpdateInfo(body,control,system,update,fnUpdate);
      WriteOutput(body,control,files,output,system,update,fnWrite,control->Evolve.dTime,control->Io.dOutputTime/control->Evolve.nSteps);
      return;
    }

    for (iBody=0;iBody<control->Evolve.iNumBodies;iBody++) {
      body[iBody].dAge += iDir*dDt;
    }

    control->Evolve.dTime += dDt;
    nSteps++;

    /* Time for Output? */
    if (control->Evolve.dTime >= control->Io.dNextOutput) {
      control->Evolve.nSteps += nSteps;
      WriteOutput(body,control,files,output,system,update,fnWrite,control->Evolve.dTime,control->Io.dOutputTime/control->Evolve.nSteps);
      // Timesteps are synchronized with the output time, so this statement is sufficient
      control->Io.dNextOutput += control->Io.dOutputTime;
      nSteps = 0;
    }

    /* Get auxiliary properties for next step -- first call was prior to loop. */
    PropertiesAuxiliary(body,control,system,update);

    // If control->Evolve.bFirstStep hasn't been switched off by now, do so.
    if (control->Evolve.bFirstStep) {
      control->Evolve.bFirstStep = 0;
    }

    // Any variables reached an interesting value?
    CheckProgress(body,control,system,update);
  }

  if (control->Io.iVerbose >= VERBPROG) {
    printf("Evolution completed.\n");
  }
}
|
avx512_gemm.h | #pragma once
#include "intgemm_config.h"
#ifdef INTGEMM_COMPILER_SUPPORTS_AVX512BW
#include "interleave.h"
#include "kernels.h"
#include "multiply.h"
#include "types.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* AVX512 implementation.
* This uses INTGEMM_AVX512BW, INTGEMM_AVX512DQ, and might use AVX512VL
* That means it supports mainstream CPUs with AVX512, starting with Skylake
* Xeons.
* It does not support any Knights / Xeon Phi processors.
*
* All memory must be 64-byte aligned.
*/
namespace intgemm {
// AVX512 has combined collapse and store instructions:
// _mm512_mask_cvtsepi32_storeu_epi16
// _mm512_mask_cvtsepi32_storeu_epi8
// So conversion in memory uses these, but I also implement a wider version for
// rearranging B.
// Convert to 16-bit signed integers.
namespace avx512f {
// Load from memory, multiply, and convert to int32_t.
/* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
// Loads 16 floats from `input`, scales by quant_mult_reg, and rounds to
// 16 packed int32s via the shared quantize kernel.
INTGEMM_AVX512BW inline __m512i QuantizerGrab(const float *input, const __m512 quant_mult_reg) {
  return kernels::quantize(loadu_ps<__m512>(input), quant_mult_reg);
}
/* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
INTGEMM_SELECT_COL_B(INTGEMM_AVX512BW, __m512i)
// For PrepareB we want to read 8 columns at a time. When converting 32-bit
// floats to 8-bit values, that's 32 bytes of floats. But AVX512 is 64 bytes
// wide so it reads off the edge of the tile. We could expand the tile size
// but then the memory written to won't be contiguous anyway so we'd be doing a
// scatter anyway. Easier to just read the 8 columns we wanted as 256 bits
// concatenate.
// Returns [first | second] as one 512-bit register (second in the upper half).
INTGEMM_AVX512DQ inline __m512 Concat(const __m256 first, const __m256 second) {
  // INTGEMM_AVX512DQ but that goes with INTGEMM_AVX512BW anyway.
  return _mm512_insertf32x8(_mm512_castps256_ps512(first), second, 1);
}
// Like QuantizerGrab, but allows 32-byte halves (i.e. 8 columns) to be controlled independently.
/* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
// Concatenates the two 8-float halves, scales by the quantization multiplier,
// and round-converts to 16 packed int32s.
INTGEMM_AVX512BW inline __m512i QuantizerGrabHalves(const float *input0, const float *input1, const __m512 quant_mult_reg) {
  return _mm512_cvtps_epi32(
      _mm512_mul_ps(avx512f::Concat(loadu_ps<__m256>(input0), loadu_ps<__m256>(input1)),
                    quant_mult_reg));
}
// These are only used for reshaping due to the AVX512 instructions
// _mm512_mask_cvtsepi32_storeu_epi16 and _mm512_mask_cvtsepi32_storeu_epi8
// being used for the quantizer.
// Quantizes tiles of 32 floats to 32 int16s for B-matrix preparation.
class QuantizeTile16 {
  public:
    typedef __m512i Register;

    /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
    INTGEMM_AVX512BW explicit QuantizeTile16(float mult) : mult_reg_(_mm512_set1_ps(mult)) {}

    // Quantize 32 consecutive floats, wrapping to the next row block when
    // fewer than 16 columns remain in the current one.
    INTGEMM_AVX512BW Register ConsecutiveWithWrapping(const float *input, Index cols_left, Index cols, Index row_step) const {
      auto input0 = input;
      auto input1 = input + 16 + (cols_left <= 16 ? cols * (row_step - 1) : 0);
      auto g0 = QuantizerGrabHalves(input0, input1, mult_reg_);
      auto g1 = QuantizerGrabHalves(input0 + 8, input1 + 8, mult_reg_);
      auto packed = packs_epi32(g0, g1);
      // packs_epi32 interleaves within 128-bit lanes; fix the 64-bit order.
      return _mm512_permutex_epi64(packed, 0xd8 /* 0, 2, 1, 3 */);
    }

    // Quantize a 32-row x 16-column slice of B into one register, in the
    // interleaved layout the multiply kernels expect.
    INTGEMM_AVX512BW inline __m512i ForReshape(const float *input, Index cols) const {
      __m512i g0 = QuantizerGrabHalves(input, input + 16 * cols, mult_reg_);
      __m512i g1 = QuantizerGrabHalves(input + 8 * cols, input + 24 * cols, mult_reg_);
      __m512i packed = packs_epi32(g0, g1);
      // Permute within 256-bit lanes, so same as INTGEMM_AVX2
      return _mm512_permutex_epi64(packed, 0xd8 /* 0, 2, 1, 3 */);
    }

  private:
    // Broadcast quantization multiplier applied before rounding.
    const __m512 mult_reg_;
};
// Quantizes tiles of 64 floats to 64 int8s for B-matrix preparation.
class QuantizeTile8 {
  public:
    typedef __m512i Register;

    /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
    INTGEMM_AVX512BW explicit QuantizeTile8(float mult) : mult_reg_(_mm512_set1_ps(mult)) {}

    // Quantize 64 consecutive floats, wrapping to the next row block whenever
    // fewer than 16 columns remain in the current one.
    INTGEMM_AVX512BW Register ConsecutiveWithWrapping(const float *input, Index cols_left, Index cols, Index row_step) const {
      static const __m512i neg127 = _mm512_set1_epi8(-127);
      static const __m512i shuffle_param = _mm512_set_epi32(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0);

      // Resolve the four 16-float source pointers, applying row wrap-around.
      const float* inputs[4];
      for (Index i = 0; i < sizeof(inputs) / sizeof(inputs[0]); ++i) {
        while (cols_left < sizeof(Register) / sizeof(float)) {
          input += cols * (row_step - 1);
          cols_left += cols;
        }
        inputs[i] = input;
        input += sizeof(Register) / sizeof(float);
        cols_left -= sizeof(Register) / sizeof(float);
      }

      auto g0 = QuantizerGrab(inputs[0], mult_reg_);
      auto g1 = QuantizerGrab(inputs[1], mult_reg_);
      auto g2 = QuantizerGrab(inputs[2], mult_reg_);
      auto g3 = QuantizerGrab(inputs[3], mult_reg_);

      // int32 -> int16 -> int8 with saturation.
      auto packed0 = packs_epi32(g0, g1);
      auto packed1 = packs_epi32(g2, g3);
      auto packed = _mm512_packs_epi16(packed0, packed1);
      // Clamp to -127 so the value range is symmetric (ban -128).
      packed = _mm512_max_epi8(packed, neg127);
      // Undo the lane interleaving introduced by the pack instructions.
      return _mm512_permutexvar_epi32(shuffle_param, packed);
    }

    // Quantize a strided slice of B into one register for reshaping.
    INTGEMM_AVX512BW inline __m512i ForReshape(const float *input, Index cols) const {
      // TODO: try alternative: _mm512_cvtsepi32_epi8 ?
      const __m512i neg127 = _mm512_set1_epi8(-127);
      // In reverse order: grabbing the first 32-bit values from each 128-bit register, then the second 32-bit values, etc.
      const __m512i shuffle_param = _mm512_set_epi32(15, 11, 7, 3, 14, 10, 6, 2, 13, 9, 5, 1, 12, 8, 4, 0);

      // 32-bit format.
      __m512i g0 = QuantizerGrabHalves(input, input + 2 * cols, mult_reg_);
      __m512i g1 = QuantizerGrabHalves(input + 16 * cols, input + 18 * cols, mult_reg_);
      __m512i g2 = QuantizerGrabHalves(input + 32 * cols, input + 34 * cols, mult_reg_);
      __m512i g3 = QuantizerGrabHalves(input + 48 * cols, input + 50 * cols, mult_reg_);
      // Pack 32-bit to 16-bit.
      __m512i packed0 = packs_epi32(g0, g1);
      __m512i packed1 = packs_epi32(g2, g3);
      // Pack 16-bit to 8-bit.
      __m512i packed = _mm512_packs_epi16(packed0, packed1);
      // Ban -128.
      packed = _mm512_max_epi8(packed, neg127);
      // 0 1 2 3 16 17 18 19 32 33 34 35 48 49 50 51 4 5 6 7 20 21 22 23 36 37 38 39 52 53 54 55 8 9 10 11 24 25 26 27 40 41 42 43 56 57 58 59 12 13 14 15 28 29 30 31 44 45 46 47 60 61 62 63
      return _mm512_permutexvar_epi32(shuffle_param, packed);
    }

  private:
    // Broadcast quantization multiplier applied before rounding.
    const __m512 mult_reg_;
};
/* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
// Instantiate the shared max-absolute and mean/stddev reductions for the
// 512-bit register width.
INTGEMM_MAXABSOLUTE(__m512, INTGEMM_AVX512BW)

INTGEMM_VECTORMEANSTD(__m512, INTGEMM_AVX512BW)

} // namespace
// 16-bit integer GEMM backend for AVX512BW CPUs: quantization, B preparation,
// column selection, and the multiply kernel (via shared macros).
struct AVX512_16bit {
  typedef int16_t Integer;

  // Currently A is prepared by quantization but this could theoretically change.
  // rows * cols must be a multiple of 16.
  /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
  INTGEMM_AVX512BW static inline void PrepareA(const float *input, int16_t *output, float quant_mult, Index rows, Index cols) {
    Quantize(input, output, quant_mult, rows * cols);
  }

  // Technically output can be unaligned in Quantize.
  // But then it will need to be aligned for Multiply.
  // size must be a multiple of 16.
  // Convert to 16-bit signed integers.
  /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
  INTGEMM_AVX512BW static void Quantize(const float *input, int16_t *output, float quant_mult, Index size) {
    assert(size % 16 == 0);
    // Input must be 64-byte aligned (file-wide contract for this header).
    assert(reinterpret_cast<uintptr_t>(input) % 64 == 0);
    // Fill with the quantization multiplier.
    const __m512 quant_mult_reg = _mm512_set1_ps(quant_mult);
    const float *end = input + size;
    for (; input != end; input += 16, output += 16) {
      // There doesn't seem to be an unmasked version, so use an all-ones mask
      // to saturate-convert and store 16 int32s as int16s in one instruction.
      _mm512_mask_cvtsepi32_storeu_epi16(output, 0xffff, avx512f::QuantizerGrab(input, quant_mult_reg));
    }
  }

  // Tile size for B; B must be a multiple of this block size.
  static const Index kBTileRow = 32;
  static const Index kBTileCol = 8;

/*
  INTGEMM_AVX512F static void PrepareB(const float *input, int16_t *output, float quant_mult, Index rows, Index cols) {
    PrepareBFor16(input, output, avx512f::QuantizeTile16(quant_mult), rows, cols);
  }
*/
  /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
  // PrepareB / transposed variants are generated by these shared macros.
  INTGEMM_PREPARE_B_16(INTGEMM_AVX512BW, avx512f::QuantizeTile16)
  INTGEMM_PREPARE_B_QUANTIZED_TRANSPOSED(INTGEMM_AVX512BW, CPUType::AVX512BW, int16_t)
  INTGEMM_PREPARE_B_TRANSPOSED(INTGEMM_AVX512BW, avx512f::QuantizeTile16, int16_t)

  /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
  // Copy selected columns of prepared B; rows*2 converts int16 rows to the
  // byte-oriented stride SelectColumnsOfB expects.
  INTGEMM_AVX512BW static void SelectColumnsB(const int16_t *input, int16_t *output, Index rows, const Index *cols_begin, const Index *cols_end) {
    avx512f::SelectColumnsOfB((const __m512i*)input, (__m512i*)output, rows * 2, cols_begin, cols_end);
  }

  /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
  INTGEMM_MULTIPLY16(__m512i, INTGEMM_AVX512BW, CPUType::AVX2)

  constexpr static const char *const kName = "16-bit AVX512";

  static const CPUType kUses = CPUType::AVX512BW;
};
// 8-bit signed-integer GEMM backend for AVX512BW.
// Quantization clamps to [-127, 127] (never -128) so that the abs/sign
// trick used in Multiply stays exact -- see QuantizeThread and Multiply.
struct AVX512_8bit {
typedef int8_t Integer;
// Currently A is prepared by quantization but this could theoretically change.
/* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
INTGEMM_AVX512BW static inline void PrepareA(const float *input, int8_t *output, float quant_mult, Index rows, Index cols) {
Quantize(input, output, quant_mult, rows * cols);
}
private:
/* g++ (Ubuntu 7.4.0-1ubuntu1~18.04.1) 7.4.0 does not carry target attributes
* to the hidden function it creates in implementing #pragma omp parallel for.
* So intrinstics were not working inside the for loop when compiled with
* OMP. Also, passing register types across #pragma omp parallel for
* generated an internal compiler error.
* The problem does not occur in g++-8 (Ubuntu 8.3.0-6ubuntu1~18.04.1) 8.3.0.
* As a workaround, I split into #pragma omp parallel with boring types
* passed across the boundary then call this function with target attributes.
*/
// Worker body for Quantize: quantizes `count` floats (a multiple of kBatch),
// sharing iterations across the enclosing omp parallel region via `omp for`.
INTGEMM_AVX512BW static void QuantizeThread(const float *input, int8_t *output, float quant_mult, std::size_t count) {
const __m512i neg127 = _mm512_set1_epi32(-127);
const __m512 quant_mult_reg = _mm512_set1_ps(quant_mult);
const std::size_t kBatch = sizeof(__m512i) / sizeof(float);
#pragma omp for
for (std::size_t i = 0; i < count; i += kBatch) {
__m512i asint = avx512f::QuantizerGrab(input + i, quant_mult_reg);
// Clamp to -127 so the value -128 never appears in quantized output.
asint = _mm512_max_epi32(asint, neg127);
// There doesn't seem to be an unmasked version.
_mm512_mask_cvtsepi32_storeu_epi8(output + i, 0xffff, asint);
}
}
public:
// Technically output can be unaligned in Quantize.
// But then it will need to be aligned for Multiply.
// Convert to 8-bit signed integers.
/* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
INTGEMM_AVX512BW static void Quantize(const float *input, int8_t *output, float quant_mult, Index size) {
assert(reinterpret_cast<uintptr_t>(input) % sizeof(__m512i) == 0);
const std::size_t kBatch = sizeof(__m512i) / sizeof(float);
// Round size down to a whole number of batches; the bulk is done in
// parallel, the remainder handled below with a masked store.
std::size_t fast_size = (size & ~(kBatch - 1));
const float *fast_input_end = input + fast_size;
int8_t *fast_output_end = output + fast_size;
#pragma omp parallel
{
QuantizeThread(input, output, quant_mult, fast_size);
}
std::size_t overhang = size & (kBatch - 1);
if (!overhang) return; // We needed a branch anyway for the empty case.
const __m512i neg127 = _mm512_set1_epi32(-127);
const __m512 quant_mult_reg = _mm512_set1_ps(quant_mult);
// Tail: quantize one final batch but only store the `overhang` lanes.
__m512i asint = avx512f::QuantizerGrab(fast_input_end, quant_mult_reg);
asint = _mm512_max_epi32(asint, neg127);
_mm512_mask_cvtsepi32_storeu_epi8(fast_output_end, (1 << overhang) - 1, asint);
}
// Preparing A for the signed/unsigned multiplication. Using add 127
/* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
INTGEMM_AVX512BW static inline void PrepareA(const float *input, uint8_t *output, float quant_mult, Index rows, Index cols) {
QuantizeU(input, output, quant_mult, rows * cols);
}
// Technically output can be unaligned in Quantize.
// But then it will need to be aligned for Multiply.
// Convert to 8-bit unsigned integers in [0, 254] by clamping to
// [-127, 127] then shifting up by 127.
/* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
INTGEMM_AVX512BW static void QuantizeU(const float *input, uint8_t *output, float quant_mult, Index size) {
assert(size % 16 == 0);
assert(reinterpret_cast<uintptr_t>(input) % 64 == 0);
const __m512i pos127 = _mm512_set1_epi32(127);
const __m512i zero = _mm512_setzero_si512();
const __m512 quant_mult_reg = _mm512_set1_ps(quant_mult);
const float *end = input + size;
for (; input < end; input += 16, output += 16) {
__m512i asint = avx512f::QuantizerGrab(input, quant_mult_reg);
asint = _mm512_min_epi32(asint, pos127);
asint = _mm512_add_epi32(asint, pos127);
asint = _mm512_max_epi32(asint, zero);
_mm512_mask_cvtusepi32_storeu_epi8(output, 0xffff, asint);
}
}
// Tile size for B; B must be a multiple of this block size.
static const Index kBTileRow = 64;
static const Index kBTileCol = 8;
/*
INTGEMM_AVX512F static void PrepareB(const float *input, int8_t *output, float quant_mult, Index rows, Index cols) {
PrepareBFor8(input, output, avx512f::QuantizeTile8(quant_mult), rows, cols);
}*/
/* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
INTGEMM_PREPARE_B_8(INTGEMM_AVX512BW, avx512f::QuantizeTile8)
INTGEMM_PREPARE_B_QUANTIZED_TRANSPOSED(INTGEMM_AVX512BW, CPUType::AVX512BW, int8_t)
INTGEMM_PREPARE_B_TRANSPOSED(INTGEMM_AVX512BW, avx512f::QuantizeTile8, int8_t)
/* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
INTGEMM_AVX512BW static void SelectColumnsB(const int8_t *input, int8_t *output, Index rows, const Index *cols_begin, const Index *cols_end) {
avx512f::SelectColumnsOfB((const __m512i*)input, (__m512i*)output, rows, cols_begin, cols_end);
}
// Special AVX512 implementation due to having 32 registers (so I don't have to
// allocate registers manually) and no sign instruction.
// Computes C = A * B with int8 inputs. _mm512_maddubs_epi16 multiplies
// unsigned-by-signed, so each row chunk of A is made non-negative with
// abs() and its sign is pushed onto the B operands via a masked negate.
template <typename Callback>
INTGEMM_AVX512BW static void Multiply(const int8_t *A, const int8_t *B, Index A_rows, Index width, Index B_cols, Callback callback) {
typedef __m512i Register;
//typedef __m256 Float; // For quantization we only do 8 at a time.
// This is copy-paste from Multiply8_SSE2OrAVX2.
assert(width % sizeof(Register) == 0);
assert(B_cols % 8 == 0);
assert(reinterpret_cast<uintptr_t>(A) % sizeof(Register) == 0);
assert(reinterpret_cast<uintptr_t>(B) % sizeof(Register) == 0);
// There's 8 results for INTGEMM_AVX2 to handle.
auto callback_impl = callbacks::CallbackImpl<CPUType::AVX2, Callback>(callback);
const int simd_width = width / sizeof(Register);
// Added for AVX512.
Register zeros = setzero_si<Register>();
// Go over 8 columns of B at a time.
#pragma omp for
for (Index B0_colidx = 0; B0_colidx < B_cols; B0_colidx += 8) {
const Register *B0_col = reinterpret_cast<const Register*>(B) + B0_colidx * simd_width;
// Process one row of A at a time. Doesn't seem to be faster to do multiple rows of A at once.
for (Index A_rowidx = 0; A_rowidx < A_rows; ++A_rowidx) {
// Iterate over shared (inner) dimension.
const Register *A_live = reinterpret_cast<const Register *>(A + A_rowidx * width);
const Register *A_end = A_live + simd_width;
const Register *B_live = B0_col;
// Do the first iteration to initialize the sums.
__m512i a = *A_live;
// Per-byte mask of the lanes where a is negative (sign bit set).
__mmask64 neg_mask = _mm512_test_epi8_mask(a, _mm512_set1_epi8(-128));
// abs() is exact here because quantization never produces -128.
__m512i a_positive = _mm512_abs_epi8(a);
// These will be packed 16-bit integers containing sums for each column of B multiplied by the row of A.
Register sum0 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[0], neg_mask, zeros, B_live[0]));
Register sum1 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[1], neg_mask, zeros, B_live[1]));
Register sum2 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[2], neg_mask, zeros, B_live[2]));
Register sum3 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[3], neg_mask, zeros, B_live[3]));
Register sum4 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[4], neg_mask, zeros, B_live[4]));
Register sum5 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[5], neg_mask, zeros, B_live[5]));
Register sum6 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[6], neg_mask, zeros, B_live[6]));
Register sum7 = maddubs_epi16(a_positive, _mm512_mask_sub_epi8(B_live[7], neg_mask, zeros, B_live[7]));
++A_live;
B_live += 8;
// Use A as the loop variable so the add can be done where gcc likes it
// for branch prediction.
for (; A_live != A_end; ++A_live, B_live += 8) {
// Unique code here: can we do an inline function?
// Retrieve a. We will use this as the unsigned part.
a = *A_live;
// Retrieve the conveniently consecutive values of B.
__m512i b0 = *B_live;
__m512i b1 = *(B_live + 1);
__m512i b2 = *(B_live + 2);
__m512i b3 = *(B_live + 3);
__m512i b4 = *(B_live + 4);
__m512i b5 = *(B_live + 5);
__m512i b6 = *(B_live + 6);
__m512i b7 = *(B_live + 7);
// Get a mask where a is negative.
// Didn't seem to make a difference defining sign bits here vs at top
neg_mask = _mm512_test_epi8_mask(a, _mm512_set1_epi8(-128));
a_positive = _mm512_abs_epi8(a);
// Negate by subtracting from zero with a mask.
b0 = _mm512_mask_sub_epi8(b0, neg_mask, zeros, b0);
b1 = _mm512_mask_sub_epi8(b1, neg_mask, zeros, b1);
b2 = _mm512_mask_sub_epi8(b2, neg_mask, zeros, b2);
b3 = _mm512_mask_sub_epi8(b3, neg_mask, zeros, b3);
b4 = _mm512_mask_sub_epi8(b4, neg_mask, zeros, b4);
b5 = _mm512_mask_sub_epi8(b5, neg_mask, zeros, b5);
b6 = _mm512_mask_sub_epi8(b6, neg_mask, zeros, b6);
b7 = _mm512_mask_sub_epi8(b7, neg_mask, zeros, b7);
// The magic 8-bit multiply then horizontal sum into 16-bit.
b0 = _mm512_maddubs_epi16(a_positive, b0);
b1 = _mm512_maddubs_epi16(a_positive, b1);
b2 = _mm512_maddubs_epi16(a_positive, b2);
b3 = _mm512_maddubs_epi16(a_positive, b3);
b4 = _mm512_maddubs_epi16(a_positive, b4);
b5 = _mm512_maddubs_epi16(a_positive, b5);
b6 = _mm512_maddubs_epi16(a_positive, b6);
b7 = _mm512_maddubs_epi16(a_positive, b7);
// Now we have 16-bit results that are the sum of two multiplies.
// Choosing to approximate and do adds.
// Perhaps every so often we could accumulate by upcasting.
// Saturating adds: large inner dimensions can clip rather than wrap.
sum0 = _mm512_adds_epi16(sum0, b0);
sum1 = _mm512_adds_epi16(sum1, b1);
sum2 = _mm512_adds_epi16(sum2, b2);
sum3 = _mm512_adds_epi16(sum3, b3);
sum4 = _mm512_adds_epi16(sum4, b4);
sum5 = _mm512_adds_epi16(sum5, b5);
sum6 = _mm512_adds_epi16(sum6, b6);
sum7 = _mm512_adds_epi16(sum7, b7);
// Unique code ends: can we do an inline function?
}
// Upcast to 32-bit and horizontally add.
Register ones = set1_epi16<Register>(1);
sum0 = madd_epi16(sum0, ones);
sum1 = madd_epi16(sum1, ones);
sum2 = madd_epi16(sum2, ones);
sum3 = madd_epi16(sum3, ones);
sum4 = madd_epi16(sum4, ones);
sum5 = madd_epi16(sum5, ones);
sum6 = madd_epi16(sum6, ones);
sum7 = madd_epi16(sum7, ones);
Register pack0123 = Pack0123(sum0, sum1, sum2, sum3);
Register pack4567 = Pack0123(sum4, sum5, sum6, sum7);
auto total = PermuteSummer(pack0123, pack4567);
callback_impl(total, callbacks::OutputBufferInfo(A_rowidx, B0_colidx, A_rows, B_cols));
}
}
}
INTGEMM_MULTIPLY8SHIFT(__m512i, INTGEMM_AVX512BW, CPUType::AVX2)
INTGEMM_PREPAREBIASFOR8(__m512i, INTGEMM_AVX512BW, CPUType::AVX2)
constexpr static const char *const kName = "8-bit AVX512BW";
static const CPUType kUses = CPUType::AVX512BW;
};
} // namespace intgemm
#endif
|
erodr.c | #include <time.h>
#include <string.h>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include "vector.h"
#include "io.h"
#include "image.h"
#include "params.h"
#include "util.h"
/*
* Particle type.
*/
typedef struct particle {
vec2 pos;        /* current position on the heightmap grid            */
vec2 dir;        /* current (normalized) movement direction           */
double vel;      /* scalar speed; feeds the sediment capacity formula */
double sediment; /* sediment currently carried by the particle        */
double water;    /* water volume; shrinks each step by evaporation    */
} particle;
/*
* gradient & height tuple.
*/
typedef struct hg_tuple {
vec2 gradient;  /* interpolated surface gradient at a position */
double height;  /* interpolated surface height at the same position */
} hg_tuple;
/*
* Bilinearly interpolate double value at (x, y) in map.
*/
/*
 * Bilinearly interpolate the double value at (pos.x, pos.y) in `map`.
 *
 * The four grid points surrounding the position are blended with the
 * fractional weights u (horizontal) and v (vertical).
 *
 * Fix: the base cell is clamped to width-2 / height-2.  The particle
 * bounds check upstream allows positions exactly equal to width-1 or
 * height-1, and the old code then read one element past the end of the
 * buffer via x_i + 1 / y_i + 1.  Interior results are unchanged; on the
 * last row/column the weight collapses onto the in-bounds samples.
 * Assumes the map is at least 2x2 -- TODO confirm loader guarantees this.
 */
double bil_interpolate_map_double(const image *map, vec2 pos) {
    double *map_buffer = (double *) map->buffer;
    int x_i = (int)pos.x;
    int y_i = (int)pos.y;
    /* Keep (x_i + 1, y_i + 1) inside the buffer. */
    if (x_i > map->width - 2)
        x_i = map->width - 2;
    if (y_i > map->height - 2)
        y_i = map->height - 2;
    double u = pos.x - x_i;   /* horizontal blend weight */
    double v = pos.y - y_i;   /* vertical blend weight   */
    double ul = map_buffer[y_i*map->width + x_i];
    double ur = map_buffer[y_i*map->width + x_i + 1];
    double ll = map_buffer[(y_i + 1)*map->width + x_i];
    double lr = map_buffer[(y_i + 1)*map->width + x_i + 1];
    double ipl_l = (1 - v) * ul + v * ll;
    double ipl_r = (1 - v) * ur + v * lr;
    return (1 - u) * ipl_l + u * ipl_r;
}
/*
 * Deposits sediment at position `pos` in heightmap `hmap`.
 * Deposition only affects the immediate neighbouring gridpoints
 * of `pos`.
 */
/*
 * Deposits `amount` of sediment at `pos` in heightmap `hmap`, split
 * bilinearly over the four surrounding grid points.
 *
 * Fix: when `pos` lies exactly on the last row or column (which the
 * simulation's bounds check permits), the x_i + 1 / y_i + 1 writes went
 * one element past the buffer.  Those contributions carry weight u or v
 * equal to 0 there, so skipping them preserves the deposited mass while
 * removing the out-of-bounds writes.
 */
void deposit(image *hmap, vec2 pos, double amount) {
    double *hmap_buffer = (double *) hmap->buffer;
    int x_i = (int)pos.x;
    int y_i = (int)pos.y;
    double u = pos.x - x_i;
    double v = pos.y - y_i;
    int has_right = x_i + 1 < hmap->width;   /* right neighbour in bounds? */
    int has_below = y_i + 1 < hmap->height;  /* lower neighbour in bounds? */
    hmap_buffer[y_i*hmap->width + x_i] += amount * (1 - u) * (1 - v);
    if (has_right)
        hmap_buffer[y_i*hmap->width + x_i + 1] += amount * u * (1 - v);
    if (has_below)
        hmap_buffer[(y_i + 1)*hmap->width + x_i] += amount * (1 - u) * v;
    if (has_right && has_below)
        hmap_buffer[(y_i + 1)*hmap->width + x_i + 1] += amount * u * v;
}
/*
 * Erodes heightmap `hmap` at position `pos` by amount `amount`.
 * Erosion is distributed over an area defined through p_radius.
 */
/*
 * Erodes heightmap `hmap` at `pos` by `amount`, distributed over a disc
 * of the given radius with a linear falloff kernel.  radius < 1 falls
 * back to a point-wise negative deposit.
 *
 * Fix: with radius == 1 every in-range grid point can lie a full radius
 * (or more) from `pos`, leaving every kernel weight -- and their sum --
 * at zero; the old normalization then divided by zero and wrote NaNs
 * into the heightmap.  A zero kernel sum now erodes nothing.
 */
void erode(image *hmap, vec2 pos, double amount, int radius) {
    double *hmap_buffer = (double *) hmap->buffer;
    if(radius < 1){
        deposit(hmap, pos, -amount);
        return;
    }
    int x0 = (int)pos.x - radius;
    int y0 = (int)pos.y - radius;
    int x_start = max(0, x0);
    int y_start = max(0, y0);
    int x_end = min(hmap->width, x0+2*radius+1);
    int y_end = min(hmap->height, y0+2*radius+1);
    /* Construct the erosion kernel: weight = max(0, radius - distance).
     * NOTE(review): VLA of (2r+1)^2 doubles -- large radii risk stack
     * overflow; confirm p_radius is bounded by the parameter parser. */
    double kernel[2*radius + 1][2*radius + 1];
    double kernel_sum = 0;
    for(int y = y_start; y < y_end; y++) {
        for(int x = x_start; x < x_end; x++) {
            double d_x = x - pos.x;
            double d_y = y - pos.y;
            double distance = sqrt(d_x*d_x + d_y*d_y);
            double w = fmax(0, radius - distance);
            kernel_sum += w;
            kernel[y-y0][x-x0] = w;
        }
    }
    /* All weights zero: nothing to erode (avoids 0/0 -> NaN below). */
    if(kernel_sum <= 0)
        return;
    /* Normalize weights and apply the change to the heightmap. */
    for(int y = y_start; y < y_end; y++) {
        for(int x = x_start; x < x_end; x++) {
            kernel[y-y0][x-x0] /= kernel_sum;
            hmap_buffer[y*hmap->width + x] -= amount * kernel[y-y0][x-x0];
        }
    }
}
/*
* Returns gradient at (int x, int y) on heightmap `hmap`.
*/
/*
 * Forward-difference gradient at integer grid point (x, y) of `hmap`.
 * On the last column (or row) the forward offset collapses to zero so
 * the difference degenerates to 0 instead of reading past the buffer.
 */
vec2 gradient_at(image *hmap, int x, int y) {
    double *cells = (double *) hmap->buffer;
    int here = y * hmap->width + x;
    int step_x = 1;
    int step_y = hmap->width;
    if (x > hmap->width - 2)
        step_x = 0;
    if (y > hmap->height - 2)
        step_y = 0;
    vec2 grad;
    grad.x = cells[here + step_x] - cells[here];
    grad.y = cells[here + step_y] - cells[here];
    return grad;
}
/*
* Returns interpolated gradient and height at (double x, double y) on
* heightmap `hmap`.
*/
/*
 * Interpolated gradient and height at real-valued position `pos` on
 * heightmap `hmap`.  The gradient blends the four surrounding grid
 * gradients bilinearly; the height reuses bil_interpolate_map_double.
 */
hg_tuple height_gradient_at(image *hmap, vec2 pos) {
    int gx = (int)pos.x;
    int gy = (int)pos.y;
    double u = pos.x - gx;  /* horizontal blend weight */
    double v = pos.y - gy;  /* vertical blend weight   */
    /* Gradients at the four corners of the containing cell. */
    vec2 g00 = gradient_at(hmap, gx, gy);
    vec2 g10 = gradient_at(hmap, gx + 1, gy);
    vec2 g01 = gradient_at(hmap, gx, gy + 1);
    vec2 g11 = gradient_at(hmap, gx + 1, gy + 1);
    /* Blend vertically along each edge, then horizontally. */
    vec2 left = add(scalar_mul(1 - v, g00), scalar_mul(v, g01));
    vec2 right = add(scalar_mul(1 - v, g10), scalar_mul(v, g11));
    hg_tuple result;
    result.gradient = add(scalar_mul(1 - u, left), scalar_mul(u, right));
    result.height = bil_interpolate_map_double(hmap, pos);
    return result;
}
/*
* Runs hydraulic erosion simulation.
*/
/*
 * Runs the hydraulic erosion simulation: spawns params->n particles at
 * random map positions and lets each one move, pick up and deposit
 * sediment for at most params->ttl steps.
 *
 * Fix: particle spawn previously scaled BOTH coordinates by the map
 * width, so on non-square maps the y coordinate could fall outside
 * [0, height-1].  x and y now use their own axis extents.
 *
 * NOTE(review): rand() is not thread-safe and deposit()/erode() update
 * the shared heightmap without synchronization inside the parallel
 * loop, so runs are only approximately reproducible -- confirm this
 * best-effort parallelism is intentional.
 */
void simulate_particles(image *hmap, sim_params *params) {
    srand(time(NULL));
    // simulate each particle
    #pragma omp parallel for
    for(int i = 0; i < params->n; i++) {
        if(!((i+1) % 10000))
            printf("Particles simulated: %d\n", i+1);
        // spawn particle at a random position inside the map.
        particle p;
        double denom_x = (RAND_MAX / ((double)hmap->width - 1.0));
        double denom_y = (RAND_MAX / ((double)hmap->height - 1.0));
        p.pos = (vec2){(double)rand() / denom_x, (double)rand() / denom_y};
        p.dir = (vec2){0, 0};
        p.vel = 0;
        p.sediment = 0;
        p.water = 1;
        for(int j = 0; j < params->ttl; j++) {
            // interpolate gradient g and height h_old at p's position.
            vec2 pos_old = p.pos;
            hg_tuple hg = height_gradient_at(hmap, pos_old);
            vec2 g = hg.gradient;
            double h_old = hg.height;
            // blend previous direction with the downhill gradient.
            p.dir = sub(
                scalar_mul(params->p_enertia, p.dir),
                scalar_mul(1 - params->p_enertia, g)
            );
            normalize(&p.dir);
            // move one unit step in the new direction.
            p.pos = add(p.pos, p.dir);
            // stop when the particle leaves the map.
            vec2 pos_new = p.pos;
            if(pos_new.x > (hmap->width-1) || pos_new.x < 0 ||
               pos_new.y > (hmap->height-1) || pos_new.y < 0)
                break;
            // height at the new position.
            double h_new = bil_interpolate_map_double(hmap, pos_new);
            double h_diff = h_new - h_old;
            // sediment capacity: scales with slope, speed and water.
            double c = fmax(-h_diff, params->p_min_slope) * p.vel * p.water * params->p_capacity;
            // deposit when moving uphill or over capacity, otherwise erode.
            if(h_diff > 0 || p.sediment > c) {
                double to_deposit = (h_diff > 0) ?
                        fmin(p.sediment, h_diff) :
                        (p.sediment - c) * params->p_deposition;
                p.sediment -= to_deposit;
                deposit(hmap, pos_old, to_deposit);
            } else {
                double to_erode = fmin((c - p.sediment) * params->p_erosion, -h_diff);
                p.sediment += to_erode;
                erode(hmap, pos_old, to_erode, params->p_radius);
            }
            // update speed (energy gained/lost over h_diff) and evaporate.
            p.vel = sqrt(p.vel*p.vel + h_diff*params->p_gravity);
            p.water *= (1 - params->p_evaporation);
        }
    }
}
/*
* Main.
*/
/*
 * Entry point: parse the command line, load the input PGM heightmap,
 * run the hydraulic-erosion simulation and write the result back out.
 */
int main(int argc, char *argv[]) {
    sim_params params = DEFAULT_PARAM;
    char filepath[FILEPATH_MAXLEN];
    char outputfilepath[FILEPATH_MAXLEN];
    bool ascii_out = false;
    image img;

    strcpy(outputfilepath, OUTPUTFILEPATH_DEFAULT);
    /* Command-line arguments (exits with usage info on error). */
    if (parse_args(argc, argv, filepath, outputfilepath, &params, &ascii_out))
        exit_with_info(1);
    /* Input heightmap. */
    if (load_pgm(filepath, &img))
        exit_with_info(1);
    /* Simulate, save, clean up. */
    simulate_particles(&img, &params);
    save_pgm(outputfilepath, &img, ascii_out);
    release_image(&img);
    return 0;
}
|
errorAbs.h | #pragma once
#include <cmath>
#include <vector>
#include <unordered_map>
#include <algorithm>
#include <memory>
#include "_cuda.h"
#include "ceilDiv.h"
#include "sum.h"
using std::vector;
using std::unique_ptr;
using std::abs;
using std::max;
// Finds absolute error between 2 vectors.
// L1 error: sum of |x[i] - y[i]| over the first N elements.
template <class T>
T errorAbs(T *x, T *y, int N) {
  T total = T();
  for (int idx = 0; idx < N; ++idx) {
    total += std::abs(x[idx] - y[idx]);
  }
  return total;
}
// L1 error between two equal-length vectors (loop inlined rather than
// delegating to the pointer overload; identical result).
template <class T>
T errorAbs(vector<T>& x, vector<T>& y) {
  T total = T();
  const int n = (int) x.size();
  for (int i = 0; i < n; ++i)
    total += std::abs(x[i] - y[i]);
  return total;
}
// L1 error between two maps, summed over the keys of `x`.
// Fixes: (1) `unordered_map` was unqualified but the file has no
// `using std::unordered_map`, so this did not compile; (2) the old
// `y[p.first]` lookup default-inserted T() into `y` for keys missing
// from it -- a surprising mutation of an input.  Missing keys are still
// treated as T(), but via find() without modifying `y`.
template <class K, class T>
T errorAbs(std::unordered_map<K, T>& x, std::unordered_map<K, T>& y) {
  T a = T();
  for (auto& p : x) {
    auto it = y.find(p.first);
    const T other = (it == y.end()) ? T() : it->second;
    a += std::abs(p.second - other);
  }
  return a;
}
// L1 error over N elements, parallelized with an OpenMP sum reduction.
template <class T>
T errorAbsOmp(T *x, T *y, int N) {
  T total = T();
  #pragma omp parallel for reduction (+:total)
  for (int idx = 0; idx < N; ++idx)
    total += std::abs(x[idx] - y[idx]);
  return total;
}
// Vector overload of the OpenMP L1 error (reduction loop inlined;
// identical result to delegating to the pointer overload).
template <class T>
T errorAbsOmp(vector<T>& x, vector<T>& y) {
  T total = T();
  const int n = (int) x.size();
  #pragma omp parallel for reduction (+:total)
  for (int i = 0; i < n; ++i)
    total += std::abs(x[i] - y[i]);
  return total;
}
// Device-side strided partial sum of |x[i] - y[i]|: each caller starts
// at its own index `i` and advances by stride `DI`, so the grid's
// threads jointly cover [0, N) without overlap.
template <class T>
__device__ T errorAbsKernelLoop(T *x, T *y, int N, int i, int DI) {
T a = T();
for (; i<N; i+=DI)
a += abs(x[i] - y[i]);
return a;
}
// CUDA kernel: each thread accumulates a strided partial L1 error, the
// block reduces the per-thread values in shared memory (sumKernelReduce),
// and thread 0 writes one partial sum per block into a[b].
// DEFINE(t, b, B, G) presumably binds thread/block index and block/grid
// size -- see _cuda.h for the exact expansion.
template <class T>
__global__ void errorAbsKernel(T *a, T *x, T *y, int N) {
DEFINE(t, b, B, G);
__shared__ T cache[_THREADS];
cache[t] = errorAbsKernelLoop(x, y, N, B*b+t, G*B);
sumKernelReduce(cache, B, t);
if (t == 0) a[b] = cache[0];
}
// Host driver for the CUDA L1-error reduction: copies x and y to the
// device, launches errorAbsKernel to produce one partial sum per block,
// then finishes the reduction on the host with sum().
//
// Fixes: the partial-sum buffer was `unique_ptr<T> a(new T[A1])`, which
// (a) allocated A1 *elements* where A1 is a byte count, and (b) paired
// new[] with unique_ptr's scalar delete -- undefined behavior on
// destruction.  It is now a unique_ptr<T[]> holding exactly `blocks`
// elements.
template <class T>
T errorAbsCuda(T *x, T *y, int N) {
  int threads = _THREADS;
  int blocks = min(ceilDiv(N, threads), _BLOCKS);
  size_t X1 = N * sizeof(T);       // bytes per input vector
  size_t A1 = blocks * sizeof(T);  // bytes of per-block partial sums
  unique_ptr<T[]> a(new T[blocks]);
  T *xD, *yD, *aD;
  TRY( cudaMalloc(&xD, X1) );
  TRY( cudaMalloc(&yD, X1) );
  TRY( cudaMalloc(&aD, A1) );
  TRY( cudaMemcpy(xD, x, X1, cudaMemcpyHostToDevice) );
  TRY( cudaMemcpy(yD, y, X1, cudaMemcpyHostToDevice) );
  errorAbsKernel<<<blocks, threads>>>(aD, xD, yD, N);
  // cudaMemcpy synchronizes with the kernel before reading aD.
  TRY( cudaMemcpy(a.get(), aD, A1, cudaMemcpyDeviceToHost) );
  TRY( cudaFree(yD) );
  TRY( cudaFree(xD) );
  TRY( cudaFree(aD) );
  return sum(a.get(), blocks);
}
// Vector convenience wrapper: forwards the raw buffers to the CUDA
// implementation.  Assumes x and y have equal length -- only x.size()
// is passed down.
template <class T>
T errorAbsCuda(vector<T>& x, vector<T>& y) {
return errorAbsCuda(x.data(), y.data(), x.size());
}
|
residual_based_bdf_scheme.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUAL_BASED_BDF_SCHEME )
#define KRATOS_RESIDUAL_BASED_BDF_SCHEME
/* System includes */
/* External includes */
/* Project includes */
#include "includes/checks.h"
#include "utilities/time_discretization.h"
#include "solving_strategies/schemes/residual_based_implicit_time_scheme.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualBasedBDFScheme
* @ingroup KratosCore
* @brief BDF integration scheme (for dynamic problems)
* @details The \f$ n \f$ order Backward Differentiation Formula (BDF) method is a two step \f$ n \f$ order accurate method.
* This scheme is designed to solve a system of the type:
*\f[
* \mathbf{M} \frac{d^2(u_{n0})}{dt^2} + \mathbf{D} \frac{d(un0)}{dt} + \mathbf{K} u_{n0} = \mathbf{f}_{ext}
* \f]
*
* If we call:
*
* - Second derivative:
* -# \f$ \ddot{u}_{ni} \f$ the second derivative at the step i
* - First derivative:
* -# \f$ \dot{u}_{ni} \f$ the first derivative at the step i
* - Variable:
* -# \f$ u_{ni} \f$ the variable at the step i
*
* Then we assume:
* \f[ \frac{d^2(u_{n0})}{dt^2} \|t_{n0} = \sum_i c_i \dot{u}_{ni} \f]
* \f[ \frac{d(u_{n0})}{dt} \|t_{n0} = \sum_i c_i u_{ni} \f]
* with for order 2 (BDF2):
* -# \f$ c_0 = \frac{1.5}{dt} \f$
* -# \f$ c_1 = \frac{-2.0}{dt} \f$
* -# \f$ c_2 = \frac{0.5}{dt} \f$
*
* The LHS and RHS can be defined as:
* \f[ RHS = \mathbf{f}_{ext} - \mathbf{M} \frac{d(\dot{u}_{n0})}{dt} - \mathbf{D} \frac{d(u_{n0})}{dt} - \mathbf{K} u_{n0} \f]
* and
* \f[ LHS = \frac{d(-RHS)}{d(u_{n0})} = c_0^2 \mathbf{M} + c_0 \mathbf{D} + K \f]
* @note This implies that elements are expected to be written in terms
* of a variable with two time derivatives
* <a href="https://mediatum.ub.tum.de/doc/1223319/80942.pdf">Main reference</a>
* @todo Create a BibTeX file https://www.stack.nl/~dimitri/doxygen/manual/commands.html#cmdcite
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace, class TDenseSpace>
class ResidualBasedBDFScheme
: public ResidualBasedImplicitTimeScheme<TSparseSpace, TDenseSpace>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION( ResidualBasedBDFScheme );
typedef Scheme<TSparseSpace,TDenseSpace> BaseType;
typedef typename BaseType::Pointer BaseTypePointer;
typedef ResidualBasedImplicitTimeScheme<TSparseSpace,TDenseSpace> ImplicitBaseType;
typedef typename ImplicitBaseType::TDataType TDataType;
typedef typename ImplicitBaseType::DofsArrayType DofsArrayType;
typedef typename Element::DofsVectorType DofsVectorType;
typedef typename ImplicitBaseType::TSystemMatrixType TSystemMatrixType;
typedef typename ImplicitBaseType::TSystemVectorType TSystemVectorType;
typedef typename ImplicitBaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename ImplicitBaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef ModelPart::NodesContainerType NodesArrayType;
/// Definition of epsilon
static constexpr double ZeroTolerance = std::numeric_limits<double>::epsilon();
///@}
///@name Life Cycle
///@{
/**
* @brief Constructor. The BDF method
* @param Order The integration order
* @todo The ideal would be to use directly the dof or the variable itself to identify the type of variable and is derivatives
*/
// Builds the scheme for the given BDF order: allocates one scratch
// derivative vector per thread (indexed by thread id in the RHS
// assembly) and sizes the coefficient vector to Order + 1 entries.
explicit ResidualBasedBDFScheme(const std::size_t Order = 2)
:ImplicitBaseType(),
mOrder(Order),
mpBDFUtility(Kratos::make_unique<TimeDiscretization::BDF>(Order))
{
// Allocate auxiliary memory: per-thread scratch to avoid races during
// parallel element/condition assembly.
const std::size_t num_threads = ParallelUtilities::GetNumThreads();
mVector.dotun0.resize(num_threads);
mVector.dot2un0.resize(num_threads);
// Doing a minimal check (mOrder is unsigned, so this rejects 0)
KRATOS_ERROR_IF(mOrder < 1) << "ERROR:: Not possible to compute a BDF of order less than 1" << std::endl;
// We resize the BDF coefficients: order n needs n + 1 of them
if (mBDF.size() != (mOrder + 1))
mBDF.resize(mOrder + 1);
}
/** Copy constructor.
 * Copies order, coefficients and scratch vectors, then deep-copies the
 * BDF utility (a fresh instance is built for the same order rather than
 * sharing the source's pointer).
 */
explicit ResidualBasedBDFScheme(ResidualBasedBDFScheme& rOther)
:ImplicitBaseType(rOther)
,mOrder(rOther.mOrder)
,mBDF(rOther.mBDF)
,mVector(rOther.mVector)
,mpBDFUtility(nullptr)
{
// Replace the null utility with an independent copy for this instance.
Kratos::unique_ptr<TimeDiscretization::BDF> auxiliar_pointer = Kratos::make_unique<TimeDiscretization::BDF>(mOrder);
mpBDFUtility.swap(auxiliar_pointer);
}
/**
 * Clone: returns a base-class pointer to a copy of this scheme
 * (uses the copy constructor above, so the BDF utility is deep-copied).
 */
BaseTypePointer Clone() override
{
return BaseTypePointer( new ResidualBasedBDFScheme(*this) );
}
/** Destructor (members are smart pointers/containers; nothing manual to free).
*/
~ResidualBasedBDFScheme
() override {}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief Performing the update of the solution
* @details Incremental update within newton iteration. It updates the state variables at the end of the time step
* \f[ u_{n+1}^{k+1}= u_{n+1}^{k}+ \Delta u\f]
* @param rModelPart The model of the problem to solve
* @param rDofSet Set of all primary variables
* @param rA LHS matrix
* @param rDx incremental update of primary variables
* @param rb RHS Vector
*/
void Update(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
// Update of displacement (by DOF): apply the Newton increment rDx...
mpDofUpdater->UpdateDofs(rDofSet, rDx);
// ...then recompute the BDF time derivatives from the updated values.
UpdateDerivatives(rModelPart, rDofSet, rA, rDx, rb);
KRATOS_CATCH( "" );
}
/**
* @brief Performing the prediction of the solution
* @details It predicts the solution for the current step x = xold + vold * Dt
* @param rModelPart The model of the problem to solve
* @param rDofSet set of all primary variables
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
*/
// Pure-interface placeholder: prediction depends on which variables the
// derived scheme integrates, so calling the base class is an error.
void Predict(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
KRATOS_ERROR << "Calling base BDF class" << std::endl;
KRATOS_CATCH( "" );
}
/**
* @brief It initializes time step solution. Only for reasons if the time step solution is restarted
* @param rModelPart The model of the problem to solve
* @param rA LHS matrix
* @param rDx Incremental update of primary variables
* @param rb RHS Vector
* @todo I cannot find the formula for the higher orders with variable time step. I tried to deduce by myself but the result was very unstable
*/
void InitializeSolutionStep(
ModelPart& rModelPart,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
) override
{
KRATOS_TRY;
ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
ImplicitBaseType::InitializeSolutionStep(rModelPart, rA, rDx, rb);
// Recompute the BDF coefficients for the current (possibly new) time
// step and cache them locally for the LHS/RHS contributions.
mpBDFUtility->ComputeAndSaveBDFCoefficients(r_current_process_info);
mBDF = r_current_process_info[BDF_COEFFICIENTS];
// Variable-dt coefficient formulas are only implemented up to order 2.
KRATOS_WARNING_IF("ResidualBasedBDFScheme", mOrder > 2)
<< "For higher orders than 2 the time step is assumed to be constant.\n";
KRATOS_CATCH( "" );
}
/**
* @brief This function is designed to be called once to perform all the checks needed on the input provided.
* @details Checks can be "expensive" as the function is designed to catch user's errors.
* @param rModelPart The model of the problem to solve
* @return Zero means all ok
*/
int Check(const ModelPart& rModelPart) const override
{
KRATOS_TRY;
// Base-class checks first; propagate any failure code unchanged.
const int err = ImplicitBaseType::Check(rModelPart);
if(err!=0) return err;
// Verify buffer size: a BDF of order n reads n + 1 history steps.
KRATOS_ERROR_IF(rModelPart.GetBufferSize() < mOrder + 1) << "Insufficient buffer size. Buffer size should be greater than " << mOrder + 1 << ". Current size is " << rModelPart.GetBufferSize() << std::endl;
KRATOS_CATCH( "" );
return 0;
}
/// Free memory allocated by this class.
// Free memory allocated by this class (releases the DOF updater's state).
void Clear() override
{
this->mpDofUpdater->Clear();
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/**
* @brief This method provides the defaults parameters to avoid conflicts between the different constructors
* @return The default parameters
*/
Parameters GetDefaultParameters() const override
{
// Scheme-specific defaults; the base-class defaults are merged in below.
Parameters default_parameters = Parameters(R"(
{
"name" : "base_bdf_scheme",
"integration_order" : 2
})");
// Getting base class default parameters
const Parameters base_default_parameters = ImplicitBaseType::GetDefaultParameters();
default_parameters.RecursivelyAddMissingParameters(base_default_parameters);
return default_parameters;
}
/// Turn back information as a string.
/// Turn back information as a string.
std::string Info() const override
{
return "ResidualBasedBDFScheme";
}
/// Print information about this object (delegates to Info()).
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << Info();
}
/// Print object's data (no state beyond the class name is printed).
void PrintData(std::ostream& rOStream) const override
{
rOStream << Info();
}
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
// Per-thread scratch storage for element/condition derivative vectors;
// each std::vector is sized to the thread count in the constructor and
// indexed by OpenMPUtils::ThisThread() during assembly.
struct GeneralVectors
{
std::vector< Vector > dotun0; /// First derivative
std::vector< Vector > dot2un0; /// Second derivative
};
const std::size_t mOrder; /// The integration order
Vector mBDF; /// The BDF coefficients (mOrder + 1 entries; mBDF[0] multiplies the current step)
GeneralVectors mVector; /// The structure containing the derivatives
Kratos::unique_ptr<TimeDiscretization::BDF> mpBDFUtility; /// Utility to compute BDF coefficients
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief Performing the update of the derivatives
* @param rModelPart The model of the problem to solve
* @param rDofSet Set of all primary variables
* @param rA LHS matrix
* @param rDx incremental update of primary variables
* @param rb RHS Vector
*/
inline void UpdateDerivatives(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& rA,
TSystemVectorType& rDx,
TSystemVectorType& rb
)
{
// Updating time derivatives (nodally for efficiency): each node is
// independent, so the loop is parallelized; the actual formulas live
// in the derived class's UpdateFirstDerivative/UpdateSecondDerivative.
const int num_nodes = static_cast<int>( rModelPart.Nodes().size() );
// Getting first node iterator
const auto it_node_begin = rModelPart.Nodes().begin();
#pragma omp parallel for
for(int i = 0; i< num_nodes; ++i) {
auto it_node = it_node_begin + i;
UpdateFirstDerivative(it_node);
UpdateSecondDerivative(it_node);
}
}
/**
* @brief Updating first time derivative (velocity)
* @param itNode the node interator
*/
// Pure-interface placeholder: which variable holds the first derivative
// (e.g. velocity) is defined by the derived scheme.
virtual inline void UpdateFirstDerivative(NodesArrayType::iterator itNode)
{
KRATOS_ERROR << "Calling base BDF class" << std::endl;
}
/**
* @brief Updating second time derivative (acceleration)
* @param itNode the node iterator
*/
virtual inline void UpdateSecondDerivative(NodesArrayType::iterator itNode)
{
KRATOS_ERROR << "Calling base BDF class" << std::endl;
}
/**
* @brief It adds the dynamic LHS contribution of the elements
* \f[ LHS = \frac{d(-RHS)}{d(u_{n0})} = c_0^2\mathbf{M} + c_0 \mathbf{D} + \mathbf{K} \f]
* @param rLHS_Contribution The dynamic contribution for the LHS
* @param rD The damping matrix
* @param rM The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
void AddDynamicsToLHS(
LocalSystemMatrixType& rLHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
const ProcessInfo& rCurrentProcessInfo
) override
{
// LHS = K + c0^2 M + c0 D, with c0 = mBDF[0] (see class docs).
// Adding mass contribution to the dynamic stiffness
if (rM.size1() != 0) { // if M matrix declared
noalias(rLHS_Contribution) += rM * std::pow(mBDF[0], 2);
}
// Adding damping contribution
if (rD.size1() != 0) { // if D matrix declared
noalias(rLHS_Contribution) += rD * mBDF[0];
}
}
/**
* @brief It adds the dynamic RHS contribution of the objects
* \f[ \mathbf{b} - \mathbf{M} a - \mathbf{D} v \f]
* @param rObject The object to compute
* @param rRHS_Contribution The dynamic contribution for the RHS
* @param rD The damping matrix
* @param rM The mass matrix
* @param rCurrentProcessInfo The current process info instance
*/
// Shared implementation for elements and conditions: subtracts the
// inertia (M * a) and damping (D * v) forces from the RHS, using the
// per-thread scratch vectors so parallel assembly does not race.
template <class TObjectType>
void TemplateAddDynamicsToRHS(
TObjectType& rObject,
LocalSystemVectorType& rRHS_Contribution,
LocalSystemMatrixType& rD,
LocalSystemMatrixType& rM,
const ProcessInfo& rCurrentProcessInfo
)
{
// Scratch slot owned by this thread (sized in the constructor).
const std::size_t this_thread = OpenMPUtils::ThisThread();
const auto& r_const_obj_ref = rObject;
// Adding inertia contribution
if (rM.size1() != 0) {
r_const_obj_ref.GetSecondDerivativesVector(mVector.dot2un0[this_thread], 0);
noalias(rRHS_Contribution) -= prod(rM, mVector.dot2un0[this_thread]);
}
// Adding damping contribution
if (rD.size1() != 0) {
r_const_obj_ref.GetFirstDerivativesVector(mVector.dotun0[this_thread], 0);
noalias(rRHS_Contribution) -= prod(rD, mVector.dotun0[this_thread]);
}
}
/**
 * @brief It adds the dynamic RHS contribution of the elements
 * \f[ \mathbf{b} - \mathbf{M} a - \mathbf{D} v \f]
 * @param rElement The element to compute
 * @param rRHS_Contribution The dynamic contribution for the RHS
 * @param rD The damping matrix
 * @param rM The mass matrix
 * @param rCurrentProcessInfo The current process info instance
 */
void AddDynamicsToRHS(
    Element& rElement,
    LocalSystemVectorType& rRHS_Contribution,
    LocalSystemMatrixType& rD,
    LocalSystemMatrixType& rM,
    const ProcessInfo& rCurrentProcessInfo
    ) override
{
    // Delegate to the shared implementation (TObjectType deduced as Element)
    TemplateAddDynamicsToRHS(rElement, rRHS_Contribution, rD, rM, rCurrentProcessInfo);
}
/**
 * @brief It adds the dynamic RHS contribution of the conditions
 * \f[ \mathbf{b} - \mathbf{M} a - \mathbf{D} v \f]
 * @param rCondition The condition to compute
 * @param rRHS_Contribution The dynamic contribution for the RHS
 * @param rD The damping matrix
 * @param rM The mass matrix
 * @param rCurrentProcessInfo The current process info instance
 */
void AddDynamicsToRHS(
    Condition& rCondition,
    LocalSystemVectorType& rRHS_Contribution,
    LocalSystemMatrixType& rD,
    LocalSystemMatrixType& rM,
    const ProcessInfo& rCurrentProcessInfo
    ) override
{
    // Delegate to the shared implementation (TObjectType deduced as Condition)
    TemplateAddDynamicsToRHS(rCondition, rRHS_Contribution, rD, rM, rCurrentProcessInfo);
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
/// Utility class to perform the update after solving the system, will be different in MPI runs.
typename TSparseSpace::DofUpdaterPointerType mpDofUpdater = TSparseSpace::CreateDofUpdater();
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ResidualBasedBDFScheme */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_RESIDUAL_BASED_BDF_SCHEME defined */
|
Par-12-SeqForParForNestedParFor.c |
/* Test input for parallelization analysis: a sequential early-return loop
 * followed by a parallel loop containing a nested parallel loop. */
int main(int argc, char **argv) {
int a[4] = {1,2,3,4};
int b[4] = {1,1,1,1};
/* Runs once with i == 0; since 0 < 2 holds, the program always returns -1
 * here, making everything below unreachable at run time. */
for (int i = 0; i < 1; ++i) {
if (i < 2) {
return -1;
}
}
/* Outer parallel loop scaling a[]. */
#pragma omp parallel for
for (int i = 0; i < 4; ++i) {
a[i] = 3*a[i];
/* Nested parallel region. Note: distinct outer iterations all update the
 * same b[j] elements, so b[] carries a cross-iteration data race. */
#pragma omp parallel for
for(int j = 0; j < 4; ++j) {
b[j] = b[j] + a[i];
}
}
return 0;
}
|
pmtv-OpenMP.c | /*
Multiplica una matriz triangular por un vector
Para compilar usar (-lrt: real time library):
gcc -O2 archivo.c -o nombre -lrt
gcc -O2 -S archivo.c -lrt //para generar el código ensamblador
*/
#include <stdlib.h> // biblioteca con funciones atoi(), malloc() y free()
#include <stdio.h> // biblioteca donde se encuentra la función printf()
#include <time.h> // biblioteca donde se encuentra la función clock_gettime()
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
//#define PRINTF_ALL// comentar para quitar el printf ...
// que imprime todos los componentes
int main(int argc, char** argv){
int i, j;
double cgt1,cgt2; double ncgt; //para tiempo de ejecución
//Leer argumento de entrada (nº de componentes del vector)
if (argc<2){
printf("Formato:\n./pmtv-secuencial tamaño \n");
exit(-1);
}
unsigned int N = atoi(argv[1]);
double *v1;
double *v2;
double **M;
v1 = malloc(N*sizeof(double));// malloc necesita el tamaño en bytes
v2 = malloc(N*sizeof(double));
M = malloc(N*sizeof(double*));
for(i = 0; i < N; i++)
M[i] = malloc((N-i)*sizeof(double));
for(i=0; i<N; i++){
v1[i] = N*0.1+i*0.1;
for(j=0; j<N-i; j++)
M[i][j] = N*0.1-j*0.1;
}
double suma;
cgt1 = omp_get_wtime();
#pragma omp parallel for private(suma,j) shared(N,M,v1,v2) schedule(runtime)
for(i = 0; i < N; i++){
suma = 0;
for(j = 0; j < N-i; j++)
suma += M[i][j]*v1[j];
v2[i] = suma;
}
cgt2 = omp_get_wtime();
ncgt=cgt2-cgt1;
//Imprimir resultado y el tiempo de ejecución
#ifdef PRINTF_ALL
printf("Tiempo(seg.):%11.9f\t / Tamaño:%u\n",ncgt,N);
printf("Resulado:\n");
for(i=0; i<N; i++)
printf("V[%d] = %8.6f /",i,v2[i]);
printf("\n");
#else
printf("Tiempo(seg.):%11.9f\t / Tamaño Vectores:%u\t/ V[0]=%8.6f / / V[%d]=%8.6f /\n",ncgt,N,v2[0],N-1,v2[N-1]);
#endif
free(v1); // libera el espacio reservado para v1
free(v2); // libera el espacio reservado para v2
for(i = 0; i < N; i++)
free(M[i]);
free(M);
return 0;
}
|
matsub.c | #include "matrix.h"
/** \brief Subtracts a matrix from another matrix
 *
 * Handles three cases: element-wise A-B when the shapes match, column-vector
 * broadcast when B has a single column, and row-vector broadcast when B has a
 * single row. Any other shape combination (including a 1x1 B against a larger
 * A) is reported as a size mismatch.
 *
 * \param[in] A First input matrix
 * \param[in] B Second input matrix
 * \param[in] result Matrix to store the result (allocated here when NULL)
 * \return \f$ \mathbf{A}- \mathbf{B} \f$
 *
 */
MATRIX mat_sub(MATRIX A, MATRIX B, MATRIX result)
{
int i, j, m, n, o, p;
m = MatCol(A);
n = MatRow(A);
o = MatCol(B);
p = MatRow(B);
/* Lazily allocate the output when the caller did not provide one. */
if(result==NULL) if((result = mat_creat(MatRow(A), MatCol(A), UNDEFINED))==NULL)
return mat_error(MAT_MALLOC);
/* Case 1: identical shapes — plain element-wise subtraction. */
if(o==m &&p==n)
{
#pragma omp parallel for private(j)
for(i=0; i<n; ++i)
{
for(j=0; j<m; ++j)
{
result[i][j] = A[i][j]-B[i][j];
}
}
}
/* Case 2: B is a single column — subtract B[i][0] across each row i. */
else if(o==1 && p!=1)
{
#pragma omp parallel for private(j)
for(i=0; i<n; ++i)
{
for(j=0; j<m; ++j)
{
result[i][j] = A[i][j]-B[i][0];
}
}
}
/* Case 3: B is a single row — subtract B[0][j] down each column j. */
else if(p==1 && o!=1)
{
#pragma omp parallel for private(j)
for(i=0; i<n; ++i)
{
for(j=0; j<m; ++j)
{
result[i][j] = A[i][j]-B[0][j];
}
}
}
/* Incompatible shapes; gen_error() is assumed to report/abort. */
else gen_error(GEN_SIZEMISMATCH);
return result;
}
/** \brief Subtracts a scalar from a matrix
 *
 * \param[in] A Input matrix
 * \param[in] s Input scalar
 * \param[in] result Matrix to store the result (allocated here when NULL)
 * \return \f$ \mathbf{A}-s\mathbf{11}^T \f$
 *
 */
MATRIX mat_subs(MATRIX A, mtype s, MATRIX result)
{
    int r, c;
    const int nrows = MatRow(A);
    const int ncols = MatCol(A);
    /* Lazily allocate the output when the caller did not provide one. */
    if (result == NULL)
    {
        if ((result = mat_creat(MatRow(A), MatCol(A), UNDEFINED)) == NULL)
            return mat_error(MAT_MALLOC);
    }
    /* Subtract the scalar from every element, rows in parallel. */
    #pragma omp parallel for private(c)
    for (r = 0; r < nrows; ++r)
    {
        for (c = 0; c < ncols; ++c)
            result[r][c] = A[r][c] - s;
    }
    return result;
}
/** \brief Subtracts a matrix from a scalar
 *
 * \param[in] A Input matrix
 * \param[in] s Input scalar
 * \param[in] result Matrix to store the result (allocated here when NULL)
 * \return \f$ s\mathbf{11}^T -\mathbf{A} \f$
 *
 */
MATRIX mat_subs_neg(MATRIX A, mtype s, MATRIX result)
{
    int r, c;
    const int nrows = MatRow(A);
    const int ncols = MatCol(A);
    /* Lazily allocate the output when the caller did not provide one. */
    if (result == NULL)
    {
        if ((result = mat_creat(MatRow(A), MatCol(A), UNDEFINED)) == NULL)
            return mat_error(MAT_MALLOC);
    }
    /* Subtract every element from the scalar, rows in parallel. */
    #pragma omp parallel for private(c)
    for (r = 0; r < nrows; ++r)
    {
        for (c = 0; c < ncols; ++c)
            result[r][c] = s - A[r][c];
    }
    return result;
}
/** \brief Subtracts an integer vector from integer vector
 *
 * \param[in] A Input vector
 * \param[in] B Input vector (must have the same length as A)
 * \param[in] result Vector to store the result (allocated here when NULL)
 * \return \f$ \mathbf{A}-\mathbf{B} \f$, or NULL on error
 *
 */
INT_VECTOR int_vec_sub(INT_VECTOR A, INT_VECTOR B, INT_VECTOR result)
{
    int i, m;
    m = Int_VecLen(A);
    /* Check sizes before allocating or touching any element. The original
     * reported the mismatch but then ran the loop anyway, reading past the
     * end of B whenever gen_error() returns instead of aborting. */
    if (m != Int_VecLen(B))
    {
        gen_error(GEN_SIZEMISMATCH);
        return NULL;
    }
    if (result == NULL)
    {
        if ((result = int_vec_creat(m, UNDEFINED)) == NULL)
        {
            int_vec_error(INT_VEC_MALLOC);
            /* The original fell through here and dereferenced the NULL
             * result below; bail out like the mat_* counterparts do. */
            return NULL;
        }
    }
    #pragma omp parallel for
    for (i = 0; i < m; ++i) result[i] = A[i] - B[i];
    return result;
}
/** \brief Subtracts an integer from integer vector
 *
 * \param[in] A Input vector
 * \param[in] s Input scalar
 * \param[in] result Vector to store the result (allocated here when NULL)
 * \return \f$ \mathbf{A}-s\mathbf{1} \f$, or NULL on allocation failure
 *
 */
INT_VECTOR int_vec_subs(INT_VECTOR A, int s, INT_VECTOR result)
{
    int i, m;
    m = Int_VecLen(A);
    if (result == NULL)
    {
        if ((result = int_vec_creat(m, UNDEFINED)) == NULL)
        {
            int_vec_error(INT_VEC_MALLOC);
            /* The original fell through here and dereferenced the NULL
             * result below; bail out like the mat_* counterparts do. */
            return NULL;
        }
    }
    #pragma omp parallel for
    for (i = 0; i < m; ++i) result[i] = A[i] - s;
    return result;
}
/** \brief Subtracts an integer vector from an integer
 *
 * \param[in] A Input vector
 * \param[in] s Input scalar
 * \param[in] result Vector to store the result (allocated here when NULL)
 * \return \f$ s\mathbf{1}-\mathbf{A} \f$, or NULL on allocation failure
 *
 */
INT_VECTOR int_vec_subs_neg(INT_VECTOR A, int s, INT_VECTOR result)
{
    int i, m;
    m = Int_VecLen(A);
    if (result == NULL)
    {
        if ((result = int_vec_creat(m, UNDEFINED)) == NULL)
        {
            int_vec_error(INT_VEC_MALLOC);
            /* The original fell through here and dereferenced the NULL
             * result below; bail out like the mat_* counterparts do. */
            return NULL;
        }
    }
    #pragma omp parallel for
    for (i = 0; i < m; ++i) result[i] = s - A[i];
    return result;
}
|
ExceptionPropagator.h | #if !defined EXCEPTION_PROPAGATOR_H
#define EXCEPTION_PROPAGATOR_H
// Experimental class to propagate exceptions around OpenMP pragmas (and other
// constructs) that cannot cope with exceptions. Exceptions thrown inside a lambda
// function that is given to the () operator are propagated and rethrown when the
// progagation object is destroyed. Example use case:
//
// ExceptionPropagator ep;
//
// #pragma omp parallel for
// for (int i = 0; i < 10000; i ++)
// if (!ep) // finish loop ASAP if exception is pending
// ep([&] () {
// throw 42; // continue with code that might throw exceptions
// });
//
// // exception is rethrown at scope exit of ep
#include <atomic>
#include <exception>
// Propagates the first exception thrown inside the () operator and rethrows
// it when the propagator is destroyed. See the usage example above.
class ExceptionPropagator
{
public:
    ExceptionPropagator()
    {
        // ATOMIC_FLAG_INIT may only be used as the initializer of an
        // atomic_flag at its definition; passing it to the member
        // initializer (as the original did) has undefined behavior and does
        // not compile on all implementations. Clearing explicitly gives a
        // well-defined "no exception captured yet" state.
        propagateException.clear();
    }
    // Rethrows a captured exception at scope exit. noexcept(false) is needed
    // because destructors are noexcept by default. The rethrow is suppressed
    // while another exception is unwinding the stack, which would otherwise
    // call std::terminate.
    ~ExceptionPropagator() noexcept(false)
    {
        if (exception != nullptr && !std::uncaught_exception())
            std::rethrow_exception(exception);
    }
    // Runs func(), capturing the first exception thrown by any thread; the
    // test-and-set on the flag makes "first" well-defined, and later
    // exceptions are dropped.
    template <typename T> void operator () (const T &func)
    {
        try {
            func();
        } catch (...) {
            if (!atomic_flag_test_and_set(&propagateException))
                exception = std::current_exception();
        }
    }
    // Returns true iff an exception is pending.
    // NOTE(review): this reads 'exception' without synchronization while
    // another thread may be storing into it — acceptable for the documented
    // best-effort early-exit use, but confirm no stronger guarantee is
    // relied upon by callers.
    operator bool () const
    {
        return exception != nullptr;
    }
private:
    std::atomic_flag propagateException; // set once by the first catcher
    std::exception_ptr exception;        // the captured exception, if any
};
#endif
|
convolution_sgemm_pack4to1_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_pack4to1_bf16s_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
// Mat bottom_im2col(size, maxk, inch, 8u, 4, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
const float* bias = _bias;
// permute
Mat tmp;
#if __aarch64__
if (size >= 12)
tmp.create(12 * maxk, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + size % 12 % 4, 8u, 4, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + size % 4, 8u, 4, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + size % 4, 8u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 4, opt.workspace_allocator);
#else
if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + size % 4, 8u, 4, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + size % 4, 8u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 4, opt.workspace_allocator);
#endif
{
#if __aarch64__
int nn_size = size / 12;
int remain_size_start = 0;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 12;
unsigned short* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
// transpose 4x12
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.4h, v5.4h, v6.4h, v7.4h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
"st1 {v4.4h}, [%1], #8 \n"
"st1 {v1.8h}, [%1], #16 \n"
"st1 {v5.4h}, [%1], #8 \n"
"sub %0, %0, #64 \n"
"st1 {v2.8h}, [%1], #16 \n"
"st1 {v6.4h}, [%1], #8 \n"
"st1 {v3.8h}, [%1], #16 \n"
"st1 {v7.4h}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
img0 += size * 4;
}
}
}
remain_size_start += nn_size * 12;
nn_size = (size - remain_size_start) >> 3;
#else
int nn_size = size >> 3;
int remain_size_start = 0;
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
#else
unsigned short* tmpptr = tmp.channel(i / 8);
#endif
for (int q = 0; q < inch; q++)
{
const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
// transpose 4x8
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
#else
asm volatile(
"pld [%0, #256] \n"
"vld4.u16 {d0-d3}, [%0]! \n"
"pld [%0, #256] \n"
"vld4.u16 {d4-d7}, [%0] \n"
"sub %0, %0, #32 \n"
"vst1.u16 {d0}, [%1 :64]! \n"
"vst1.u16 {d4}, [%1 :64]! \n"
"vst1.u16 {d1}, [%1 :64]! \n"
"vst1.u16 {d5}, [%1 :64]! \n"
"vst1.u16 {d2}, [%1 :64]! \n"
"vst1.u16 {d6}, [%1 :64]! \n"
"vst1.u16 {d3}, [%1 :64]! \n"
"vst1.u16 {d7}, [%1 :64]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1", "q2", "q3");
#endif // __aarch64__
img0 += size * 4;
}
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
#endif
for (int q = 0; q < inch; q++)
{
const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
// transpose 4x4
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld4 {v0.4h, v1.4h, v2.4h, v3.4h}, [%0] \n"
"st1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%1], #32 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1");
#else
asm volatile(
"pld [%0, #256] \n"
"vld4.u16 {d0-d3}, [%0 :128] \n"
"vst1.u16 {d0-d3}, [%1 :128]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0", "q1");
#endif // __aarch64__
img0 += size * 4;
}
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
#endif
for (int q = 0; q < inch; q++)
{
const unsigned short* img0 = (const unsigned short*)bottom_im2col.channel(q) + i * 4;
for (int k = 0; k < maxk; k++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.4h}, [%0] \n"
"st1 {v0.4h}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #64] \n"
"vld1.u16 {d0}, [%0 :64] \n"
"vst1.u16 {d0}, [%1 :64]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0");
#endif // __aarch64__
img0 += size * 4;
}
}
}
}
int nn_outch = 0;
int remain_outch_start = 0;
#if __aarch64__
nn_outch = outch >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
unsigned short* outptr0 = top_blob.channel(p);
unsigned short* outptr1 = top_blob.channel(p + 1);
unsigned short* outptr2 = top_blob.channel(p + 2);
unsigned short* outptr3 = top_blob.channel(p + 3);
unsigned short* outptr4 = top_blob.channel(p + 4);
unsigned short* outptr5 = top_blob.channel(p + 5);
unsigned short* outptr6 = top_blob.channel(p + 6);
unsigned short* outptr7 = top_blob.channel(p + 7);
const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i + 11 < size; i += 12)
{
unsigned short* tmpptr = tmp.channel(i / 12);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v30.4s, v31.4s}, [%22] \n"
"dup v8.4s, v30.s[0] \n"
"dup v9.4s, v30.s[0] \n"
"dup v10.4s, v30.s[0] \n"
"dup v11.4s, v30.s[1] \n"
"dup v12.4s, v30.s[1] \n"
"dup v13.4s, v30.s[1] \n"
"dup v14.4s, v30.s[2] \n"
"dup v15.4s, v30.s[2] \n"
"dup v16.4s, v30.s[2] \n"
"dup v17.4s, v30.s[3] \n"
"dup v18.4s, v30.s[3] \n"
"dup v19.4s, v30.s[3] \n"
"dup v20.4s, v31.s[0] \n"
"dup v21.4s, v31.s[0] \n"
"dup v22.4s, v31.s[0] \n"
"dup v23.4s, v31.s[1] \n"
"dup v24.4s, v31.s[1] \n"
"dup v25.4s, v31.s[1] \n"
"dup v26.4s, v31.s[2] \n"
"dup v27.4s, v31.s[2] \n"
"dup v28.4s, v31.s[2] \n"
"dup v29.4s, v31.s[3] \n"
"dup v30.4s, v31.s[3] \n"
"dup v31.4s, v31.s[3] \n"
"0: \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v11.4s, v0.4s, v4.s[1] \n"
"fmla v14.4s, v0.4s, v4.s[2] \n"
"fmla v17.4s, v0.4s, v4.s[3] \n"
"fmla v20.4s, v0.4s, v5.s[0] \n"
"fmla v23.4s, v0.4s, v5.s[1] \n"
"fmla v26.4s, v0.4s, v5.s[2] \n"
"fmla v29.4s, v0.4s, v5.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v12.4s, v1.4s, v4.s[1] \n"
"fmla v15.4s, v1.4s, v4.s[2] \n"
"fmla v18.4s, v1.4s, v4.s[3] \n"
"fmla v21.4s, v1.4s, v5.s[0] \n"
"fmla v24.4s, v1.4s, v5.s[1] \n"
"fmla v27.4s, v1.4s, v5.s[2] \n"
"fmla v30.4s, v1.4s, v5.s[3] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"fmla v13.4s, v2.4s, v4.s[1] \n"
"fmla v16.4s, v2.4s, v4.s[2] \n"
"fmla v19.4s, v2.4s, v4.s[3] \n"
"fmla v22.4s, v2.4s, v5.s[0] \n"
"fmla v25.4s, v2.4s, v5.s[1] \n"
"fmla v28.4s, v2.4s, v5.s[2] \n"
"fmla v31.4s, v2.4s, v5.s[3] \n"
"fmla v8.4s, v3.4s, v6.s[0] \n"
"fmla v11.4s, v3.4s, v6.s[1] \n"
"fmla v14.4s, v3.4s, v6.s[2] \n"
"fmla v17.4s, v3.4s, v6.s[3] \n"
"fmla v20.4s, v3.4s, v7.s[0] \n"
"fmla v23.4s, v3.4s, v7.s[1] \n"
"fmla v26.4s, v3.4s, v7.s[2] \n"
"fmla v29.4s, v3.4s, v7.s[3] \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v9.4s, v0.4s, v6.s[0] \n"
"fmla v12.4s, v0.4s, v6.s[1] \n"
"fmla v15.4s, v0.4s, v6.s[2] \n"
"fmla v18.4s, v0.4s, v6.s[3] \n"
"fmla v21.4s, v0.4s, v7.s[0] \n"
"fmla v24.4s, v0.4s, v7.s[1] \n"
"fmla v27.4s, v0.4s, v7.s[2] \n"
"fmla v30.4s, v0.4s, v7.s[3] \n"
"fmla v10.4s, v1.4s, v6.s[0] \n"
"fmla v13.4s, v1.4s, v6.s[1] \n"
"fmla v16.4s, v1.4s, v6.s[2] \n"
"fmla v19.4s, v1.4s, v6.s[3] \n"
"fmla v22.4s, v1.4s, v7.s[0] \n"
"fmla v25.4s, v1.4s, v7.s[1] \n"
"fmla v28.4s, v1.4s, v7.s[2] \n"
"fmla v31.4s, v1.4s, v7.s[3] \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v2.4s, v4.s[0] \n"
"fmla v11.4s, v2.4s, v4.s[1] \n"
"fmla v14.4s, v2.4s, v4.s[2] \n"
"fmla v17.4s, v2.4s, v4.s[3] \n"
"fmla v20.4s, v2.4s, v5.s[0] \n"
"fmla v23.4s, v2.4s, v5.s[1] \n"
"fmla v26.4s, v2.4s, v5.s[2] \n"
"fmla v29.4s, v2.4s, v5.s[3] \n"
"fmla v9.4s, v3.4s, v4.s[0] \n"
"fmla v12.4s, v3.4s, v4.s[1] \n"
"fmla v15.4s, v3.4s, v4.s[2] \n"
"fmla v18.4s, v3.4s, v4.s[3] \n"
"fmla v21.4s, v3.4s, v5.s[0] \n"
"fmla v24.4s, v3.4s, v5.s[1] \n"
"fmla v27.4s, v3.4s, v5.s[2] \n"
"fmla v30.4s, v3.4s, v5.s[3] \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"fmla v10.4s, v0.4s, v4.s[0] \n"
"fmla v13.4s, v0.4s, v4.s[1] \n"
"fmla v16.4s, v0.4s, v4.s[2] \n"
"fmla v19.4s, v0.4s, v4.s[3] \n"
"fmla v22.4s, v0.4s, v5.s[0] \n"
"fmla v25.4s, v0.4s, v5.s[1] \n"
"fmla v28.4s, v0.4s, v5.s[2] \n"
"fmla v31.4s, v0.4s, v5.s[3] \n"
"fmla v8.4s, v1.4s, v6.s[0] \n"
"fmla v11.4s, v1.4s, v6.s[1] \n"
"fmla v14.4s, v1.4s, v6.s[2] \n"
"fmla v17.4s, v1.4s, v6.s[3] \n"
"fmla v20.4s, v1.4s, v7.s[0] \n"
"fmla v23.4s, v1.4s, v7.s[1] \n"
"fmla v26.4s, v1.4s, v7.s[2] \n"
"fmla v29.4s, v1.4s, v7.s[3] \n"
"fmla v9.4s, v2.4s, v6.s[0] \n"
"fmla v12.4s, v2.4s, v6.s[1] \n"
"fmla v15.4s, v2.4s, v6.s[2] \n"
"fmla v18.4s, v2.4s, v6.s[3] \n"
"fmla v21.4s, v2.4s, v7.s[0] \n"
"fmla v24.4s, v2.4s, v7.s[1] \n"
"fmla v27.4s, v2.4s, v7.s[2] \n"
"fmla v30.4s, v2.4s, v7.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v10.4s, v3.4s, v6.s[0] \n"
"fmla v13.4s, v3.4s, v6.s[1] \n"
"fmla v16.4s, v3.4s, v6.s[2] \n"
"fmla v19.4s, v3.4s, v6.s[3] \n"
"fmla v22.4s, v3.4s, v7.s[0] \n"
"fmla v25.4s, v3.4s, v7.s[1] \n"
"fmla v28.4s, v3.4s, v7.s[2] \n"
"fmla v31.4s, v3.4s, v7.s[3] \n"
"bne 0b \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"shrn v14.4h, v14.4s, #16 \n"
"shrn v15.4h, v15.4s, #16 \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v8.4h, v9.4h, v10.4h}, [%1], #24 \n"
"st1 {v11.4h, v12.4h, v13.4h}, [%2], #24 \n"
"st1 {v14.4h, v15.4h, v16.4h}, [%3], #24 \n"
"st1 {v17.4h, v18.4h, v19.4h}, [%4], #24 \n"
"st1 {v20.4h, v21.4h, v22.4h}, [%5], #24 \n"
"st1 {v23.4h, v24.4h, v25.4h}, [%6], #24 \n"
"st1 {v26.4h, v27.4h, v28.4h}, [%7], #24 \n"
"st1 {v29.4h, v30.4h, v31.4h}, [%8], #24 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(tmpptr), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(tmpptr),
"10"(kptr),
"r"(biasptr) // %22
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < size; i += 8)
{
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v30.4s, v31.4s}, [%22] \n"
"dup v16.4s, v30.s[0] \n"
"dup v17.4s, v30.s[0] \n"
"dup v18.4s, v30.s[1] \n"
"dup v19.4s, v30.s[1] \n"
"dup v20.4s, v30.s[2] \n"
"dup v21.4s, v30.s[2] \n"
"dup v22.4s, v30.s[3] \n"
"dup v23.4s, v30.s[3] \n"
"dup v24.4s, v31.s[0] \n"
"dup v25.4s, v31.s[0] \n"
"dup v26.4s, v31.s[1] \n"
"dup v27.4s, v31.s[1] \n"
"dup v28.4s, v31.s[2] \n"
"dup v29.4s, v31.s[2] \n"
"dup v30.4s, v31.s[3] \n"
"dup v31.4s, v31.s[3] \n"
"0: \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v16.4s, v0.4s, v4.s[0] \n"
"fmla v18.4s, v0.4s, v4.s[1] \n"
"fmla v20.4s, v0.4s, v4.s[2] \n"
"fmla v22.4s, v0.4s, v4.s[3] \n"
"fmla v24.4s, v0.4s, v5.s[0] \n"
"fmla v26.4s, v0.4s, v5.s[1] \n"
"fmla v28.4s, v0.4s, v5.s[2] \n"
"fmla v30.4s, v0.4s, v5.s[3] \n"
"fmla v17.4s, v1.4s, v4.s[0] \n"
"fmla v19.4s, v1.4s, v4.s[1] \n"
"fmla v21.4s, v1.4s, v4.s[2] \n"
"fmla v23.4s, v1.4s, v4.s[3] \n"
"fmla v25.4s, v1.4s, v5.s[0] \n"
"fmla v27.4s, v1.4s, v5.s[1] \n"
"fmla v29.4s, v1.4s, v5.s[2] \n"
"fmla v31.4s, v1.4s, v5.s[3] \n"
"fmla v16.4s, v2.4s, v6.s[0] \n"
"fmla v18.4s, v2.4s, v6.s[1] \n"
"fmla v20.4s, v2.4s, v6.s[2] \n"
"fmla v22.4s, v2.4s, v6.s[3] \n"
"fmla v24.4s, v2.4s, v7.s[0] \n"
"fmla v26.4s, v2.4s, v7.s[1] \n"
"fmla v28.4s, v2.4s, v7.s[2] \n"
"fmla v30.4s, v2.4s, v7.s[3] \n"
"fmla v17.4s, v3.4s, v6.s[0] \n"
"fmla v19.4s, v3.4s, v6.s[1] \n"
"fmla v21.4s, v3.4s, v6.s[2] \n"
"fmla v23.4s, v3.4s, v6.s[3] \n"
"fmla v25.4s, v3.4s, v7.s[0] \n"
"fmla v27.4s, v3.4s, v7.s[1] \n"
"fmla v29.4s, v3.4s, v7.s[2] \n"
"fmla v31.4s, v3.4s, v7.s[3] \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%9], #32 \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%10], #32 \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v12.4s, v8.s[0] \n"
"fmla v18.4s, v12.4s, v8.s[1] \n"
"fmla v20.4s, v12.4s, v8.s[2] \n"
"fmla v22.4s, v12.4s, v8.s[3] \n"
"fmla v24.4s, v12.4s, v9.s[0] \n"
"fmla v26.4s, v12.4s, v9.s[1] \n"
"fmla v28.4s, v12.4s, v9.s[2] \n"
"fmla v30.4s, v12.4s, v9.s[3] \n"
"fmla v17.4s, v13.4s, v8.s[0] \n"
"fmla v19.4s, v13.4s, v8.s[1] \n"
"fmla v21.4s, v13.4s, v8.s[2] \n"
"fmla v23.4s, v13.4s, v8.s[3] \n"
"fmla v25.4s, v13.4s, v9.s[0] \n"
"fmla v27.4s, v13.4s, v9.s[1] \n"
"fmla v29.4s, v13.4s, v9.s[2] \n"
"fmla v31.4s, v13.4s, v9.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v14.4s, v10.s[0] \n"
"fmla v18.4s, v14.4s, v10.s[1] \n"
"fmla v20.4s, v14.4s, v10.s[2] \n"
"fmla v22.4s, v14.4s, v10.s[3] \n"
"fmla v24.4s, v14.4s, v11.s[0] \n"
"fmla v26.4s, v14.4s, v11.s[1] \n"
"fmla v28.4s, v14.4s, v11.s[2] \n"
"fmla v30.4s, v14.4s, v11.s[3] \n"
"fmla v17.4s, v15.4s, v10.s[0] \n"
"fmla v19.4s, v15.4s, v10.s[1] \n"
"fmla v21.4s, v15.4s, v10.s[2] \n"
"fmla v23.4s, v15.4s, v10.s[3] \n"
"fmla v25.4s, v15.4s, v11.s[0] \n"
"fmla v27.4s, v15.4s, v11.s[1] \n"
"fmla v29.4s, v15.4s, v11.s[2] \n"
"fmla v31.4s, v15.4s, v11.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"shrn v24.4h, v24.4s, #16 \n"
"shrn v25.4h, v25.4s, #16 \n"
"shrn v26.4h, v26.4s, #16 \n"
"shrn v27.4h, v27.4s, #16 \n"
"shrn v28.4h, v28.4s, #16 \n"
"shrn v29.4h, v29.4s, #16 \n"
"shrn v30.4h, v30.4s, #16 \n"
"shrn v31.4h, v31.4s, #16 \n"
"st1 {v16.4h, v17.4h}, [%1], #16 \n"
"st1 {v18.4h, v19.4h}, [%2], #16 \n"
"st1 {v20.4h, v21.4h}, [%3], #16 \n"
"st1 {v22.4h, v23.4h}, [%4], #16 \n"
"st1 {v24.4h, v25.4h}, [%5], #16 \n"
"st1 {v26.4h, v27.4h}, [%6], #16 \n"
"st1 {v28.4h, v29.4h}, [%7], #16 \n"
"st1 {v30.4h, v31.4h}, [%8], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(tmpptr), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(tmpptr),
"10"(kptr),
"r"(biasptr) // %22
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < size; i += 4)
{
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v22.4s, v23.4s}, [%22] \n"
"dup v16.4s, v22.s[0] \n"
"dup v17.4s, v22.s[1] \n"
"dup v18.4s, v22.s[2] \n"
"dup v19.4s, v22.s[3] \n"
"dup v20.4s, v23.s[0] \n"
"dup v21.4s, v23.s[1] \n"
"dup v22.4s, v23.s[2] \n"
"dup v23.4s, v23.s[3] \n"
"0: \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%9], #32 \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v16.4s, v0.4s, v4.s[0] \n"
"fmla v17.4s, v0.4s, v4.s[1] \n"
"fmla v18.4s, v0.4s, v4.s[2] \n"
"fmla v19.4s, v0.4s, v4.s[3] \n"
"fmla v20.4s, v0.4s, v5.s[0] \n"
"fmla v21.4s, v0.4s, v5.s[1] \n"
"fmla v22.4s, v0.4s, v5.s[2] \n"
"fmla v23.4s, v0.4s, v5.s[3] \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%10], #32 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v1.4s, v6.s[0] \n"
"fmla v17.4s, v1.4s, v6.s[1] \n"
"fmla v18.4s, v1.4s, v6.s[2] \n"
"fmla v19.4s, v1.4s, v6.s[3] \n"
"fmla v20.4s, v1.4s, v7.s[0] \n"
"fmla v21.4s, v1.4s, v7.s[1] \n"
"fmla v22.4s, v1.4s, v7.s[2] \n"
"fmla v23.4s, v1.4s, v7.s[3] \n"
"fmla v16.4s, v2.4s, v8.s[0] \n"
"fmla v17.4s, v2.4s, v8.s[1] \n"
"fmla v18.4s, v2.4s, v8.s[2] \n"
"fmla v19.4s, v2.4s, v8.s[3] \n"
"fmla v20.4s, v2.4s, v9.s[0] \n"
"fmla v21.4s, v2.4s, v9.s[1] \n"
"fmla v22.4s, v2.4s, v9.s[2] \n"
"fmla v23.4s, v2.4s, v9.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v3.4s, v10.s[0] \n"
"fmla v17.4s, v3.4s, v10.s[1] \n"
"fmla v18.4s, v3.4s, v10.s[2] \n"
"fmla v19.4s, v3.4s, v10.s[3] \n"
"fmla v20.4s, v3.4s, v11.s[0] \n"
"fmla v21.4s, v3.4s, v11.s[1] \n"
"fmla v22.4s, v3.4s, v11.s[2] \n"
"fmla v23.4s, v3.4s, v11.s[3] \n"
"bne 0b \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"shrn v20.4h, v20.4s, #16 \n"
"shrn v21.4h, v21.4s, #16 \n"
"shrn v22.4h, v22.4s, #16 \n"
"shrn v23.4h, v23.4s, #16 \n"
"st1 {v16.4h}, [%1], #8 \n"
"st1 {v17.4h}, [%2], #8 \n"
"st1 {v18.4h}, [%3], #8 \n"
"st1 {v19.4h}, [%4], #8 \n"
"st1 {v20.4h}, [%5], #8 \n"
"st1 {v21.4h}, [%6], #8 \n"
"st1 {v22.4h}, [%7], #8 \n"
"st1 {v23.4h}, [%8], #8 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(tmpptr), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(tmpptr),
"10"(kptr),
"r"(biasptr) // %22
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i < size; i++)
{
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v16.4s, v17.4s}, [%22] \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #64] \n"
"ld1 {v0.4h}, [%9], #8 \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%10], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v16.4s, v4.4s, v0.s[0] \n"
"fmla v17.4s, v5.4s, v0.s[0] \n"
"fmla v18.4s, v6.4s, v0.s[1] \n"
"fmla v19.4s, v7.4s, v0.s[1] \n"
"prfm pldl1keep, [%10, #256] \n"
"ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [%10], #32 \n"
"shll v8.4s, v8.4h, #16 \n"
"shll v9.4s, v9.4h, #16 \n"
"shll v10.4s, v10.4h, #16 \n"
"shll v11.4s, v11.4h, #16 \n"
"fmla v16.4s, v8.4s, v0.s[2] \n"
"fmla v17.4s, v9.4s, v0.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v18.4s, v10.4s, v0.s[3] \n"
"fmla v19.4s, v11.4s, v0.s[3] \n"
"bne 0b \n"
"fadd v16.4s, v16.4s, v18.4s \n"
"fadd v17.4s, v17.4s, v19.4s \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"st1 {v16.h}[0], [%1], #2 \n"
"st1 {v16.h}[1], [%2], #2 \n"
"st1 {v16.h}[2], [%3], #2 \n"
"st1 {v16.h}[3], [%4], #2 \n"
"st1 {v17.h}[0], [%5], #2 \n"
"st1 {v17.h}[1], [%6], #2 \n"
"st1 {v17.h}[2], [%7], #2 \n"
"st1 {v17.h}[3], [%8], #2 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(outptr4), // %5
"=r"(outptr5), // %6
"=r"(outptr6), // %7
"=r"(outptr7), // %8
"=r"(tmpptr), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(outptr4),
"6"(outptr5),
"7"(outptr6),
"8"(outptr7),
"9"(tmpptr),
"10"(kptr),
"r"(biasptr) // %22
: "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19");
}
}
remain_outch_start += nn_outch << 3;
nn_outch = (outch - remain_outch_start) >> 2;
#else // __aarch64__
nn_outch = outch >> 2;
#endif // __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
unsigned short* outptr0 = top_blob.channel(p);
unsigned short* outptr1 = top_blob.channel(p + 1);
unsigned short* outptr2 = top_blob.channel(p + 2);
unsigned short* outptr3 = top_blob.channel(p + 3);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
#if __aarch64__
for (; i + 11 < size; i += 12)
{
unsigned short* tmpptr = tmp.channel(i / 12);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v19.4s}, [%14] \n"
"dup v8.4s, v19.s[0] \n"
"dup v9.4s, v19.s[0] \n"
"dup v10.4s, v19.s[0] \n"
"dup v11.4s, v19.s[1] \n"
"dup v12.4s, v19.s[1] \n"
"dup v13.4s, v19.s[1] \n"
"dup v14.4s, v19.s[2] \n"
"dup v15.4s, v19.s[2] \n"
"dup v16.4s, v19.s[2] \n"
"dup v17.4s, v19.s[3] \n"
"dup v18.4s, v19.s[3] \n"
"dup v19.4s, v19.s[3] \n"
"0: \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v11.4s, v0.4s, v4.s[1] \n"
"fmla v14.4s, v0.4s, v4.s[2] \n"
"fmla v17.4s, v0.4s, v4.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v12.4s, v1.4s, v4.s[1] \n"
"fmla v15.4s, v1.4s, v4.s[2] \n"
"fmla v18.4s, v1.4s, v4.s[3] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"fmla v13.4s, v2.4s, v4.s[1] \n"
"fmla v16.4s, v2.4s, v4.s[2] \n"
"fmla v19.4s, v2.4s, v4.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [%5], #32 \n"
"shll v20.4s, v20.4h, #16 \n"
"shll v21.4s, v21.4h, #16 \n"
"shll v22.4s, v22.4h, #16 \n"
"shll v23.4s, v23.4h, #16 \n"
"fmla v8.4s, v3.4s, v5.s[0] \n"
"fmla v11.4s, v3.4s, v5.s[1] \n"
"fmla v14.4s, v3.4s, v5.s[2] \n"
"fmla v17.4s, v3.4s, v5.s[3] \n"
"fmla v9.4s, v20.4s, v5.s[0] \n"
"fmla v12.4s, v20.4s, v5.s[1] \n"
"fmla v15.4s, v20.4s, v5.s[2] \n"
"fmla v18.4s, v20.4s, v5.s[3] \n"
"fmla v10.4s, v21.4s, v5.s[0] \n"
"fmla v13.4s, v21.4s, v5.s[1] \n"
"fmla v16.4s, v21.4s, v5.s[2] \n"
"fmla v19.4s, v21.4s, v5.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [%5], #32 \n"
"shll v24.4s, v24.4h, #16 \n"
"shll v25.4s, v25.4h, #16 \n"
"shll v26.4s, v26.4h, #16 \n"
"shll v27.4s, v27.4h, #16 \n"
"fmla v8.4s, v22.4s, v6.s[0] \n"
"fmla v11.4s, v22.4s, v6.s[1] \n"
"fmla v14.4s, v22.4s, v6.s[2] \n"
"fmla v17.4s, v22.4s, v6.s[3] \n"
"fmla v9.4s, v23.4s, v6.s[0] \n"
"fmla v12.4s, v23.4s, v6.s[1] \n"
"fmla v15.4s, v23.4s, v6.s[2] \n"
"fmla v18.4s, v23.4s, v6.s[3] \n"
"fmla v10.4s, v24.4s, v6.s[0] \n"
"fmla v13.4s, v24.4s, v6.s[1] \n"
"fmla v16.4s, v24.4s, v6.s[2] \n"
"fmla v19.4s, v24.4s, v6.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v25.4s, v7.s[0] \n"
"fmla v11.4s, v25.4s, v7.s[1] \n"
"fmla v14.4s, v25.4s, v7.s[2] \n"
"fmla v17.4s, v25.4s, v7.s[3] \n"
"fmla v9.4s, v26.4s, v7.s[0] \n"
"fmla v12.4s, v26.4s, v7.s[1] \n"
"fmla v15.4s, v26.4s, v7.s[2] \n"
"fmla v18.4s, v26.4s, v7.s[3] \n"
"fmla v10.4s, v27.4s, v7.s[0] \n"
"fmla v13.4s, v27.4s, v7.s[1] \n"
"fmla v16.4s, v27.4s, v7.s[2] \n"
"fmla v19.4s, v27.4s, v7.s[3] \n"
"bne 0b \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"shrn v14.4h, v14.4s, #16 \n"
"shrn v15.4h, v15.4s, #16 \n"
"shrn v16.4h, v16.4s, #16 \n"
"shrn v17.4h, v17.4s, #16 \n"
"shrn v18.4h, v18.4s, #16 \n"
"shrn v19.4h, v19.4s, #16 \n"
"st1 {v8.4h, v9.4h, v10.4h}, [%1], #24 \n"
"st1 {v11.4h, v12.4h, v13.4h}, [%2], #24 \n"
"st1 {v14.4h, v15.4h, v16.4h}, [%3], #24 \n"
"st1 {v17.4h, v18.4h, v19.4h}, [%4], #24 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
}
#endif // __aarch64__
for (; i + 7 < size; i += 8)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4);
#else
unsigned short* tmpptr = tmp.channel(i / 8);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 4);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v15.4s}, [%14] \n"
"dup v8.4s, v15.s[0] \n"
"dup v9.4s, v15.s[0] \n"
"dup v10.4s, v15.s[1] \n"
"dup v11.4s, v15.s[1] \n"
"dup v12.4s, v15.s[2] \n"
"dup v13.4s, v15.s[2] \n"
"dup v14.4s, v15.s[3] \n"
"dup v15.4s, v15.s[3] \n"
"0: \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v10.4s, v0.4s, v4.s[1] \n"
"fmla v12.4s, v0.4s, v4.s[2] \n"
"fmla v14.4s, v0.4s, v4.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v11.4s, v1.4s, v4.s[1] \n"
"fmla v13.4s, v1.4s, v4.s[2] \n"
"fmla v15.4s, v1.4s, v4.s[3] \n"
"fmla v8.4s, v2.4s, v5.s[0] \n"
"fmla v10.4s, v2.4s, v5.s[1] \n"
"fmla v12.4s, v2.4s, v5.s[2] \n"
"fmla v14.4s, v2.4s, v5.s[3] \n"
"fmla v9.4s, v3.4s, v5.s[0] \n"
"fmla v11.4s, v3.4s, v5.s[1] \n"
"fmla v13.4s, v3.4s, v5.s[2] \n"
"fmla v15.4s, v3.4s, v5.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%5], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v8.4s, v16.4s, v6.s[0] \n"
"fmla v10.4s, v16.4s, v6.s[1] \n"
"fmla v12.4s, v16.4s, v6.s[2] \n"
"fmla v14.4s, v16.4s, v6.s[3] \n"
"fmla v9.4s, v17.4s, v6.s[0] \n"
"fmla v11.4s, v17.4s, v6.s[1] \n"
"fmla v13.4s, v17.4s, v6.s[2] \n"
"fmla v15.4s, v17.4s, v6.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v18.4s, v7.s[0] \n"
"fmla v10.4s, v18.4s, v7.s[1] \n"
"fmla v12.4s, v18.4s, v7.s[2] \n"
"fmla v14.4s, v18.4s, v7.s[3] \n"
"fmla v9.4s, v19.4s, v7.s[0] \n"
"fmla v11.4s, v19.4s, v7.s[1] \n"
"fmla v13.4s, v19.4s, v7.s[2] \n"
"fmla v15.4s, v19.4s, v7.s[3] \n"
"bne 0b \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"shrn v12.4h, v12.4s, #16 \n"
"shrn v13.4h, v13.4s, #16 \n"
"shrn v14.4h, v14.4s, #16 \n"
"shrn v15.4h, v15.4s, #16 \n"
"st1 {v8.4h, v9.4h}, [%1], #16 \n"
"st1 {v10.4h, v11.4h}, [%2], #16 \n"
"st1 {v12.4h, v13.4h}, [%3], #16 \n"
"st1 {v14.4h, v15.4h}, [%4], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
#else // __aarch64__
asm volatile(
"vld1.f32 {d30-d31}, [%14] \n"
"vdup.f32 q8, d30[0] \n"
"vdup.f32 q9, d30[0] \n"
"vdup.f32 q10, d30[1] \n"
"vdup.f32 q11, d30[1] \n"
"vdup.f32 q12, d31[0] \n"
"vdup.f32 q13, d31[0] \n"
"vdup.f32 q14, d31[1] \n"
"vdup.f32 q15, d31[1] \n"
"0: \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5]! \n"
"pld [%6, #256] \n"
"vld1.u16 {d12-d15}, [%6]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q10, q0, d8[1] \n"
"vmla.f32 q12, q0, d9[0] \n"
"vmla.f32 q14, q0, d9[1] \n"
"vmla.f32 q9, q1, d8[0] \n"
"vmla.f32 q11, q1, d8[1] \n"
"vmla.f32 q13, q1, d9[0] \n"
"vmla.f32 q15, q1, d9[1] \n"
"vmla.f32 q8, q2, d10[0] \n"
"vmla.f32 q10, q2, d10[1] \n"
"vmla.f32 q12, q2, d11[0] \n"
"vmla.f32 q14, q2, d11[1] \n"
"vmla.f32 q9, q3, d10[0] \n"
"vmla.f32 q11, q3, d10[1] \n"
"vmla.f32 q13, q3, d11[0] \n"
"vmla.f32 q15, q3, d11[1] \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vmla.f32 q8, q0, d12[0] \n"
"vmla.f32 q10, q0, d12[1] \n"
"vmla.f32 q12, q0, d13[0] \n"
"vmla.f32 q14, q0, d13[1] \n"
"vmla.f32 q9, q1, d12[0] \n"
"vmla.f32 q11, q1, d12[1] \n"
"vmla.f32 q13, q1, d13[0] \n"
"vmla.f32 q15, q1, d13[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q2, d14[0] \n"
"vmla.f32 q10, q2, d14[1] \n"
"vmla.f32 q12, q2, d15[0] \n"
"vmla.f32 q14, q2, d15[1] \n"
"vmla.f32 q9, q3, d14[0] \n"
"vmla.f32 q11, q3, d14[1] \n"
"vmla.f32 q13, q3, d15[0] \n"
"vmla.f32 q15, q3, d15[1] \n"
"bne 0b \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d17, q9, #16 \n"
"vshrn.u32 d20, q10, #16 \n"
"vshrn.u32 d21, q11, #16 \n"
"vshrn.u32 d24, q12, #16 \n"
"vshrn.u32 d25, q13, #16 \n"
"vshrn.u32 d28, q14, #16 \n"
"vshrn.u32 d29, q15, #16 \n"
"vst1.u16 {d16-d17}, [%1 :64]! \n"
"vst1.u16 {d20-d21}, [%2 :64]! \n"
"vst1.u16 {d24-d25}, [%3 :64]! \n"
"vst1.u16 {d28-d29}, [%4 :64]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; i + 3 < size; i += 4)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 4);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v11.4s}, [%14] \n"
"dup v8.4s, v11.s[0] \n"
"dup v9.4s, v11.s[1] \n"
"dup v10.4s, v11.s[2] \n"
"dup v11.4s, v11.s[3] \n"
"0: \n"
"prfm pldl1keep, [%5, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%5], #32 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v0.4s, v4.s[1] \n"
"fmla v10.4s, v0.4s, v4.s[2] \n"
"fmla v11.4s, v0.4s, v4.s[3] \n"
"fmla v8.4s, v1.4s, v5.s[0] \n"
"fmla v9.4s, v1.4s, v5.s[1] \n"
"fmla v10.4s, v1.4s, v5.s[2] \n"
"fmla v11.4s, v1.4s, v5.s[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v2.4s, v6.s[0] \n"
"fmla v9.4s, v2.4s, v6.s[1] \n"
"fmla v10.4s, v2.4s, v6.s[2] \n"
"fmla v11.4s, v2.4s, v6.s[3] \n"
"fmla v8.4s, v3.4s, v7.s[0] \n"
"fmla v9.4s, v3.4s, v7.s[1] \n"
"fmla v10.4s, v3.4s, v7.s[2] \n"
"fmla v11.4s, v3.4s, v7.s[3] \n"
"bne 0b \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"shrn v11.4h, v11.4s, #16 \n"
"st1 {v8.4h}, [%1], #8 \n"
"st1 {v9.4h}, [%2], #8 \n"
"st1 {v10.4h}, [%3], #8 \n"
"st1 {v11.4h}, [%4], #8 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
#else // __aarch64__
asm volatile(
"vld1.f32 {d22-d23}, [%14] \n"
"vdup.f32 q8, d22[0] \n"
"vdup.f32 q9, d22[1] \n"
"vdup.f32 q10, d23[0] \n"
"vdup.f32 q11, d23[1] \n"
"0: \n"
"pld [%5, #256] \n"
"vld1.u16 {d4-d7}, [%5]! \n"
"pld [%6, #256] \n"
"vld1.u16 {d12-d15}, [%6]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q0, d8[1] \n"
"vmla.f32 q10, q0, d9[0] \n"
"vmla.f32 q11, q0, d9[1] \n"
"vmla.f32 q8, q1, d10[0] \n"
"vmla.f32 q9, q1, d10[1] \n"
"vmla.f32 q10, q1, d11[0] \n"
"vmla.f32 q11, q1, d11[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q2, d12[0] \n"
"vmla.f32 q9, q2, d12[1] \n"
"vmla.f32 q10, q2, d13[0] \n"
"vmla.f32 q11, q2, d13[1] \n"
"vmla.f32 q8, q3, d14[0] \n"
"vmla.f32 q9, q3, d14[1] \n"
"vmla.f32 q10, q3, d15[0] \n"
"vmla.f32 q11, q3, d15[1] \n"
"bne 0b \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d18, q9, #16 \n"
"vshrn.u32 d20, q10, #16 \n"
"vshrn.u32 d22, q11, #16 \n"
"vst1.u16 {d16}, [%1 :64]! \n"
"vst1.u16 {d18}, [%2 :64]! \n"
"vst1.u16 {d20}, [%3 :64]! \n"
"vst1.u16 {d22}, [%4 :64]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif // __aarch64__
}
for (; i < size; i++)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 4);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"ld1 {v8.4s}, [%14] \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%5, #64] \n"
"ld1 {v0.4h}, [%5], #8 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [%6], #32 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"shll v5.4s, v5.4h, #16 \n"
"shll v6.4s, v6.4h, #16 \n"
"shll v7.4s, v7.4h, #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[1] \n"
"subs %w0, %w0, #1 \n"
"fmla v10.4s, v6.4s, v0.s[2] \n"
"fmla v11.4s, v7.4s, v0.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"shrn v8.4h, v8.4s, #16 \n"
"st1 {v8.h}[0], [%1], #2 \n"
"st1 {v8.h}[1], [%2], #2 \n"
"st1 {v8.h}[2], [%3], #2 \n"
"st1 {v8.h}[3], [%4], #2 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
#else // __aarch64__
asm volatile(
"vld1.f32 {d16-d17}, [%14] \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%5, #64] \n"
"vld1.u16 {d1}, [%5]! \n"
"pld [%6, #256] \n"
"vld1.u16 {d12-d15}, [%6]! \n"
"vshll.u16 q0, d1, #16 \n"
"vshll.u16 q4, d12, #16 \n"
"vshll.u16 q5, d13, #16 \n"
"vshll.u16 q6, d14, #16 \n"
"vshll.u16 q7, d15, #16 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q7, d1[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vshrn.u32 d16, q8, #16 \n"
"vst1.u16 {d16[0]}, [%1]! \n"
"vst1.u16 {d16[1]}, [%2]! \n"
"vst1.u16 {d16[2]}, [%3]! \n"
"vst1.u16 {d16[3]}, [%4]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(outptr1), // %2
"=r"(outptr2), // %3
"=r"(outptr3), // %4
"=r"(tmpptr), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(outptr0),
"2"(outptr1),
"3"(outptr2),
"4"(outptr3),
"5"(tmpptr),
"6"(kptr),
"r"(biasptr) // %14
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif // __aarch64__
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
unsigned short* outptr0 = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
int i = 0;
#if __aarch64__
for (; i + 11 < size; i += 12)
{
unsigned short* tmpptr = tmp.channel(i / 12);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
asm volatile(
"dup v8.4s, %w8 \n"
"dup v9.4s, %w8 \n"
"dup v10.4s, %w8 \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v4.4h}, [%3], #8 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%2], #32 \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v5.4s, v3.4s, v4.s[1] \n"
"fmla v6.4s, v12.4s, v4.s[1] \n"
"fmla v7.4s, v13.4s, v4.s[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [%2], #32 \n"
"shll v16.4s, v16.4h, #16 \n"
"shll v17.4s, v17.4h, #16 \n"
"shll v18.4s, v18.4h, #16 \n"
"shll v19.4s, v19.4h, #16 \n"
"fmla v8.4s, v14.4s, v4.s[2] \n"
"fmla v9.4s, v15.4s, v4.s[2] \n"
"fmla v10.4s, v16.4s, v4.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v5.4s, v17.4s, v4.s[3] \n"
"fmla v6.4s, v18.4s, v4.s[3] \n"
"fmla v7.4s, v19.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v5.4s \n"
"fadd v9.4s, v9.4s, v6.4s \n"
"fadd v10.4s, v10.4s, v7.4s \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"shrn v10.4h, v10.4s, #16 \n"
"st1 {v8.4h, v9.4h, v10.4h}, [%1], #24 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
#endif // __aarch64__
for (; i + 7 < size; i += 8)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
unsigned short* tmpptr = tmp.channel(i / 8);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 4 + p % 4);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"dup v8.4s, %w8 \n"
"dup v9.4s, %w8 \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v4.4h}, [%3], #8 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v10.4s, v2.4s, v4.s[1] \n"
"fmla v11.4s, v3.4s, v4.s[1] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [%2], #32 \n"
"shll v12.4s, v12.4h, #16 \n"
"shll v13.4s, v13.4h, #16 \n"
"shll v14.4s, v14.4h, #16 \n"
"shll v15.4s, v15.4h, #16 \n"
"fmla v8.4s, v12.4s, v4.s[2] \n"
"fmla v9.4s, v13.4s, v4.s[2] \n"
"subs %w0, %w0, #1 \n"
"fmla v10.4s, v14.4s, v4.s[3] \n"
"fmla v11.4s, v15.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"fadd v9.4s, v9.4s, v11.4s \n"
"shrn v8.4h, v8.4s, #16 \n"
"shrn v9.4h, v9.4s, #16 \n"
"st1 {v8.4h, v9.4h}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
#else // __aarch64__
asm volatile(
"vdup.f32 q8, %8 \n"
"vdup.f32 q9, %8 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2]! \n"
"pld [%3, #64] \n"
"vld1.u16 {d9}, [%3]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[0] \n"
"vmla.f32 q10, q2, d8[1] \n"
"vmla.f32 q11, q3, d8[1] \n"
"pld [%2, #256] \n"
"vld1.u16 {d28-d31}, [%2]! \n"
"vshll.u16 q12, d28, #16 \n"
"vshll.u16 q13, d29, #16 \n"
"vshll.u16 q14, d30, #16 \n"
"vshll.u16 q15, d31, #16 \n"
"vmla.f32 q8, q12, d9[0] \n"
"vmla.f32 q9, q13, d9[0] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q10, q14, d9[1] \n"
"vmla.f32 q11, q15, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q10 \n"
"vadd.f32 q9, q9, q11 \n"
"vshrn.u32 d16, q8, #16 \n"
"vshrn.u32 d17, q9, #16 \n"
"vst1.u16 {d16-d17}, [%1 :64]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; i + 3 < size; i += 4)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 4 + p % 4);
#endif
int nn = inch * maxk; // inch always > 0
#if __aarch64__
asm volatile(
"dup v8.4s, %w8 \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [%2], #32 \n"
"prfm pldl1keep, [%3, #64] \n"
"ld1 {v4.4h}, [%3], #8 \n"
"shll v0.4s, v0.4h, #16 \n"
"shll v1.4s, v1.4h, #16 \n"
"shll v2.4s, v2.4h, #16 \n"
"shll v3.4s, v3.4h, #16 \n"
"shll v4.4s, v4.4h, #16 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[1] \n"
"subs %w0, %w0, #1 \n"
"fmla v10.4s, v2.4s, v4.s[2] \n"
"fmla v11.4s, v3.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"shrn v8.4h, v8.4s, #16 \n"
"st1 {v8.4h}, [%1], #8 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11");
#else // __aarch64__
asm volatile(
"vdup.f32 q8, %8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #256] \n"
"vld1.u16 {d4-d7}, [%2]! \n"
"pld [%3, #64] \n"
"vld1.u16 {d9}, [%3]! \n"
"vshll.u16 q0, d4, #16 \n"
"vshll.u16 q1, d5, #16 \n"
"vshll.u16 q2, d6, #16 \n"
"vshll.u16 q3, d7, #16 \n"
"vshll.u16 q4, d9, #16 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q10, q2, d9[0] \n"
"vmla.f32 q11, q3, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vshrn.u32 d16, q8, #16 \n"
"vst1.u16 {d16}, [%1]! \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr),
"r"(bias0) // %8
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11");
#endif // __aarch64__
}
for (; i < size; i++)
{
#if __aarch64__
unsigned short* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
unsigned short* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4);
const unsigned short* kptr = (const unsigned short*)kernel.channel(p / 4 + p % 4);
#endif
int nn = inch * maxk; // inch always > 0
float32x4_t _sum0 = vdupq_n_f32(0.f);
for (int q = 0; q < nn; q++)
{
float32x4_t _r0 = vcvt_f32_bf16(vld1_u16(tmpptr));
float32x4_t _k0 = vcvt_f32_bf16(vld1_u16(kptr));
_sum0 = vmlaq_f32(_sum0, _r0, _k0);
kptr += 4;
tmpptr += 4;
}
#if __aarch64__
float sum0 = vaddvq_f32(_sum0);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float32x2_t _ss2 = vpadd_f32(_ss, _ss);
float sum0 = vget_lane_f32(_ss2, 0);
#endif
outptr0[0] = float32_to_bfloat16(bias0 + sum0);
outptr0++;
}
}
}
static void convolution_im2col_sgemm_transform_kernel_pack4to1_bf16s_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// interleave
// src = maxk-inch-outch
// dst = 4b-4a-maxk-inch/4a-outch/4b
Mat kernel = _kernel.reshape(maxk, inch, outch);
#if __aarch64__
kernel_tm.create(32 * maxk, inch / 4, outch / 8 + (outch % 8) / 4 + outch % 4, (size_t)2u);
#else
kernel_tm.create(16 * maxk, inch / 4, outch / 4 + outch % 4, (size_t)2u);
#endif
int q = 0;
#if __aarch64__
for (; q + 7 < outch; q += 8)
{
const Mat k0 = kernel.channel(q);
const Mat k1 = kernel.channel(q + 1);
const Mat k2 = kernel.channel(q + 2);
const Mat k3 = kernel.channel(q + 3);
const Mat k4 = kernel.channel(q + 4);
const Mat k5 = kernel.channel(q + 5);
const Mat k6 = kernel.channel(q + 6);
const Mat k7 = kernel.channel(q + 7);
unsigned short* g00 = kernel_tm.channel(q / 8);
for (int p = 0; p + 3 < inch; p += 4)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
const float* k40 = k4.row(p);
const float* k41 = k4.row(p + 1);
const float* k42 = k4.row(p + 2);
const float* k43 = k4.row(p + 3);
const float* k50 = k5.row(p);
const float* k51 = k5.row(p + 1);
const float* k52 = k5.row(p + 2);
const float* k53 = k5.row(p + 3);
const float* k60 = k6.row(p);
const float* k61 = k6.row(p + 1);
const float* k62 = k6.row(p + 2);
const float* k63 = k6.row(p + 3);
const float* k70 = k7.row(p);
const float* k71 = k7.row(p + 1);
const float* k72 = k7.row(p + 2);
const float* k73 = k7.row(p + 3);
for (int k = 0; k < maxk; k++)
{
g00[0] = float32_to_bfloat16(k00[k]);
g00[1] = float32_to_bfloat16(k10[k]);
g00[2] = float32_to_bfloat16(k20[k]);
g00[3] = float32_to_bfloat16(k30[k]);
g00[4] = float32_to_bfloat16(k40[k]);
g00[5] = float32_to_bfloat16(k50[k]);
g00[6] = float32_to_bfloat16(k60[k]);
g00[7] = float32_to_bfloat16(k70[k]);
g00[8] = float32_to_bfloat16(k01[k]);
g00[9] = float32_to_bfloat16(k11[k]);
g00[10] = float32_to_bfloat16(k21[k]);
g00[11] = float32_to_bfloat16(k31[k]);
g00[12] = float32_to_bfloat16(k41[k]);
g00[13] = float32_to_bfloat16(k51[k]);
g00[14] = float32_to_bfloat16(k61[k]);
g00[15] = float32_to_bfloat16(k71[k]);
g00[16] = float32_to_bfloat16(k02[k]);
g00[17] = float32_to_bfloat16(k12[k]);
g00[18] = float32_to_bfloat16(k22[k]);
g00[19] = float32_to_bfloat16(k32[k]);
g00[20] = float32_to_bfloat16(k42[k]);
g00[21] = float32_to_bfloat16(k52[k]);
g00[22] = float32_to_bfloat16(k62[k]);
g00[23] = float32_to_bfloat16(k72[k]);
g00[24] = float32_to_bfloat16(k03[k]);
g00[25] = float32_to_bfloat16(k13[k]);
g00[26] = float32_to_bfloat16(k23[k]);
g00[27] = float32_to_bfloat16(k33[k]);
g00[28] = float32_to_bfloat16(k43[k]);
g00[29] = float32_to_bfloat16(k53[k]);
g00[30] = float32_to_bfloat16(k63[k]);
g00[31] = float32_to_bfloat16(k73[k]);
g00 += 32;
}
}
}
#endif // __aarch64__
for (; q + 3 < outch; q += 4)
{
const Mat k0 = kernel.channel(q);
const Mat k1 = kernel.channel(q + 1);
const Mat k2 = kernel.channel(q + 2);
const Mat k3 = kernel.channel(q + 3);
#if __aarch64__
unsigned short* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4);
#else
unsigned short* g00 = kernel_tm.channel(q / 4);
#endif
for (int p = 0; p + 3 < inch; p += 4)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
const float* k10 = k1.row(p);
const float* k11 = k1.row(p + 1);
const float* k12 = k1.row(p + 2);
const float* k13 = k1.row(p + 3);
const float* k20 = k2.row(p);
const float* k21 = k2.row(p + 1);
const float* k22 = k2.row(p + 2);
const float* k23 = k2.row(p + 3);
const float* k30 = k3.row(p);
const float* k31 = k3.row(p + 1);
const float* k32 = k3.row(p + 2);
const float* k33 = k3.row(p + 3);
for (int k = 0; k < maxk; k++)
{
g00[0] = float32_to_bfloat16(k00[k]);
g00[1] = float32_to_bfloat16(k10[k]);
g00[2] = float32_to_bfloat16(k20[k]);
g00[3] = float32_to_bfloat16(k30[k]);
g00[4] = float32_to_bfloat16(k01[k]);
g00[5] = float32_to_bfloat16(k11[k]);
g00[6] = float32_to_bfloat16(k21[k]);
g00[7] = float32_to_bfloat16(k31[k]);
g00[8] = float32_to_bfloat16(k02[k]);
g00[9] = float32_to_bfloat16(k12[k]);
g00[10] = float32_to_bfloat16(k22[k]);
g00[11] = float32_to_bfloat16(k32[k]);
g00[12] = float32_to_bfloat16(k03[k]);
g00[13] = float32_to_bfloat16(k13[k]);
g00[14] = float32_to_bfloat16(k23[k]);
g00[15] = float32_to_bfloat16(k33[k]);
g00 += 16;
}
}
}
for (; q < outch; q++)
{
const Mat k0 = kernel.channel(q);
#if __aarch64__
unsigned short* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4 + q % 4);
#else
unsigned short* g00 = kernel_tm.channel(q / 4 + q % 4);
#endif
for (int p = 0; p + 3 < inch; p += 4)
{
const float* k00 = k0.row(p);
const float* k01 = k0.row(p + 1);
const float* k02 = k0.row(p + 2);
const float* k03 = k0.row(p + 3);
for (int k = 0; k < maxk; k++)
{
g00[0] = float32_to_bfloat16(k00[k]);
g00[1] = float32_to_bfloat16(k01[k]);
g00[2] = float32_to_bfloat16(k02[k]);
g00[3] = float32_to_bfloat16(k03[k]);
g00 += 4;
}
}
}
}
// im2col + packed sgemm convolution for bf16 (u16) storage:
// input feature map is elempack=4, output is elempack=1.
// Gathers strided input windows into a (size x maxk x inch) matrix, then
// delegates the matrix multiply to im2col_sgemm_pack4to1_bf16s_neon().
static void convolution_im2col_sgemm_pack4to1_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;         // number of output pixels
    const int maxk = kernel_w * kernel_h; // kernel taps per input channel

    // im2col
    // elemsize 8u = 4 packed lanes * 2-byte bf16, elempack 4
    Mat bottom_im2col(size, maxk, inch, 8u, 4, opt.workspace_allocator);
    {
        // u16 elements to skip from the end of one output row's reads to the
        // start of the next output row; *4 accounts for elempack=4
        const int gap = (w * stride_h - outw * stride_w) * 4;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            unsigned short* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    // first sample for this kernel tap (dilated offset);
                    // sptr then walks the whole image via stride_w and gap
                    const unsigned short* sptr = img.row<const unsigned short>(dilation_h * u) + dilation_w * v * 4;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        // copy 4 output pixels (4 x 4 u16 lanes) per iteration
                        for (; j + 3 < outw; j += 4)
                        {
                            uint16x4_t _val0 = vld1_u16(sptr);
                            uint16x4_t _val1 = vld1_u16(sptr + stride_w * 4);
                            uint16x4_t _val2 = vld1_u16(sptr + stride_w * 8);
                            uint16x4_t _val3 = vld1_u16(sptr + stride_w * 12);
                            vst1_u16(ptr, _val0);
                            vst1_u16(ptr + 4, _val1);
                            vst1_u16(ptr + 8, _val2);
                            vst1_u16(ptr + 12, _val3);

                            sptr += stride_w * 16;
                            ptr += 16;
                        }
                        // 2-pixel tail
                        for (; j + 1 < outw; j += 2)
                        {
                            uint16x4_t _val0 = vld1_u16(sptr);
                            uint16x4_t _val1 = vld1_u16(sptr + stride_w * 4);
                            vst1_u16(ptr, _val0);
                            vst1_u16(ptr + 4, _val1);

                            sptr += stride_w * 8;
                            ptr += 8;
                        }
                        // 1-pixel tail
                        for (; j < outw; j++)
                        {
                            uint16x4_t _val = vld1_u16(sptr);
                            vst1_u16(ptr, _val);

                            sptr += stride_w * 4;
                            ptr += 4;
                        }

                        sptr += gap; // advance to next output row
                    }
                }
            }
        }
    }

    im2col_sgemm_pack4to1_bf16s_neon(bottom_im2col, top_blob, kernel, _bias, opt);
}
|
strip_fmt_plug.c | /* STRIP cracker patch for JtR. Hacked together during September of
* 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_strip;
#elif FMT_REGISTERS_H
john_register_one(&fmt_strip);
#else
#include <string.h>
#include <stdint.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "aes.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "pbkdf2_hmac_sha1.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 4 // tuned on core i7
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "STRIP"
#define FORMAT_NAME "Password Manager"
#define FORMAT_TAG "$strip$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN 1
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define ITERATIONS 4000
#define FILE_HEADER_SZ 16
#define SQLITE_FILE_HEADER "SQLite format 3"
#define HMAC_SALT_MASK 0x3a
#define FAST_PBKDF2_ITER 2
#define SQLITE_MAX_PAGE_SIZE 65536
/* Self-test vectors: each ciphertext is "$strip$*" followed by 2048 hex
 * digits (16-byte PBKDF2 salt + first 1008 bytes of the encrypted DB page).
 * NOTE(review): the hash literals below appear wrapped across two source
 * lines here — this looks like an extraction artifact; in compilable source
 * each is a single string literal. TODO confirm against upstream. */
static struct fmt_tests strip_tests[] = {
/* test vector created by STRIP for Windows */
{"$strip$*66cd7a4ff7716f7b86cf587ce18eb39518e096eb152615ada8d007d9f035c20c711e62cbde96d8c3aad2a4658497a6119addc97ed3c970580cd666f301c63ce041a1748ee5c3861ada3cd6ee75b5d68891f731b3c2e3294b08e10ce3c23c2bfac158f8c45d0332791f64d1e3ad55e936d17a42fef5228e713b8188050c9a61c7f026af6203172cf2fc54c8b439e2260d7a00a4156713f92f8466de5c05cd8701e0d3d9cb3f392ae918e6900d5363886d4e1ed7e90da76b180ef9555c1cd358f6d1ee3755a208fee4d5aa1c776a0888200b21a3da6614d5fe2303e78c09563d862d19deecdc9f0ec7fbc015689a74f4eb477d9f22298b1b3f866ca4cb772d74821a1f8d03fd5fd0d020ffd41dd449b431ddf3bbfba3399311d9827be428202ee56e2c2a4e91f3415b4282c691f16cd447cf877b576ab963ea4ea3dc7d8c433febdc36607fd2372c4165abb59e3e75c28142f1f2575ecca6d97a9f782c3410151f8bbcbc65a42fdc59fdc4ecd8214a2bbd3a4562fac21c48f7fc69a4ecbcf664b4e435d7734fde5494e4d80019a0302e22565ed6a49b29cecf81077fd92f0105d18a421e04ee0deaca6389214abc7182db7003da7e267816531010b236eadfea20509718ff743ed5ad2828b6501dd84a371feed26f0514bbda69118a69048ebb71e3e2c54fb918422f1320724a353fe8d81a562197454d2c67443be8a4008a756aec0998386a5fd48e379befe966b42dfa6684ff049a61b51de5f874a12ab7d9ab33dc84738e036e294c22a07bebcc95be9999ab988a1fa1c944ab95be970045accb661249be8cc34fcc0680cb1aff8dfee21f586c571b1d09bf370c6fc131418201e0414acb2e4005b0b6fda1f3d73b7865823a008d1d3f45492a960dbdd6331d78d9e2e6a368f08ee3456b6d78df1d5630f825c536fff60bad23fb164d151d80a03b0c78edbfdee5c7183d7527e289428cf554ad05c9d75011f6b233744f12cd85fbb62f5d1ae22f43946f24a483a64377bf3fa16bf32cea1ab4363ef36206a5989e97ff847e5d645791571b9ecd1db194119b7663897b9175dd9cc123bcc7192eaf56d4a2779c502700e88c5c20b962943084bcdf024dc4f19ca649a860bdbd8f8f9b4a9d03027ae80f4a3168fc030859acb08a871950b024d27306cdc1a408b2b3799bb8c1f4b6ac3593aab42c962c979cd9e6f59d029f8d392315830cfcf4066bf03e0fc5c0f3630e9c796ddb38f51a2992b0a61d6ef115cb34d36c7d94b6c9d49dfe8d064d92b483f12c14fa10bf1170a575e4571836cef0a1fbf9f8b6968abda5e964bb16fd62fde1d1df0f5ee9c68ce568014f46f1717b6cd948b0da9a6f4128da338960dbbcbc9c9c3b486859c06e5e2338db3458646054ccd59bb940c7fc60
cda34f633c26dde83bb717b75fefcbd09163f147d59a6524752a47cd94", "openwall"},
/* test vector created by STRIP Password Manager (for Android) */
{"$strip$*78adb0052203efa1bd1b02cac098cc9af1bf7e84ee2eaebaaba156bdcfe729ab12ee7ba8a84e79d11dbd67eee82bcb24be99dbd5db7f4c3a62f188ce4b48edf4ebf6cbf5a5869a61f83fbdb3cb4bf79b3c2c898f422d71eab31afdf3a8d4e97204dedbe7bd8b5e4c891f4880ca917c8b2f67ca06035e7f8db1fae91c45db6a08adf96ec5ddcb9e60b648acf883a7550ea5b67e2d27623e8de315f29cba48b8b1d1bde62283615ab88293b29ad73ae404a42b13e35a95770a504d81e335c00328a6290e411fa2708a697fab7c2d17ff5d0a3fe508118bb43c3d5e72ef563e0ffd337f559085a1373651ca2b8444f4437d8ac0c19aa0a24b248d1d283062afbc3b4ccc9b1861f59518eba771f1d9707affe0222ff946da7c014265ab4ba1f6417dd22d92e4adf5b7e462588f0a42e061a3dad041cbb312d8862aed3cf490df50b710a695517b0c8771a01f82db09231d392d825f5667012e349d2ed787edf8448bbb1ff548bee3a33392cd209e8b6c1de8202f6527d354c3858b5e93790c4807a8967b4c0321ed3a1d09280921650ac33308bd04f35fb72d12ff64a05300053358c5d018a62841290f600f7df0a7371b6fac9b41133e2509cb90f774d02e7202185b9641d063ed38535afb81590bfd5ad9a90107e4ff6d097ac8f35435f307a727f5021f190fc157956414bfce4818a1e5c6af187485683498dcc1d56c074c534a99125c6cfbf5242087c6b0ae10971b0ff6114a93616e1a346a22fcac4c8f6e5c4a19f049bbc7a02d2a31d39548f12440c36dbb253299a11b630e8fd88e7bfe58545d60dce5e8566a0a190d816cb775bd859b8623a7b076bce82c52e9cff6a2d221f9d3fd888ac30c7e3000ba8ed326881ffe911e27bb8982b56caa9a12065721269976517d2862e4a486b7ed143ee42c6566bba04c41c3371220f4843f26e328c33a5fb8450dadc466202ffc5c49cc95827916771e49e0602c3f8468537a81cf2fa1db34c090fccab6254436c05657cf29c3c415bb22a42adeac7870858bf96039b81c42c3d772509fdbe9a94eaf99ee9c59bac3ea97da31e9feac14ed53a0af5c5ebd2e81e40a5140da4f8a44048d5f414b0ba9bfb8024c7abaf5346fde6368162a045d1196f81d55ed746cc6cbd7a7c9cdbfa392279169626437da15a62730c2990772e106a5b84a60edaa6c5b8030e1840aa6361f39a12121a1e33b9e63fb2867d6241de1fb6e2cd1bd9a78c7122258d052ea53a4bff4e097ed49fc17b9ec196780f4c6506e74a5abb10c2545e6f7608d2eefad179d54ad31034576be517affeb3964c65562538dd6ea7566a52c75e4df593895539609a44097cb6d31f438e8f7717ce2bf777c76c22d60b15affeb89f08084e8f316be3f4aefa4fba8ec2cc1dc84
5c7affbc0ce5ebccdbfde5ebab080a285f02bdfb76c6dbd243e5ee1e5d", "p@$$w0rD"},
{NULL}
};
/* Candidate passwords, one fixed-size buffer per key index. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* Per-index crack results set by crypt_all() (1 = password verified). */
static int *cracked;

/* Decoded ciphertext for the current salt:
 *   salt  - 16-byte PBKDF2-SHA1 salt
 *   data  - raw bytes of the encrypted first database page; note that
 *           get_salt() fills data[] starting at index 16 so that byte
 *           offsets match those used by crypt_all() (data[0..15] stay 0). */
static struct custom_salt {
	unsigned char salt[16];
	unsigned char data[1024];
} *cur_salt;
/* Format initialization: scale the key-batch sizes by the thread count
 * when built with OpenMP, then allocate the per-candidate buffers. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int threads = omp_get_max_threads();

	self->params.min_keys_per_crypt *= threads;
	/* max gets an extra OMP_SCALE factor for better load balancing */
	self->params.max_keys_per_crypt *= threads * OMP_SCALE;
#endif
	cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt);
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
}
/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(cracked);
}
/* Accept a hash iff it starts with "$strip$*" and the remainder is one
 * '*'-terminated field of exactly 2048 hex digits (salt + page data).
 * Returns 1 for valid, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *copy, *base, *field;
	int extra;
	int ok = 0;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;

	base = copy = strdup(ciphertext);
	copy += FORMAT_TAG_LEN;	/* skip over "$strip$" and first '*' */

	field = strtokm(copy, "*");	/* salt + data */
	if (field != NULL && hexlenl(field, &extra) == 2048 && !extra)
		ok = 1;

	MEM_FREE(base);
	return ok;
}
/* Decode the hex blob after "$strip$*" into a custom_salt.
 * The first 16 bytes go into cs.salt; the loop index then CONTINUES at 16,
 * so cs.data[16..1023] receives hex offsets 32..2047 and cs.data[0..15]
 * stays zeroed — this keeps byte offsets in data[] aligned with the ones
 * crypt_all() uses (data+16 ciphertext start, data+1008 IV).
 * Returns a pointer to a static buffer (JtR convention; not thread-safe,
 * but get_salt is only called from the single-threaded loader). */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN;	/* skip over "$strip$" and first '*' */
	p = strtokm(ctcopy, "*");
	/* first 32 hex digits -> 16 salt bytes */
	for (i = 0; i < 16; i++)
		cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	/* remaining digits -> data[16..1023]; i deliberately not reset */
	for (; i < 1024; i++)
		cs.data[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Make the given salt current for subsequent crypt_all() calls. */
static void set_salt(void *salt)
{
	cur_salt = salt;	/* void* converts implicitly in C */
}
/* Sanity-check a (partially) decrypted SQLite page-1 header.
 * Only bytes 16..23 of page1 are inspected. Returns 0 if the header is
 * plausible, -1 otherwise. The checks mirror what SQLite's lockBtree()
 * performs on the database header. */
static int verify_page(unsigned char *page1)
{
	uint32_t page_size;
	uint32_t usable;

	/* file-format read version (byte 19) must be 1 or 2 */
	if (page1[19] > 2)
		return -1;
	/* max/min/leaf payload fractions must be 64, 32, 32 (bytes 21..23) */
	if (page1[21] != 0100 || page1[22] != 040 || page1[23] != 040)
		return -1;
	/* big-endian 16-bit page size at offset 16; shifting the low byte
	 * by 16 maps the stored value 1 to 65536, as SQLite itself does */
	page_size = ((uint32_t)page1[16] << 8) | ((uint32_t)page1[17] << 16);
	if (page_size <= 256 || page_size > SQLITE_MAX_PAGE_SIZE)
		return -1;
	/* must be a power of two and a multiple of 8 */
	if ((page_size & (page_size - 1)) != 0 || (page_size & 7) != 0)
		return -1;
	/* usable size = page size minus reserved bytes (byte 20) */
	usable = page_size - page1[20];
	return (usable < 480) ? -1 : 0;
}
/* For each candidate password: derive a 32-byte AES key with PBKDF2-SHA1
 * (ITERATIONS rounds) over the 16-byte salt, CBC-decrypt 8 bytes of the
 * first database page and test whether the result looks like a valid
 * SQLite page-1 header. Results go into cracked[].
 *
 * Fix: the for-statement was previously inside the #ifdef _OPENMP guard,
 * so a non-OpenMP build executed the body exactly once and silently relied
 * on count never exceeding MAX_KEYS_PER_CRYPT. The loop now runs
 * unconditionally (the pragma alone stays guarded), matching the usual
 * JtR pattern; behavior is unchanged for OpenMP builds. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
		unsigned char master[MAX_KEYS_PER_CRYPT][32];
		unsigned char output[24];	/* only bytes 16..23 are ever written/read */
		unsigned char *iv_in;
		unsigned char iv_out[16];
		int size, i;
		int page_sz = 1008; /* 1024 - strlen(SQLITE_FILE_HEADER) */
		int reserve_sz = 16; /* for HMAC off case */
		AES_KEY akey;
#ifdef SIMD_COEF_32
		int len[MAX_KEYS_PER_CRYPT];
		unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
		/* batch the whole SIMD group through one PBKDF2 call */
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			len[i] = strlen(saved_key[i+index]);
			pin[i] = (unsigned char*)saved_key[i+index];
			pout[i] = master[i];
		}
		pbkdf2_sha1_sse((const unsigned char **)pin, len, cur_salt->salt, 16, ITERATIONS, pout, 32, 0);
#else
		pbkdf2_sha1((unsigned char *)saved_key[index],
		            strlen(saved_key[index]), cur_salt->salt,
		            16, ITERATIONS, master[0], 32, 0);
#endif
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			//memcpy(output, SQLITE_FILE_HEADER, FILE_HEADER_SZ);
			/* IV lives in the page's reserved tail: data[1008..1023] */
			size = page_sz - reserve_sz;
			iv_in = cur_salt->data + size + 16;
			memcpy(iv_out, iv_in, 16);
			AES_set_decrypt_key(master[i], 256, &akey);
			/*
			 * decrypting 8 bytes from offset 16 is enough since the
			 * verify_page function looks at output[16..23] only.
			 */
			AES_cbc_encrypt(cur_salt->data + 16, output + 16, 8, &akey, iv_out, AES_DECRYPT);
			if (verify_page(output) == 0)
				cracked[index+i] = 1;
			else
				cracked[index+i] = 0;
		}
	}
	return count;
}
/* Return 1 if any candidate in this batch cracked the current salt. */
static int cmp_all(void *binary, int count)
{
	int i = 0;

	while (i < count) {
		if (cracked[i])
			return 1;
		i++;
	}
	return 0;
}
/* Per-candidate check: crypt_all() already recorded the verdict. */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* No binary value to compare against; a set cracked[] flag is definitive. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Store a candidate password, NUL-terminated and truncated to the
 * PLAINTEXT_LENGTH-sized buffer. */
static void strip_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, sizeof(*saved_key));
}
/* Return the stored candidate password for this index. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Format descriptor registered with the JtR core (params + methods). */
struct fmt_main fmt_strip = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,			/* presumably plaintext_min_length — confirm against formats.h */
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{ NULL },		/* no tunable cost names */
		{ FORMAT_TAG },
		strip_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,	/* BINARY_SIZE is 0: verification happens in crypt_all */
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,			/* presumably salt_compare — confirm against formats.h */
		set_salt,
		strip_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.