source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_binop__ne_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_int32)
// A.*B function (eWiseMult): GB (_AemultB_01__ne_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_int32)
// A.*B function (eWiseMult): GB (_AemultB_03__ne_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_int32)
// A*D function (colscale): GB (_AxD__ne_int32)
// D*A function (rowscale): GB (_DxB__ne_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_int32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_int32)
// C=scalar+B GB (_bind1st__ne_int32)
// C=scalar+B' GB (_bind1st_tran__ne_int32)
// C=A+scalar GB (_bind2nd__ne_int32)
// C=A'+scalar GB (_bind2nd_tran__ne_int32)
// C type: bool
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_NE || GxB_NO_INT32 || GxB_NO_NE_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; applies cij = (aij != bij)
// entry-by-entry via the included template.  Returns GrB_NO_VALUE when the
// operator is compiled out (GB_DISABLE), which tells the caller to fall back
// to the generic (non-hard-coded) kernel.
GrB_Info GB (_Cdense_ewise3_noaccum__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template expands GB_BINOP: cij = (aij != bij), using nthreads threads
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse matrix B into a dense matrix C.
// NOTE: the body is generated as "#if 0" for the NE operator — NE is not a
// usable accumulator here — so this kernel intentionally does no work and
// returns GrB_SUCCESS.  The stub is kept so GB_binop__include.h can declare
// a uniform set of functions for every operator.
GrB_Info GB (_Cdense_accumB__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a scalar b into a dense matrix C.
// As with _Cdense_accumB, the generator disabled the body (#if 0) for the
// NE operator, so this is a no-op stub that returns GrB_SUCCESS.
GrB_Info GB (_Cdense_accumb__ne_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale: D is a diagonal matrix, so column j of C is column j
// of A combined entrywise with D(j,j) via cij = (aij != djj).  The A matrix is
// sliced into A_ntasks tasks (A_ek_slicing) over A_nthreads threads.
GrB_Info GB (_AxD__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed alias of C->x; the template writes bool results (GB_CTYPE) into it
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale: D is diagonal, so row i of C is row i of B combined
// entrywise with D(i,i) via cij = (dii != bij).
GrB_Info GB (_DxB__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed alias of C->x; the template writes bool results into it
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the set union of the patterns of A and
// B, with cij = (aij != bij) where both are present.  TaskList/C_ntasks carry
// the parallel schedule computed by the symbolic phase; C_to_M/A/B map vectors
// of C to vectors of M, A, and B.
GrB_Info GB (_AaddB__ne_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// per-matrix slicing workspaces; allocated on demand inside the template
// and released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult method 01: C = A.*B or C<M> = A.*B over the set intersection of
// the patterns of A and B, cij = (aij != bij).  The ewise_method selects a
// sub-variant inside the meta template.
GrB_Info GB (_AemultB_01__ne_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  The flipxy flag asks for f(y,x) instead of f(x,y); for
// NE_INT32 GB_BINOP_FLIP is 0 (NE is commutative), so only the unflipped
// branch below is compiled.
GrB_Info GB (_AemultB_02__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 03: C<M> = A.*B where the mask M is sparse/hyper and both
// A and B are bitmap/full.  The mask is sliced (M_ek_slicing) to drive the
// parallel schedule; Cp_kfirst locates the first entry of each C vector.
GrB_Info GB (_AemultB_03__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B with
// cij = (aij != bij).  ewise_method selects the sub-method in the template.
GrB_Info GB (_AemultB_bitmap__ne_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the binary operator with the scalar bound as the
// first argument, i.e. Cx [p] = (x != Bx [p]) for every entry present in B.
// Cx and Bx may be aliased.  Bb is B's bitmap (NULL if B is full/sparse);
// bnz is the number of slots to scan; nthreads threads are used.
GrB_Info GB (_bind1st__ne_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // typed views of the untyped input/output arrays
    bool *Cx = (bool *) Cx_output ;
    int32_t *Bx = (int32_t *) Bx_input ;
    // the scalar bound to the first operand of the operator
    int32_t x = (*((int32_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only entries present in B's bitmap are computed
        if (GBB (Bb, p))
        {
            int32_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x != bij) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the binary operator with the scalar bound as the
// second argument, i.e. Cx [p] = (Ax [p] != y) for every entry present in A.
// Cx and Ax may be aliased.  Ab is A's bitmap (NULL if A is full/sparse);
// anz is the number of slots to scan; nthreads threads are used.
GrB_Info GB (_bind2nd__ne_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // typed views of the untyped input/output arrays
    bool *Cx = (bool *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    // the scalar bound to the second operand of the operator
    int32_t y = (*((int32_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present in A's bitmap are computed
        if (GBB (Ab, p))
        {
            int32_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij != y) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with scalar x bound
// first, cij = (x != aij), using the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__ne_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
// (redefine GB_ATYPE to B's type for the duration of the template)
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code after this function (a no-op here since
// A and B have the same type, but kept by the generator for uniformity)
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply the operator with scalar y bound
// second, cij = (aij != y), using the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__ne_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
phase.h | //
// Created by mattw on 13/01/2022.
//
#ifndef BECPP_PHASE_H
#define BECPP_PHASE_H
#include <iostream>
#include <tuple>
#include <vector>
#include <random>
#include <chrono>
#include <cmath>
#include "grid.h"
#include "constants.h"
// Heaviside step function with the H(0) == 1 convention:
// returns 0.0 for negative arguments and 1.0 otherwise.
inline double heaviside(double x)
{
    return (x < 0) ? 0.0 : 1.0;
}
// Rejection-sample n_vort candidate vortex positions, uniformly distributed
// over [-len_x/2, len_x/2) in both coordinates, such that no two accepted
// positions are within `threshold` of each other along BOTH axes
// (a square exclusion zone, not a circular one).  Gives up after max_iter
// candidate draws and returns whatever was accepted so far (with a warning).
// NOTE(review): the y coordinate is drawn from the x-extent distribution —
// assumes a square grid (len_x == len_y); confirm for rectangular grids.
// NOTE(review): positions.size() (unsigned) is compared against n_vort (int);
// a negative n_vort would loop until max_iter — callers should pass n_vort >= 0.
std::vector<std::tuple<double, double>> generate_positions(const int n_vort, const double threshold, const Grid2D &grid,
const int max_iter)
{
std::cout << "Finding " << n_vort << " vortex positions...\n";
std::vector<std::tuple<double, double>> positions;
int iterations = 0;
// Construct random generator
unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
std::default_random_engine generator(seed);
std::uniform_real_distribution<double> uniform_distribution(-grid.len_x / 2, grid.len_x / 2);
while (positions.size() < n_vort)
{
std::tuple<double, double> pos;
// NOTE(review): the order of the two generator draws below is unspecified
// (unsequenced function arguments); harmless here since both draws come
// from the same distribution, but the (x, y) assignment is not portable.
pos = std::make_tuple(uniform_distribution(generator), uniform_distribution(generator));
iterations += 1;
bool triggered{false};
// Reject the candidate if it falls inside the exclusion square of any
// already-accepted position.
for (const auto &accepted_pos: positions)
{
if (std::abs(std::get<0>(pos) - std::get<0>(accepted_pos)) < threshold)
{
if (std::abs(std::get<1>(pos) - std::get<1>(accepted_pos)) < threshold)
{
triggered = true;
break;
}
}
}
if (!triggered)
{
positions.push_back(pos);
}
// If iterations exceed the maximum, return the current position list and continue
if (iterations > max_iter)
{
std::cout << "WARNING: Max iterations exceeded, only found " << positions.size() << " suitable positions\n";
return positions;
}
}
std::cout << "Found " << n_vort << " positions in " << iterations << " iterations\n";
return positions;
}
// Build the 2D phase profile theta for n_vort vortices arranged as
// vortex/antivortex pairs, following the periodic vortex-phase ansatz
// (sum of atan(tanh * tan) image terms over k = -5..5).
//
// Parameters:
//   n_vort    - requested number of vortices (paired as n_vort/2 +/- pairs)
//   threshold - minimum per-axis separation between vortex positions
//   grid      - 2D grid providing nx, ny, len_x, len_y, X, Y
//   max_iter  - cap on rejection-sampling iterations in generate_positions
// Returns the nx-by-ny phase array theta.
doubleArray_t construct_phase(const int n_vort, const double threshold, const Grid2D &grid, const int max_iter = 10000)
{
    std::cout << "Commencing construction of phase:\n";
    std::vector<std::tuple<double, double>> positions = generate_positions(n_vort, threshold, grid, max_iter);
    std::cout << "Constructing phase profile array...\n";
    // Phase array
    doubleArray_t theta;
    theta.resize(grid.nx, std::vector<double>(grid.ny));
    // generate_positions may return fewer than n_vort positions when max_iter
    // is exceeded; pair up only the positions actually found, so we never
    // index past the end of `positions`.  When all n_vort were found this is
    // identical to the original n_vort/2 pairing.
    const int n_pairs = static_cast<int>(positions.size()) / 2;
    for (int num = 0; num < n_pairs; ++num)
    {
        doubleArray_t theta_k;
        theta_k.resize(grid.nx, std::vector<double>(grid.ny));
        // Extract positions of this vortex (minus) / antivortex (plus) pair
        auto[x_m, y_m] = positions[num];
        auto[x_p, y_p] = positions[n_pairs + num];
        // Rescale positions to the [0, 4*pi) periodic coordinates of the ansatz
        double x_m_tilde = 2 * PI * ((x_m + grid.len_x) / grid.len_x);
        double y_m_tilde = 2 * PI * ((y_m + grid.len_y) / grid.len_y);
        double x_p_tilde = 2 * PI * ((x_p + grid.len_x) / grid.len_x);
        double y_p_tilde = 2 * PI * ((y_p + grid.len_y) / grid.len_y);
#pragma omp parallel for collapse(2) shared(grid, theta, theta_k, y_m_tilde, x_m_tilde, y_p_tilde, x_p_tilde) default(none)
        for (int i = 0; i < grid.nx; ++i)
        {
            for (int j = 0; j < grid.ny; ++j)
            {
                double x_tilde = 2 * PI * ((grid.X[i][j] + grid.len_x) / grid.len_x);
                // FIX: y was scaled with len_x; use len_y to match the scaling
                // of y_m_tilde / y_p_tilde above.  Identical for square grids
                // (len_x == len_y) — confirm intent for rectangular grids.
                double y_tilde = 2 * PI * ((grid.Y[i][j] + grid.len_y) / grid.len_y);
                // Aux variables
                double Y_minus = y_tilde - y_m_tilde;
                double X_minus = x_tilde - x_m_tilde;
                double Y_plus = y_tilde - y_p_tilde;
                double X_plus = x_tilde - x_p_tilde;
                double heav_xp = heaviside(X_plus);
                double heav_xm = heaviside(X_minus);
                // Sum over 11 periodic images (k = -5..5) of the pair's phase
                for (int k = -5; k < 6; ++k)
                {
                    theta_k[i][j] += std::atan(std::tanh((Y_minus + 2 * PI * k) / 2)
                                               * std::tan((X_minus - PI) / 2))
                                     - std::atan(std::tanh((Y_plus + 2 * PI * k) / 2)
                                                 * std::tan((X_plus - PI) / 2))
                                     + PI * (heav_xp - heav_xm);
                }
                // Remove the residual linear phase gradient of the pair
                theta_k[i][j] -= y_tilde * (x_p_tilde - x_m_tilde) / (2 * PI);
                theta[i][j] += theta_k[i][j];
            }
        }
    }
    std::cout << "Phase constructed!\n";
    return theta;
}
#endif //BECPP_PHASE_H
|
gmx_isd.c | /*
*
* Tim Connolly - tconnolly@ucmerced.edu
* Copyright (c) 2014, Regents of the University of California
* Released under BSD 2-Clause License (see "LICENSE" file)
*
* This code was modified from the file src/tools/gmx_gyrate.c
*/
#include <math.h>
#include <string.h>
#include "libisdm.h"
#include "eigensolver.h"
#include <gromacs/statutil.h>
#include <gromacs/sysstuff.h>
#include <gromacs/typedefs.h>
#include <gromacs/smalloc.h>
#include <gromacs/macros.h>
#include <gromacs/vec.h>
#include <gromacs/pbc.h>
#include <gromacs/copyrite.h>
#include <gromacs/futil.h>
#include <gromacs/statutil.h>
#include <gromacs/index.h>
#include <gromacs/mshift.h>
#include <gromacs/xvgr.h>
#include <gromacs/rmpbc.h>
#include <gromacs/txtdump.h>
#include <gromacs/tpxio.h>
#include <gromacs/gstat.h>
#include <gromacs/gmx_ana.h>
void mat_mult_mat(real* mat1, real* mat2, int m, int n, int o, real* out, gmx_bool bMP)
{
    /* Multiply mat1 (m by n) by mat2 (n by o), storing the m by o product in
     * out. All matrices are row-major: element (i,j) of an R-by-C matrix is
     * at index i*C + j.
     *
     * FIX: the original code used the row count m (or n) as the row stride
     * for every matrix, which is only correct when m == n == o. The strides
     * are now n for mat1, o for mat2, and o for out, matching the documented
     * dimensions. For square inputs the behavior is unchanged.
     *
     * Progress is reported to stderr; bMP enables OpenMP parallelism over
     * rows of the output.
     */
    int i, percent_calcs, finished_calcs;
    percent_calcs = 1;
    finished_calcs = 0;
#pragma omp parallel for schedule(dynamic) if (bMP)
    for (i = 0; i < m; i++) {
        int j, k;
        for (k = 0; k < o; k++) {
            out[(i * o) + k] = 0;
            for (j = 0; j < n; j++) {
                out[(i * o) + k] += mat1[(i * n) + j] * mat2[(j * o) + k];
            }
        }
        // Output progress. OpenMP critical section.
#pragma omp critical
        {
            finished_calcs++;
            while ((double)(finished_calcs) / m >= (double)percent_calcs / 100)
            {
                fprintf(stderr, "Approximately %i percent complete. \r", percent_calcs);
                percent_calcs++;
            }
        } // End of OpenMP critical section.
    } // End of OpenMP parallel for loop.
    fprintf(stderr, "\n");
}
void scl_mult_mat(real scl, real* mat, int m, int n, real* out)
{
    /* Multiply all elements of the m by n row-major matrix mat by scalar scl,
     * writing the result to out. mat and out may point to the same memory.
     *
     * FIX: the original indexed with row stride m; a row-major m-by-n matrix
     * has row stride n (element (i,j) at i*n + j). Unchanged when m == n.
     */
    int i, j;
    for (i = 0; i < m; i++) {
        for (j = 0; j < n; j++) {
            out[(i * n) + j] = scl * mat[(i * n) + j];
        }
    }
}
void mat_transpose(real* mat, int m,int n, real* out)
{
    /* Transpose the m by n row-major matrix mat into the n by m row-major
     * matrix out. mat and out must not overlap.
     *
     * FIX: the original used stride m for mat and stride n for out; the
     * correct row strides are n for the m-by-n input (element (i,j) at
     * i*n + j) and m for the n-by-m output (element (j,i) at j*m + i).
     * Unchanged when m == n.
     */
    int i, j;
    for (i = 0; i < m; i++) {
        for (j = 0; j < n; j++) {
            out[(j * m) + i] = mat[(i * n) + j];
        }
    }
}
void calc_EISD(real** MDS, int nframes, int d, real** EISD)
{
    /* Fill EISD (nframes x nframes) with the approximate inter-structure
     * distances implied by the dimensionally-reduced coordinates in MDS,
     * using the first d dimensions: EISD[i][j] is the Euclidean distance
     * between rows i and j of MDS, and the diagonal is zero.
     */
    int row, col, dim;
    real delta;
    for (row = 0; row < nframes; row++)
    {
        for (col = 0; col < nframes; col++)
        {
            // A structure is at distance zero from itself.
            if (row == col)
            {
                EISD[row][col] = 0.0;
                continue;
            }
            // Accumulate squared per-dimension differences, then take the root.
            EISD[row][col] = 0.0;
            for (dim = 0; dim < d; dim++)
            {
                delta = MDS[row][dim] - MDS[col][dim];
                EISD[row][col] += delta * delta;
            }
            EISD[row][col] = sqrt(EISD[row][col]);
        }
    }
}
real calc_rcc(real** ISD, real** EISD, int nframes)
{
    /* Pearson correlation coefficient between the strict upper triangles of
     * the true distance matrix ISD and the approximate matrix EISD
     * (nframes * (nframes - 1) / 2 pairs, diagonal excluded).
     */
    int row, col;
    int npairs = nframes * (nframes - 1) / 2;
    double sum_x, sum_y, sum_x2, sum_y2, cov_sum;
    double mean_x, mean_y, var_x, var_y;
    sum_x = 0.0; sum_y = 0.0; sum_x2 = 0.0; sum_y2 = 0.0; cov_sum = 0.0;
    // First pass: sums and sums of squares over the upper triangle.
    for (row = 0; row < (nframes - 1); row++)
    {
        for (col = (row + 1); col < nframes; col++)
        {
            sum_x += ISD[row][col];
            sum_y += EISD[row][col];
            sum_x2 += ISD[row][col] * ISD[row][col];
            sum_y2 += EISD[row][col] * EISD[row][col];
        }
    }
    mean_x = sum_x / npairs;
    mean_y = sum_y / npairs;
    var_x = (sum_x2 / npairs) - (mean_x * mean_x);
    var_y = (sum_y2 / npairs) - (mean_y * mean_y);
    // Second pass: covariance about the means.
    for (row = 0; row < (nframes - 1); row++)
    {
        for (col = (row + 1); col < nframes; col++)
        {
            cov_sum += (ISD[row][col] - mean_x) * (EISD[row][col] - mean_y);
        }
    }
    // Correlation coefficient, R = cov / (sigma_x * sigma_y).
    return (cov_sum / npairs) / (sqrt(var_x) * sqrt(var_y));
}
int gmx_isd(int argc,char *argv[])
{
const char *desc[] = {
"[TT]g_isd[tt]."
};
static gmx_bool bANG=FALSE, bDIH=FALSE, bANGDIH=FALSE, bDRMS=FALSE;
static gmx_bool bPHIPSI=FALSE, bSRMS=FALSE, bPCOR=FALSE, bMAMMOTH=FALSE;
static gmx_bool bACOR=FALSE, bESA=FALSE, bRMSD=FALSE, bMIR=FALSE;
static gmx_bool bRG=FALSE, bSRG=FALSE, bE2E=FALSE, bSE2E=FALSE;
static gmx_bool bANG2=FALSE, bDIH2=FALSE, bANGDIH2=FALSE, bANGDIH2G=FALSE;
static gmx_bool bRROT=FALSE, bSDRMS=FALSE, bPHIPSI2=FALSE, bRMSDIH=FALSE;
static user_bf = -1, user_ef = -1, user_td = -1;
static int nt = -1;
static real setmax = -1.0;
static real rcutoff = 1.1;
static real noisefloor = 0.0;
static gmx_bool bNoise = FALSE;
static gmx_bool bMP = FALSE;
t_pargs pa[] = {
{ "-ang", FALSE, etBOOL, {&bANG},
"ISDM: Mean cosine of difference of backbone angles for each "
"set of three atoms. Assumes only CA atoms." },
{ "-dih", FALSE, etBOOL, {&bDIH},
"ISDM: Mean cosine of difference of backbone dihedrals for "
"each set of four atoms. Assumes only CA atoms." },
{ "-angdih", FALSE, etBOOL, {&bANGDIH},
"ISDM: Geometric mean of ang and dih ISDMs." },
{ "-ang2", FALSE, etBOOL, {&bANG2},
"ISDM: Attempts to euclideanize -ang." },
{ "-dih2", FALSE, etBOOL, {&bDIH2},
"ISDM: Attempts to euclideanize -dih." },
{ "-angdih2", FALSE, etBOOL, {&bANGDIH2},
"ISDM: Attempts to euclideanize -angdih." },
{ "-angdih2g", FALSE, etBOOL, {&bANGDIH2G},
"ISDM: Attempts to euclideanize -angdih. Geometric mean." },
{ "-rmsdih", FALSE, etBOOL, {&bRMSDIH},
"ISDM: RMSD of alpha carbon dihedrals." },
{ "-phipsi", FALSE, etBOOL, {&bPHIPSI},
"ISDM: Mean cosine of difference of phi and psi angles. "
"Assumes only backbone atoms." },
{ "-phipsi2", FALSE, etBOOL, {&bPHIPSI2},
"ISDM: Attempts to euclideanize -phipsi." },
{ "-drms", FALSE, etBOOL, {&bDRMS},
"ISDM: Mean difference of the paired distances matrix for all "
"atoms. Distance RMS(D)." },
{ "-sdrms", FALSE, etBOOL, {&bSDRMS},
"ISDM: Mean difference of the paired distances matrix for all "
"atoms scaled by 2 * geometric mean of Rg. Scaled distance "
"RMS(D)." },
{ "-rg", FALSE, etBOOL, {&bRG},
"ISDM: Calculates difference in Rg. Only compares size. " },
{ "-srg", FALSE, etBOOL, {&bSRG},
"ISDM: Calculates difference in Rg scaled by mean Rg. " },
{ "-e2e", FALSE, etBOOL, {&bE2E},
"ISDM: Calculates difference in end-to-end distance. " },
{ "-se2e", FALSE, etBOOL, {&bSE2E},
"ISDM: Calculates difference in end-to-end distance scaled "
"by (2 * Rg). " },
{ "-mir", FALSE, etBOOL, {&bMIR},
"ISDM: RMSD with the mirror of the reference structure. " },
{ "-rrot", FALSE, etBOOL, {&bRROT},
"ISDM: RMSD with random rotation of reference structure. " },
{ "-srms", FALSE, etBOOL, {&bSRMS},
"ISDM: Scaled RMSD. RMSD between the structure and reference "
"divided by the RMSD between the structure and mirror of the "
"reference created by multiplying the coordinates by the "
"negative identity matrix." },
{ "-rmsd", FALSE, etBOOL, {&bRMSD},
"ISDM: Standard RMSD." },
{ "-pcor", FALSE, etBOOL, {&bPCOR},
"ISDM: Position correlation. Correlation coefficient of the "
"positions is computed after alignment. Only positive "
"correlation is considered. Negative correlations are set to "
"zero." },
{ "-acor", FALSE, etBOOL, {&bACOR},
"ISDM: Angle correlation. Correlation coefficient of the "
"backbone angles (see ang ISDM) is computed. "
"Only positive correlation is considered. Negative correlations "
"are set to zero." },
{ "-mammoth", FALSE, etBOOL, {&bMAMMOTH},
"ISDM: MAMMOTH (MAtching Molecular Models Obtained from "
"Theory). Compares segments of residues chosen by sequence "
"alignment. Attempts to focus on correct secondary structure "
"moreso than tertiary structure. Source code modified for "
"compatibility. For this ISDM, please cite: \n\n"
"Ortiz, AR, Strauss, CE, Olmea, O (2002). MAMMOTH "
"(Matching molecular models obtained from theory): An automated "
"method for model comparison. Protein Sci. 11 (11), 2606–2621.\n"},
{ "-esa", FALSE, etBOOL, {&bESA},
"ISDM: Elastic shape analysis. Based on image analysis. "
"Warps structure onto the reference structure. Original source "
"code ported from Matlab to C. For this ISDM, please cite: \n\n"
"Liu W, Srivastava A, Zhang J (2011) A Mathematical Framework "
"for Protein Structure Comparison. PLoS Comput Biol 7(2): "
"e1001075.\n\nAssume only CA atoms." },
{ "-mp", FALSE, etBOOL, {&bMP},
"Use OpenMP commands for parallel processing. "},
{ "-nt", FALSE, etINT, {&nt},
"Limit the maximum number of threads for parallel processing. "},
{ "-bf", FALSE, etINT, {&user_bf},
"Compare range of frames from bf to ef to all other "
"frames. The bf and ef options are applied after the b, "
"e, and dt options and use units of frames instead of units of "
"time. Frame numbers are counted from one."},
{ "-ef", FALSE, etINT, {&user_ef},
"Compare range of frames from bf to ef to all other "
"frames. The bf and ef options are applied after the b, "
"e, and dt options and use units of frames instead of units of "
"time. Frame numbers are counted from one."},
{ "-td", FALSE, etINT, {&user_td},
"Number of frames used for the time difference of -tdo output. " },
{ "-noise", FALSE, etBOOL, {&bNoise},
"If this flag is set, additional information is sent to "
"stdout. The tool calculates the number of positive eigenvalues "
"and the number of positive eigenvalues that can be accounted "
"by two sources of noise. (1) Algorithmic noise based on the "
"negative eigenvalues, (2) thermal noise based on the expected "
"variation of folded proteins, and (3) the combined noise. "
"An estimate of thermal noise can be set manually with the "
"option -noisefloor." },
{ "-setmax", FALSE, etREAL, {&setmax},
"Set maximum value to threshold the xpm file. Must be greater "
"than the average inter-structure distance." },
{ "-rcutoff", FALSE, etREAL, {&rcutoff},
"Set cutoff value for the correlation coefficient. Only applies "
"if the -rcc output is set. The correlation coefficient (R) "
"will be calculated for each dimensional until rcutoff is "
"reached. The value should be between 0 and 1." },
{ "-noisefloor", FALSE, etREAL, {&noisefloor},
"Only applies if the -noise option is set. Manually sets the "
"the estimate of thermal noise used by the dimensionality "
"estimator." },
};
FILE *out;
t_trxstatus *status;
t_topology top;
int ePBC;
rvec *x, **frames;
real *nweights, *iweights, abscoor, maxcoor;
real *diff, **ISDmat, *P2, *J, *P2J, *B, *BT, *E, *V, *MDSa;
real **Va, **MDS, **EISD, *EISDm, Rcc, sumne, cumpe;
double *avgdiff, *maxdiff, avgISD, maxISD;
matrix box;
real t, t1, t2, dt, xpm_max, pi = 3.14159265358979;
int *maxframe, *rnum, maxcoori, bf, ef;
int i, k, m, n, p, np, d, iatoms, natoms, nframes, nframes2, nf2;
int percent_calcs, finished_calcs, noptions;
gmx_bool bDFLT, bFit, bMDS, bEig, bVec, bRcc, bMRg, bDRg, bPy, bM;
gmx_bool bAvg, bVar, bMax, bPair, bRef, bSens, bSNR, bTD;
gmx_bool bMap, bISD, bISDMat, bDCR, bVDCR, bCalcDCR, bMinDCR, bMaxDCR;
gmx_bool bAvgSCL, bMaxSCL;
char buf[256];
char *ISDM, *grpname, title[256], title2[256], *rname;
atom_id *index;
output_env_t oenv;
gmx_rmpbc_t gpbc=NULL;
const char *leg[] = { "D" };
#define NLEG asize(leg)
t_filenm fnm[] = {
{ efTRX, "-f", NULL, ffREAD },
{ efTPS, NULL, NULL, ffREAD },
{ efNDX, NULL, NULL, ffOPTRD },
// Calc options.
{ efXVG, "-avg", "avg", ffOPTWR },
{ efXVG, "-var", "var", ffOPTWR },
{ efXVG, "-max", "max", ffOPTWR },
{ efXVG, "-pair", "pair", ffOPTWR },
{ efXVG, "-ref", "ref", ffOPTWR },
{ efXPM, "-map", "map", ffOPTWR },
{ efDAT, "-isd", "isdcsv", ffOPTWR },
{ efXVG, "-decorr", "decorr", ffOPTWR },
{ efXVG, "-mindcr", "mindcr", ffOPTWR },
{ efXVG, "-maxdcr", "maxdcr", ffOPTWR },
{ efXVG, "-avgscl", "avgscl", ffOPTWR },
{ efXVG, "-maxscl", "maxscl", ffOPTWR },
{ efXVG, "-vdcr", "vdcr", ffOPTWR },
{ efXVG, "-snr", "snr", ffOPTWR },
{ efXVG, "-tdo", "tdo", ffOPTWR },
{ efXVG, "-sens", "sens", ffOPTWR },
// CMDS options.
{ efXVG, "-eig", "eigvals", ffOPTWR },
{ efXVG, "-rcc", "corrcoef", ffOPTWR },
{ efXVG, "-mrg", "mrgcorr", ffOPTWR },
{ efXVG, "-drg", "drgcorr", ffOPTWR },
{ efDAT, "-vec", "eigvecs", ffOPTWR },
{ efDAT, "-mds", "mdscsv", ffOPTWR },
{ efDAT, "-py", "mayapy", ffOPTWR },
{ efDAT, "-m", "disp6D", ffOPTWR },
};
#define NFILE asize(fnm)
int npargs;
CopyRight(stderr,argv[0]);
npargs = asize(pa);
// Lots of black magic with this one. The oenv is used by many things.
parse_common_args(&argc,argv,PCA_CAN_TIME | PCA_CAN_VIEW | PCA_BE_NICE,
NFILE,fnm,npargs,pa,asize(desc),desc,0,NULL,&oenv);
// Output which files?
// Calc
bAvg = opt2bSet("-avg", NFILE, fnm);
bVar = opt2bSet("-var", NFILE, fnm);
bMax = opt2bSet("-max", NFILE, fnm);
bPair = opt2bSet("-pair", NFILE, fnm);
bRef = opt2bSet("-ref", NFILE, fnm);
bMap = opt2bSet("-map", NFILE, fnm);
bISD = opt2bSet("-isd", NFILE, fnm);
bDCR = opt2bSet("-decorr", NFILE, fnm);
bMinDCR = opt2bSet("-mindcr", NFILE, fnm);
bMaxDCR = opt2bSet("-maxdcr", NFILE, fnm);
bAvgSCL = opt2bSet("-avgscl", NFILE, fnm);
bMaxSCL = opt2bSet("-maxscl", NFILE, fnm);
bVDCR = opt2bSet("-vdcr", NFILE, fnm);
bSNR = opt2bSet("-snr", NFILE, fnm);
bTD = opt2bSet("-tdo", NFILE, fnm);
bSens = opt2bSet("-sens", NFILE, fnm);
// CMDS
bEig = opt2bSet("-eig", NFILE, fnm);
bRcc = opt2bSet("-rcc", NFILE, fnm);
bMRg = opt2bSet("-mrg", NFILE, fnm);
bDRg = opt2bSet("-drg", NFILE, fnm);
bVec = opt2bSet("-vec", NFILE, fnm);
bMDS = opt2bSet("-mds", NFILE, fnm);
bPy = opt2bSet("-py", NFILE, fnm);
bM = opt2bSet("-m", NFILE, fnm);
// If there are no options at command line, do default behavior.
bDFLT = !(bANG || bDIH || bANGDIH || bPHIPSI || bDRMS || bSRMS || bRMSD ||
bPCOR || bACOR || bMAMMOTH || bESA || bRG || bSRG || bE2E ||
bSE2E || bMIR || bRROT || bSDRMS || bANG2 || bDIH2 ||
bANGDIH2 || bPHIPSI2 || bANGDIH2G || bRMSDIH);
bFit = (bDFLT || bRMSD || bMIR || bSRMS || bPCOR);
#ifdef _OPENMP
if (nt > 0)
{
omp_set_num_threads(nt);
}
#endif
/* Reads the tpr file. Outputs a ton of info.
*
* I think this is the line that forces you to have a -s at prompt.
*/
read_tps_conf(ftp2fn(efTPS, NFILE, fnm), title, &top, &ePBC, &x, NULL, box, TRUE);
// Asks you to choose a selection of atoms at prompt.
get_index(&top.atoms, ftp2fn_null(efNDX, NFILE, fnm), 1, &iatoms, &index, &grpname);
// For error checking.
noptions = 0;
// Check which ISDM will be used. Default is RMSD.
if (bDFLT || bRMSD)
{
fprintf(stderr,"\nUsing RMSD as ISDM.\n");
ISDM = "RMSD";
noptions++;
}
if (bANG)
{
fprintf(stderr,"\nUsing backbone angles as ISDM.\n");
ISDM = "ANG";
noptions++;
}
if (bDIH)
{
fprintf(stderr,"\nUsing backbone dihedrals as ISDM.\n");
ISDM = "DIH";
noptions++;
}
if (bANG2)
{
fprintf(stderr,"\nUsing backbone angles as ISDM.\n");
ISDM = "ANG2";
noptions++;
}
if (bDIH2)
{
fprintf(stderr,"\nUsing backbone dihedrals as ISDM.\n");
ISDM = "DIH2";
noptions++;
}
if (bANGDIH)
{
fprintf(stderr,"\nUsing geometric mean of angles and dihedrals as ISDM.\n");
ISDM = "ANGDIH";
noptions++;
}
if (bANGDIH2)
{
fprintf(stderr,"\nUsing geometric mean of angles and dihedrals as ISDM.\n");
ISDM = "ANGDIH2";
noptions++;
}
if (bANGDIH2G)
{
fprintf(stderr,"\nUsing geometric mean of angles and dihedrals as ISDM.\n");
ISDM = "ANGDIH2G";
noptions++;
}
if (bANGDIH)
{
fprintf(stderr,"\nUsing RMSD of backbone dihedrals as ISDM.\n");
ISDM = "RMSDIH";
noptions++;
}
if (bPHIPSI)
{
fprintf(stderr,"\nUsing phi and psi angles as ISDM.\n");
ISDM = "PHIPSI";
noptions++;
}
if (bPHIPSI2)
{
fprintf(stderr,"\nUsing phi and psi angles as ISDM.\n");
ISDM = "PHIPSI2";
noptions++;
}
if (bDRMS)
{
fprintf(stderr,"\nUsing distance RMS as ISDM.\n");
ISDM = "DRMS";
noptions++;
}
if (bSDRMS)
{
fprintf(stderr,"\nUsing scaled distance RMS as ISDM.\n");
ISDM = "SDRMS";
noptions++;
}
if (bRG)
{
fprintf(stderr,"\nUsing Rg difference as ISDM.\n");
ISDM = "RG";
noptions++;
}
if (bSRG)
{
fprintf(stderr,"\nUsing scaled Rg difference as ISDM.\n");
ISDM = "SRG";
noptions++;
}
if (bE2E)
{
fprintf(stderr,"\nUsing end-to-end distance as ISDM.\n");
ISDM = "E2E";
noptions++;
}
if (bSE2E)
{
fprintf(stderr,"\nUsing scaled end-to-end distance as ISDM.\n");
ISDM = "SE2E";
noptions++;
}
if (bMIR)
{
fprintf(stderr,"\nUsing mirrored RMSD as ISDM.\n");
ISDM = "MIR";
noptions++;
}
if (bSRMS)
{
fprintf(stderr,"\nUsing scaled RMSD as ISDM.\n");
ISDM = "SRMS";
noptions++;
}
if (bPCOR)
{
fprintf(stderr,"\nUsing position correlation as ISDM.\n");
ISDM = "PCOR";
noptions++;
}
if (bACOR)
{
fprintf(stderr,"\nUsing backbone angle correlation as ISDM.\n");
ISDM = "ACOR";
noptions++;
}
if (bRROT)
{
fprintf(stderr,"\nUsing RMSD with random rotation as ISDM.\n");
noptions++;
// Additional stuff for option.
srand(time(NULL));
}
if (bMAMMOTH)
{
fprintf(stderr,"\nUsing MAMMOTH comparison as ISDM.\n");
noptions++;
// Additional stuff for option.
snew(rnum,iatoms);
//printf(stderr,"\nOutput sequence (tool).\n\n");
for (i = 0; i < iatoms; i++)
{
rname = *(top.atoms.resinfo[top.atoms.atom[index[i]].resind].name);
// Convert to integers.
if (!(strcmp(rname, "ALA")))
{
rnum[i] = 0;
}
else if (!(strcmp(rname, "CYS")))
{
rnum[i] = 1;
}
else if (!(strcmp(rname, "ASP")))
{
rnum[i] = 2;
}
else if (!(strcmp(rname, "GLU")))
{
rnum[i] = 3;
}
else if (!(strcmp(rname, "PHE")))
{
rnum[i] = 4;
}
else if (!(strcmp(rname, "GLY")))
{
rnum[i] = 5;
}
else if (!(strcmp(rname, "HIS")) || !(strcmp(rname, "HID")) ||
!(strcmp(rname, "HIE")) || !(strcmp(rname, "HIP")) ||
!(strcmp(rname, "HSD")) || !(strcmp(rname, "HSE")) ||
!(strcmp(rname, "HSP")))
{
rnum[i] = 6;
}
else if (!(strcmp(rname, "ILE")))
{
rnum[i] = 7;
}
else if (!(strcmp(rname, "LYS")))
{
rnum[i] = 8;
}
else if (!(strcmp(rname, "LEU")))
{
rnum[i] = 9;
}
else if (!(strcmp(rname, "MET")))
{
rnum[i] = 10;
}
else if (!(strcmp(rname, "ASN")))
{
rnum[i] = 11;
}
else if (!(strcmp(rname, "PRO")))
{
rnum[i] = 12;
}
else if (!(strcmp(rname, "GLN")))
{
rnum[i] = 13;
}
else if (!(strcmp(rname, "ARG")))
{
rnum[i] = 14;
}
else if (!(strcmp(rname, "SER")))
{
rnum[i] = 15;
}
else if (!(strcmp(rname, "THR")))
{
rnum[i] = 16;
}
else if (!(strcmp(rname, "VAL")))
{
rnum[i] = 17;
}
else if (!(strcmp(rname, "TRP")))
{
rnum[i] = 18;
}
else if (!(strcmp(rname, "TYR")))
{
rnum[i] = 19;
}
else
{
rnum[i] = 20;
}
}
}
if (bESA)
{
fprintf(stderr,"\nUsing ESA comparison as ISDM.\n"
"For this ISDM, please cite: \n\n"
"Liu W, Srivastava A, Zhang J (2011) A Mathematical Framework "
"for Protein Structure Comparison. PLoS Comput Biol 7(2): "
"e1001075.\n" );
noptions++;
}
// Throw an error if multiple -ISDM options were given by the user.
if (noptions > 1)
{
gmx_fatal(FARGS,"\nThis tool only supports using one optional ISDM at a time.\n");
}
// Check for error on -setmax before doing the calculations.
if (setmax != -1.0)
{
if (setmax <= 0.0)
{
gmx_fatal(FARGS,"\nThe argument for -setmax must be greater than 0.\n");
}
}
/* Opens trj. Reads first frame. Returns status. Allocates mem for x.
*
* Not sure which argument determines which atoms to pull info for.
*/
printf("\nCounting the number of frames.\n");
natoms=read_first_x(oenv, &status, ftp2fn(efTRX, NFILE, fnm), &t, &x, box);
// Now that we have iatoms, allocate memory for other arrays.
snew(nweights, natoms);
snew(iweights, iatoms);
snew(diff, iatoms);
// Initialize nweights to zeros.
for (i=0; i < natoms; i++)
{
nweights[i] = 0;
}
// Makes an array of weights. Necessary for reset_x.
for (i=0; i < iatoms; i++)
{
// Give a value for the weights.
nweights[(int)index[i]] = 1;
iweights[i] = 1;
// While we're at it, initialize diff to zeros.
diff[i] = 0;
}
nframes = 0; t2 = 0;
do
{
/* This loop doesn't do anything.
*
* It's just the most reliable way to find the number of frames.
*/
t1 = t2;
t2 = t;
nframes++;
} while(read_next_x(oenv, status, &t, natoms, x, box));
// Close the trajectory.
close_trj(status);
// Throw an error if there aren't enough frames.
if (nframes < 2)
{
gmx_fatal(FARGS, "\nThe trajectory must have at least 2 frames.\n");
}
if (bVar)
{
if (nframes < 3)
{
gmx_fatal(FARGS, "\nCalculating variance requires at least 3 "
"frames.\n");
}
}
// Find trajectory time steps. Assumes even spacing. Find nframes / 2.
dt = t2 - t1;
nf2 = nframes / 2;
// Reference frames to calculate based on.
if (user_bf > nframes)
{
gmx_fatal(FARGS,"\nArgument to -bf must be between 1 and last frame.\n");
}
if (user_ef > nframes)
{
gmx_fatal(FARGS,"\nArgument to -ef must be between 1 and last frame.\n");
}
if (user_bf < 1)
{
bf = 1;
}
else
{
bf = user_bf;
}
if (user_ef < 1)
{
ef = nframes;
}
else
{
ef = user_ef;
}
if (ef < bf)
{
gmx_fatal(FARGS,"\nArgument to -bf must be less than argument to -ef.\n");
}
// Check for errors for the -td option before calculations begin.
if (bTD)
{
if (user_td < 1)
{
gmx_fatal(FARGS,"\nThe -tdo option requires -td to be set. \n");
}
if (user_td >= nframes)
{
gmx_fatal(FARGS,"\nValue of -td must be less than nframes - 1.\n");
}
}
// Create an array to hold all frames.
snew(frames, nframes);
// Create arrays based on nframes.
snew(maxdiff, nframes);
snew(avgdiff, nframes);
snew(ISDmat, nframes);
for (i = 0; i < nframes; i++)
{
maxdiff[i] = 0.0;
avgdiff[i] = 0.0;
snew(ISDmat[i], nframes);
}
nframes2 = nframes * nframes;
/* Opens trj. Reads first frame. Returns status. Allocates mem for x.
*
* Not sure which argument determines which atoms to pull info for.
*/
printf("\nStoring trajectory to memory.\n");
natoms=read_first_x(oenv, &status, ftp2fn(efTRX, NFILE, fnm), &t, &x, box);
// Initialize index to keep track of current frame.
i = 0;
// This is for removing periodic boundary conditions.
gpbc = gmx_rmpbc_init(&top.idef, ePBC, natoms,box);
do
{
// Set aside new memory to store this frame.
snew(frames[i], iatoms);
// Removes periodic boundary conditions from x.
gmx_rmpbc(gpbc, natoms, box, x);
// Centers x. The NULL arguments are necessary to fit based on subset.
reset_x(natoms, NULL, natoms, NULL, x, nweights);
// Saves the current frame into frames.
for (n=0; n<iatoms; n++)
{
copy_rvec(x[(int)index[n]], frames[i][n]);
}
// Increment frame index.
i++;
} while(read_next_x(oenv, status, &t, natoms, x, box));
// Close the trajectory.
close_trj(status);
// Closes the thing that removes periodic boundary conditions.
gmx_rmpbc_done(gpbc);
// Initialize to 0.
maxISD = 0.0;
avgISD = 0.0;
/* Main calculation loop.
*/
printf("\nCalculating inter-structure distances. \n");
/* Originally this was designed to only loop through each pair of i and j
* one time to save half of the calculations. Eventually it became
* impractical to make sure that each ISDM was symmetrical, so now the
* algorithm takes the performance hit in favor of accuracy and simplicity.
*/
percent_calcs = 1;
finished_calcs = 0;
// Loop through reference frames.
#pragma omp parallel for schedule(dynamic) if (bMP)
for (i = 0; i < nframes; i++)
{
// Some memory required by each thread.
int j;
real ISD;
double dISD;
matrix rrot, rrotx, rroty, rrotz;
rvec *iframe, *jframe, *cframe, *rframe, rrot_xyz, xold;
if (bRROT)
{
snew(iframe,iatoms);
// Use up the first few random numbers that usually aren't random.
rrot_xyz[0] = (real)rand();
rrot_xyz[1] = (real)rand();
rrot_xyz[2] = (real)rand();
}
if (bFit)
{
snew(jframe,iatoms);
}
// Loop through fitting frames.
for (j = 0; j < nframes; j++)
{
/* In this section, we'll put calls to all of the ISDMs.
*
* Each should have its own if statement, so it is only executed
* if that option is specified at the command line.
*
* This function doesn't use the output stored in diff.
*/
// Skip for i == j (comparing structure with self).
if (i == j)
{
ISDmat[i][j] = 0;
continue;
}
// Copy ith frame.
if (bRROT)
{
// Make a copy of the ith frame.
copy_rvecn(frames[i], iframe, 0, iatoms);
rframe = iframe;
}
else
{
rframe = frames[i];
}
// Fit the jth frame.
if (bFit)
{
// Need to make a copy of the fit frame or bad stuff will happen.
copy_rvecn(frames[j], jframe, 0, iatoms);
// Aligns jframe to current reference frame.
do_fit(iatoms, iweights, frames[i], jframe);
cframe = jframe;
}
else
{
cframe = frames[j];
}
// Calls most ISDM options.
if (bDFLT || bRMSD || bSRMS || bRG || bSRG || bE2E || bSE2E ||
bMIR || bANG || bDIH || bANGDIH || bPHIPSI || bDRMS ||
bSDRMS || bPCOR || bACOR || bANG2 || bDIH2 || bANGDIH2 ||
bPHIPSI2 || bANGDIH2G || bRMSDIH || bRROT)
{
ISD = call_ISDM(iatoms, cframe, rframe, ISDM);
}
// MAMMOTH. User gives -mammoth option.
if (bMAMMOTH)
{
// Calculate MAMMOTH comparison.
ISD = calc_mammoth(iatoms, cframe, rframe, rnum);
}
// ESA.
if (bESA)
{
// Calculate ESA comparison.
ISD = calc_esa(iatoms, cframe, rframe);
}
// Use doubles instead of reals for the summations.
dISD = (double)ISD;
// Add difference to the difference matrix.
ISDmat[i][j] = ISD;
// Update the max and avg difference for scaling.
if (dISD > maxdiff[i])
{
maxdiff[i] = dISD;
}
avgdiff[i] += dISD;
// Debugging.
//printf("On the %i th loop. \n",j);
}
// Average difference for each frame.
avgdiff[i] /= (nframes - 1);
// Update progress output. OpenMP critical section.
#pragma omp critical
{
finished_calcs += nframes;
while ((double)(finished_calcs) / nframes2 >= (double)percent_calcs / 100)
{
fprintf(stderr, "Approximately %i percent complete. \r", percent_calcs);
percent_calcs++;
}
} // End of OpenMP critical section.
// Free memory used in parallel section.
if (bRROT)
{
sfree(iframe);
}
if (bFit)
{
sfree(jframe);
}
} // End of OpenMP parallel for loop.
fprintf(stderr, "\n\n\n");
// Find the final average of differences.
for (i = 0; i < nframes; i++)
{
avgISD += avgdiff[i];
}
avgISD /= nframes;
int j;
if (bISD)
{
// Opens the output file.
out = opt2FILE("-isd", NFILE, fnm, "w");
// Write output.
for (i = 0; i < nframes; i++)
{
fprintf(out, "%12.8f", ISDmat[i][0]);
for (j = 1; j < nframes; j++)
{
fprintf(out, ",%12.8f", ISDmat[i][j]);
}
fprintf(out, "\n");
}
// Close the output file.
ffclose(out);
}
/* Implements Torgerson's classical multi-dimensional scaling (CMDS)
* algorithm.
*
* 1) Convert the ISD matrix to the squared proximities matrix (P2).
*
* 2) Perform double centering on P2.
* [B = (-1/2) * J * P2 * J, where J = I - (UnitMatrix / nframes)]
*
* 3) Solve for the eigenvalues and eigenvectors.
*
* 4) Keep only the dimensions corresponding to positive eigenvalues.
* The rest are imaginary dimensions.
* [The requirement to keep the dimension here is that both the
* eigenvalue and the root of the eigenvalue must be greater than
* zero to rule out dimensions below the precision limit.]
*
* 5) Sign convention. This may not be necessary.
*/
fprintf(stderr, "Performing MDS.\n");
// Allocate memory.
snew(J, nframes * nframes);
snew(P2, nframes * nframes);
snew(P2J, nframes * nframes);
snew(V, nframes * nframes);
snew(E, nframes);
snew(Va, nframes);
for (i = 0; i < nframes; i++)
{
Va[i] = &V[i * nframes];
}
// Step 1.
fprintf(stderr, "MDS step 1 of 5. \n");
for (i = 0; i < nframes; i++)
{
for (j = 0; j < nframes; j++)
{
P2[(i * nframes) + j] = ISDmat[i][j] * ISDmat[i][j];
}
}
// Step 2.
fprintf(stderr, "MDS step 2 of 5. \n");
// Constructs J.
for (i = 0; i < nframes; i++)
{
for (j = 0; j < nframes; j++)
{
if (i == j)
{
J[(i * nframes) + j] = 1.0 - (1.0 / nframes);
}
else
{
J[(i * nframes) + j] = -1.0 / nframes;
}
}
}
// Solve for B.
mat_mult_mat(P2, J, nframes, nframes, nframes, P2J, bMP);
B = P2; // Finished with the memory in P2. Reuse it to store B.
scl_mult_mat(-0.5, J, nframes, nframes, J);
mat_mult_mat(J, P2J, nframes, nframes, nframes, B, bMP);
// Step 3.
fprintf(stderr, "MDS step 3 of 5. \n");
// Fix assymetry in B caused by precision limits.
BT = J; // Finished with the memory in J. Reuse it to store BT.
mat_transpose(B, nframes, nframes, BT);
for (i = 0; i < nframes; i++)
{
for (j = 0; j < nframes; j++)
{
B[(i * nframes) + j] = (B[(i * nframes) + j] +
BT[(i * nframes) + j]) / 2.0;
}
}
// Call the eigensolver which uses a lapack backend.
// E and V are sorted ascending by eigensolver.
eigensolver(B, nframes, 0, nframes, E, V);
// Step 4.
fprintf(stderr, "MDS step 4 of 5. \n");
// Find eigenvalues > 0.0.
for (i = 0; i < nframes; i++)
{
if (E[i] > 0.0)
{
if (sqrt(E[i]) > 0.0)
{
p = i;
np = nframes - p;
break;
}
}
if (i == (nframes - 1))
{
gmx_fatal(FARGS,"\nThere are zero positive eigenvalues.\n");
}
}
// Save coordinates in reduced dimensions.
snew(MDSa, nframes * np);
snew(MDS, nframes);
for (i = 0; i < nframes; i++)
{
MDS[i] = &MDSa[np * i];
}
for (i = 0; i < nframes; i++)
{
for (j = 0; j < np; j++)
{
MDS[i][j] = sqrt(E[nframes - j - 1]) * Va[nframes - j - 1][i];
}
}
// Step 5.
fprintf(stderr, "MDS step 5 of 5. \n");
for (j = 0; j < np; j++)
{
maxcoor = -1.0;
for (i = 0; i < nframes; i++)
{
abscoor = abs(MDS[i][j]);
if (abscoor > maxcoor)
{
maxcoor = abscoor;
maxcoori = i;
}
}
if (MDS[maxcoori][j] < 0.0)
{
for (i = 0; i < nframes; i++)
{
MDS[i][j] *= -1.0;
}
}
}
// Output the eigenvectors.
if (bVec)
{
// Opens the output file.
out = opt2FILE("-vec", NFILE, fnm, "w");
// Write output.
for (j = 0; j < nframes; j++)
{
fprintf(out, "%15.6e", Va[0][j]);
for (i = 1; i < nframes; i++)
{
fprintf(out, ",%15.6e", Va[i][j]);
}
fprintf(out, "\n");
}
// Close the output file.
ffclose(out);
}
// Release memory.
sfree(J);
sfree(P2);
sfree(P2J);
sfree(V);
sfree(Va);
fprintf(stderr, "\nClassical MDS Complete. \n\n");
// Output the eigenvalues.
if (bEig)
{
// Opens the output file.
out = xvgropen(opt2fn("-eig", NFILE, fnm),
"MDS Eigenvalues",
"Dimension",
"Eigenvalue",
oenv);
// Write output.
for (i = 1; i <= nframes; i++)
{
// Print in reversed order.
fprintf(out, "%-6i %15.8f \n", i, E[nframes - i]);
}
// Close the output file.
ffclose(out);
}
// Estimate the number of dimensions explained by noise.
if (bNoise)
{
printf("\n\n");
printf("Positive eigenvalues correspond to real dimensions. ");
printf("Negative eigenvalues correspond to imaginary dimensions.\n\n");
// Sum the positive and negative eigenvalues.
cumpe = 0.0;
for (i = p; i < nframes; i++)
{
cumpe += E[i];
}
sumne = 0.0;
for (i = 0; i < p; i++)
{
sumne += E[i];
}
printf("Sum of positive eigenvalues: %12.6f \n", cumpe);
printf("Sum of negative eigenvalues: %12.6f \n", sumne);
// Output the explained noise.
printf("%-6i eigenvalues are positive.\n", np);
printf("%-6i eigenvalues are zero or negative.\n", nframes - np);
cumpe = 0.0;
for (i = p; i < nframes; i++)
{
cumpe += E[i];
if (cumpe > abs(sumne))
{
break;
}
}
printf("%-6i positive eigenvalues can be explained by negative "
"eigenvalues.\n", i - p);
cumpe = 0.0;
for (i = p; i < nframes; i++)
{
cumpe += E[i];
if (cumpe > noisefloor)
{
break;
}
}
printf("%-6i positive eigenvalues can be explained by estimated "
"thermal noise.\n", i - p);
cumpe = 0.0;
for (i = p; i < nframes; i++)
{
cumpe += E[i];
if (cumpe > (abs(sumne) + noisefloor))
{
break;
}
}
printf("%-6i positive eigenvalues can be explained by estimated "
"thermal noise and algorithmic noise combined.\n\n", i - p);
}
// Output dimensionally reduced coordinates.
if (bMDS)
{
// Opens the output file.
out = opt2FILE("-mds", NFILE, fnm, "w");
// Write output.
for (i = 0; i < nframes; i++)
{
fprintf(out, "%12.8f", MDS[i][0]);
for (j = 1; j < np; j++)
{
fprintf(out, ",%12.8f", MDS[i][j]);
}
fprintf(out, "\n");
}
// Close the output file.
ffclose(out);
}
// Allocates memory to store the approximated ISD.
if (bRcc || bMRg || bDRg || bPy || bM)
{
snew(EISD, nframes);
snew(EISDm, nframes * nframes);
for (i = 0; i < nframes; i++)
{
EISD[i] = &EISDm[nframes * i];
}
}
// Reduced dimensional visualization.
if (bPy)
{
// Calculate accuracy of the displayed results.
calc_EISD(MDS, nframes, 6, EISD);
Rcc = calc_rcc(ISDmat, EISD, nframes);
fprintf(stdout, "The accuracy for 6D MDS is R = %8.4f.\n\n", Rcc);
// Opens the output file.
out = opt2FILE("-py", NFILE, fnm, "w");
// Python script header (py).
fprintf(out, "# Plots MDS output in 6 dimensions:\n");
fprintf(out, "# x, y, z, r, g, b\n\n");
// Import modules (py).
fprintf(out, "from mayavi import mlab\n");
fprintf(out, "import numpy as np\n\n");
// Save data to numpy array (py).
fprintf(out, "# Save data to numpy array.\n");
fprintf(out, "MDS = np.array([[%8.4f", MDS[0][0]);
for (j = 1; j < 6; j++)
{
fprintf(out, ",%8.4f", MDS[0][j]);
}
fprintf(out, "]");
for (i = 1; i < nframes; i++)
{
fprintf(out, ",\n [%8.4f", MDS[i][0]);
for (j = 1; j < 6; j++)
{
fprintf(out, ",%8.4f", MDS[i][j]);
}
fprintf(out, "]");
}
fprintf(out, "])\n\n");
// Calculate box center and range, center at zero (py).
fprintf(out, "# Calculate box center and range.\n");
fprintf(out, "bctr = np.mean(MDS, 0)\n");
fprintf(out, "MDS = np.subtract(MDS, bctr)\n");
fprintf(out, "bmin = np.min(MDS) #- Rbead\n");
fprintf(out, "bmax = np.max(MDS) #+ Rbead\n\n");
// Split MDS by dimensions. Recenter and rescale rgb dimensions (py).
fprintf(out, "# Split MDS by dimensions. Recenter to 0.5.\n");
fprintf(out, "xyz, rgb = np.hsplit(MDS, 2)\n");
fprintf(out, "color_sf = 0.8 / (bmax - bmin)\n");
fprintf(out, "rgb = np.add(np.multiply(rgb, color_sf), 0.5)\n");
fprintf(out, "s = np.array([0.01])\n");
fprintf(out, "s = s[0]\n\n");
/*
* // Display first coordinate and set up figure (py).
* fprintf(out, "# Display first coordinate and set up figure.\n");
* fprintf(out, "x = xyz[0, 0]\n");
* fprintf(out, "y = xyz[0, 1]\n");
* fprintf(out, "z = xyz[0, 2]\n");
* fprintf(out, "r = rgb[0, 0]\n");
* fprintf(out, "g = rgb[0, 1]\n");
* fprintf(out, "b = rgb[0, 2]\n");
* fprintf(out, "if r > 1.0:\n r = 1.0\n");
* fprintf(out, "if r < 0.0:\n r = 0.0\n");
* fprintf(out, "if g > 1.0:\n g = 1.0\n");
* fprintf(out, "if g < 0.0:\n g = 0.0\n");
* fprintf(out, "if b > 1.0:\n b = 1.0\n");
* fprintf(out, "if b < 0.0:\n b = 0.0\n");
* fprintf(out, "mlab.points3d(x, y, z, color=(r, g, b), ");
* fprintf(out, "extent=[bmin, bmax, bmin, bmax, bmin, bmax])\n\n");
*/
// Display coordinates (py).
fprintf(out, "# Display coordinates.\n");
fprintf(out, "for i in range(0, %i):\n", nframes);
fprintf(out, " x = xyz[i, 0]\n");
fprintf(out, " y = xyz[i, 1]\n");
fprintf(out, " z = xyz[i, 2]\n");
fprintf(out, " r = rgb[i, 0]\n");
fprintf(out, " g = rgb[i, 1]\n");
fprintf(out, " b = rgb[i, 2]\n");
fprintf(out, " if r > 1.0:\n r = 1.0\n");
fprintf(out, " if r < 0.0:\n r = 0.0\n");
fprintf(out, " if g > 1.0:\n g = 1.0\n");
fprintf(out, " if g < 0.0:\n g = 0.0\n");
fprintf(out, " if b > 1.0:\n b = 1.0\n");
fprintf(out, " if b < 0.0:\n b = 0.0\n");
fprintf(out, " mlab.points3d(x, y, z, s, color=(r, g, b), scale_factor=1)\n\n");
// Close the output file.
ffclose(out);
}
// Reduced dimensional visualization.
if (bM)
{
// Calculate accuracy of the displayed results.
calc_EISD(MDS, nframes, 6, EISD);
Rcc = calc_rcc(ISDmat, EISD, nframes);
fprintf(stdout, "The accuracy for 6D MDS is R = %8.4f.\n\n", Rcc);
// Opens the output file.
out = opt2FILE("-m", NFILE, fnm, "w");
// Octave function and comments.
fprintf(out,
"function [MDSout, MDS] = disp6D(varargin)\n"
"%% function [MDSout, MDS] = disp6D(varargin)\n"
"%%\n"
"%% 'Delay' : Pause between frames (numeric, units = ps).\n"
"%% Setting Delay creates a movie-like output.\n"
"%% 'TimeStep' : Time per frame (numeric, units = ps).\n"
"%% 'NSims' : Display N simulations independently (numeric).\n"
"%% 'NAvg' : Runs an averaging window of size 2 * NAvg + 1.\n"
"%% 'Radius' : Sphere size (numeric).\n"
"%% 'Res' : Sphere resolution (numeric).\n"
"%% 'NSkip' : Only display every NSkip + 1 sphere (numeric).\n"
"%% 'NClust' : Enables clustering with n clusters (numeric).\n"
"%% 'Title' : Figure title (char).\n"
"%% 'PNGClust' : Create PNG of clustering (char).\n"
"%% 'PNGName' : Create PNG of CMDS (char).\n"
"%% 'GIFName' : Create animated GIF (char).\n"
"%% 'GIFStep' : Frames per image (numeric).\n"
"%% 'bShow' : Plot even if no image is written (logical).\n"
"%% 'ShowLine' : Connect spheres (logical).\n"
"%% 'Vis3D' : Better, but causes error in Octave (logical).\n"
"%% 'Out2D' : Output in 2D even if NSims > 1 (logical).\n"
"%%\n"
"%% Defaults : \n"
"%% No delay, 1.0 ps time step, one simulation, no averaging \n"
"%% window, radius auto, sphere resolution 6, no skipping, no \n"
"%% title, no PNG, no GIF, 1.0 frame GIF step, do not show \n"
"%% no line, no Vis3D, output MDSout in 2D.\n"
"%%\n"
"%% Plots MDS output in 6 dimensions:\n"
"%% x, y, z, r, g, b\n"
"\n"
);
// Defaults.
fprintf(out,
"%% Set defaults.\n"
"defDelay = -1.0;\n"
"defTimeStep = -1.0;\n"
"defNSims = -1.0;\n"
"defNAvg = -1.0;\n"
"defRadius = -1.0;\n"
"defRes = -1.0;\n"
"defNSkip = -1.0;\n"
"defNClust = -1.0;\n"
"defTitle = '';\n"
"defPNGClust = '';\n"
"defPNGName = '';\n"
"defGIFName = '';\n"
"defGIFStep = -1.0;\n"
"defbShow = false;\n"
"defShowLine = false;\n"
"defVis3D = false;\n"
"defOut2D = true;\n"
"\n"
);
fprintf(out,
"%% Initialize parser.\n"
"p = inputParser;\n"
"addOptional(p, 'Delay', defDelay, @isnumeric);\n"
"addOptional(p, 'TimeStep', defTimeStep, @isnumeric);\n"
"addOptional(p, 'NSims', defNSims, @isnumeric);\n"
"addOptional(p, 'NAvg', defNAvg, @isnumeric);\n"
"addOptional(p, 'Radius', defRadius, @isnumeric);\n"
"addOptional(p, 'Res', defRes, @isnumeric);\n"
"addOptional(p, 'NSkip', defNSkip, @isnumeric);\n"
"addOptional(p, 'NClust', defNClust, @isnumeric);\n"
"addOptional(p, 'Title', defTitle, @ischar);\n"
"addOptional(p, 'PNGClust', defPNGClust, @ischar);\n"
"addOptional(p, 'PNGName', defPNGName, @ischar);\n"
"addOptional(p, 'GIFName', defGIFName, @ischar);\n"
"addOptional(p, 'GIFStep', defGIFStep, @isnumeric);\n"
"addOptional(p, 'bShow', defbShow);\n"
"addOptional(p, 'ShowLine', defShowLine);\n"
"addOptional(p, 'Vis3D', defVis3D);\n"
"addOptional(p, 'Out2D', defOut2D);\n"
"\n"
"parse(p, varargin{:});\n"
"PNGClust = p.Results.PNGClust;\n"
"PNGName = p.Results.PNGName;\n"
"GIFName = p.Results.GIFName;\n"
"bShow = p.Results.bShow;\n"
"\n"
);
// Save data to matrix.
fprintf(out, "%% Save data to matrix called MDS.\n");
fprintf(out, "MDS = [%8.4f", MDS[0][0]);
for (j = 1; j < 6; j++)
{
fprintf(out, ", %8.4f", MDS[0][j]);
}
for (i = 1; i < nframes; i++)
{
fprintf(out, ";\n %8.4f", MDS[i][0]);
for (j = 1; j < 6; j++)
{
fprintf(out, ", %8.4f", MDS[i][j]);
}
}
fprintf(out, "];\n\n");
// Accuracy of MDS.
fprintf(out, "%% Print correlation coefficient of MDS and ISD.\n");
fprintf(out, "fprintf('The accuracy of MDS is: %%8.4f \\n', %8.4f)\n\n", Rcc);
// Test NAvg, NSkip, NSims, and NClust.
fprintf(out,
"%% Test NAvg, NSkip, NSims, and NClust.\n"
"NSims = p.Results.NSims;\n"
"NSkip = p.Results.NSkip;\n"
"NAvg = p.Results.NAvg;\n"
"NClust = p.Results.NClust;\n"
"if (rem(NSims, 1) ~= 0)\n"
" error('NSims should have a positive integer value.')\n"
"end\n"
"\n"
"if (rem(NSkip, 1) ~= 0)\n"
" error('NSkip should have a positive integer value.')\n"
"end\n"
"\n"
"if (rem(NAvg, 1) ~= 0)\n"
" error('NAvg should have a positive integer value.')\n"
"end\n"
"\n"
"if (rem(NClust, 1) ~= 0)\n"
" error('NClust should have a positive integer value.')\n"
"end\n"
"\n"
);
fprintf(out,
"%% Rearrange MDS matrix by simulation.\n"
"nframes = size(MDS, 1);\n"
"if (NSims < 1)\n"
" NSims = 1;\n"
" NPerSim = nframes;\n"
" MDSmat = MDS;\n"
"else\n"
" NSims = fix(NSims);\n"
" NPerSim = fix(nframes / NSims);\n"
" MDSmat = zeros(NPerSim, 6, NSims);\n"
" for i = 1:NSims\n"
" i1 = (i - 1) * NPerSim + 1;\n"
" i2 = i * NPerSim;\n"
" MDSmat(:, :, i) = MDS(i1:i2, :);\n"
" end\n"
"end\n"
);
fprintf(out,
"%% Apply averaging filter.\n"
"MDSout = MDSmat;\n"
"if (NAvg >= 1)\n"
" NAvg = fix(NAvg);\n"
" for i = 1:NSims\n"
" for j = 1:NPerSim\n"
" j1 = j - NAvg;\n"
" if (j1 < 1)\n"
" j1 = 1;\n"
" end\n"
" j2 = j + NAvg;\n"
" if (j2 > NPerSim)\n"
" j2 = NPerSim;\n"
" end\n"
" for k = 1:6\n"
" MDSijk = mean(MDSmat(j1:j2, k, i));\n"
" MDSout(j, k, i) = MDSijk;\n"
" end\n"
" end\n"
" end\n"
"end\n"
);
// Set bead radius. Calculate box center and range.
fprintf(out,
"%% Calculate plot limits.\n"
"bsize = max(max(max(MDSout))) - min(min(min(MDSout)));\n"
"if (p.Results.Radius < 0.0)\n"
" R = 0.01 * bsize;\n"
"else\n"
" R = p.Results.Radius;\n"
"end\n"
"bctr = mean(mean(MDSout, 3));\n"
"bmin = min(min(min(MDSout))) - R;\n"
"bmax = max(max(max(MDSout))) + R;\n"
"color_sf = 0.8 / bsize;\n"
"\n"
);
// Split MDS by dimensions. Recenter and rescale rgb dimensions.
fprintf(out,
"%% Split MDS by dimensions. Recenter rgb to 0.5.\n"
"x = zeros(NPerSim, NSims);\n"
"y = x;\n"
"z = x;\n"
"r = x;\n"
"g = x;\n"
"b = x;\n"
"for i = 1:NSims\n"
" x(:, i) = MDSout(:, 1, i);\n"
" y(:, i) = MDSout(:, 2, i);\n"
" z(:, i) = MDSout(:, 3, i);\n"
" r(:, i) = MDSout(:, 4, i);\n"
" g(:, i) = MDSout(:, 5, i);\n"
" b(:, i) = MDSout(:, 6, i);\n"
" \n"
" %% Rescale colors.\n"
" r(:, i) = (r(:, i) - bctr(4)) * color_sf + 0.5;\n"
" g(:, i) = (g(:, i) - bctr(5)) * color_sf + 0.5;\n"
" b(:, i) = (b(:, i) - bctr(6)) * color_sf + 0.5;\n"
"end\n"
"\n"
);
// Setup main figure.
fprintf(out, "%% Setup figure.\n");
fprintf(out, "n = %6i;\n", nframes);
fprintf(out,
"if (p.Results.Res < 0.0)\n"
" Res = 6;\n"
"else\n"
" Res = p.Results.Res;\n"
"end\n"
"[Sx, Sy, Sz] = sphere(Res);\n"
"Sx = R * Sx; Sy = R * Sy; Sz = R * Sz;\n"
"figure;\n"
"axis([bmin, bmax, bmin, bmax, bmin, bmax]);\n"
"\n"
"%% Choose axis display style.\n"
"if (p.Results.Vis3D)\n"
" axis('vis3d');\n"
"else\n"
" axis('equal');\n"
"end\n"
"\n"
);
// Display coordinates.
fprintf(out,
"%% Display 6D coordinates.\n"
"hold on;\n"
"if (NSkip < 1)\n"
" NSkip = 1;\n"
"else\n"
" NSkip = fix(NSkip);\n"
"end\n"
"\n"
"if (~strcmp(GIFName, '') || ~strcmp(PNGName, '') || bShow)\n"
" for i = 1:NPerSim\n"
" if (p.Results.Delay > 0.0)\n"
" pause(p.Results.Delay);\n"
" end\n"
" if (p.Results.TimeStep > 0.0)\n"
" iTime = num2str(i * p.Results.TimeStep);\n"
" iTime = strcat(iTime,' ns');\n"
" else\n"
" iTime = '';\n"
" end\n"
" iTitle = strcat(p.Results.Title,' ',iTime);\n"
" title(iTitle);\n"
" \n"
" for j = 1:NSims\n"
" if (mod(i, NSkip) == 0)\n"
" c = [r(i, j), g(i, j), b(i, j)];\n"
" h = surf(Sx + x(i, j), Sy + y(i, j), Sz + z(i, j));\n"
" set(h, 'FaceColor', c, 'EdgeColor', 'none');\n"
" end\n"
" end\n"
" if (~strcmp(GIFName, ''))\n"
" if (i == 1)\n"
" n = 0;\n"
" f = getframe(gcf);\n"
" im = frame2im(f);\n"
" [imind,cm] = rgb2ind(im,256);\n"
" imwrite(imind, cm, GIFName, 'gif', 'Loopcount', inf);\n"
" continue\n"
" end\n"
" n = n + 1;\n"
" if (n >= p.Results.GIFStep)\n"
" n = 0;\n"
" f = getframe(gcf);\n"
" im = frame2im(f);\n"
" [imind,cm] = rgb2ind(im,256);\n"
" imwrite(imind, cm, GIFName, 'gif', 'WriteMode', 'append');\n"
" end\n"
" end\n"
" end\n"
" if (~strcmp(PNGName, ''))\n"
" print(PNGName, '-dpng')\n"
" end\n"
"end\n"
"\n"
);
// Line option.
fprintf(out,
"%% Optionally draw a line to show the time component.\n"
"if (p.Results.ShowLine)\n"
" c = [0.75, 0.75, 0.75];\n"
" plot3(x, y, z, 'LineWidth', 1, 'Color', c);\n"
"end\n"
"hold off;\n"
"\n"
);
// Convert MDSout.
fprintf(out,
"%% Convert MDSout.\n"
"if (p.Results.Out2D || (NClust >= 1))\n"
" MDSmat = zeros(NPerSim * NSims, 6);\n"
" for i = 1:NSims\n"
" i1 = (i - 1) * NPerSim + 1;\n"
" i2 = i * NPerSim;\n"
" MDSmat(i1:i2, :) = MDSout(:, :, i);\n"
" end\n"
" if (p.Results.Out2D)\n"
" MDSout = MDSmat;\n"
" end\n"
"end\n"
"\n"
);
// Apply kmeans clustering.
fprintf(out,
"%% Apply clustering.\n"
"if (NClust >= 1)\n"
" figure;\n"
" colormap(gcf, jet(NClust));\n"
" idx = kmeans(MDSmat, NClust);\n"
" for i = 1:NClust\n"
" end\n"
"end\n"
"\n"
);
// Close .m script.
fprintf(out, "end");
// Close the output file.
ffclose(out);
}
// Tests the accuracy of the dimensionally reduced coordinates.
if (bRcc)
{
// Error checking for rcutoff.
if (rcutoff < 0.0)
{
gmx_fatal(FARGS,"\nThe argument for -rcutoff must be greater "
"than or equal to 0.\n");
}
fprintf(stderr, "\nCalculating accuracy of dimensionality reduction."
"\n");
// Opens the output file.
out = xvgropen(opt2fn("-rcc", NFILE, fnm),
"Accuracy of Dimensionality Reduction",
"Dimension",
"Correlation Coefficient, R",
oenv);
for (d = 1; d <= np; d++)
{
// Calculate correlation coefficient.
calc_EISD(MDS, nframes, d, EISD);
Rcc = calc_rcc(ISDmat, EISD, nframes);
// Write to file.
fprintf(out, "%-6i %12.8f \n", i, Rcc);
if (Rcc > rcutoff)
{
break;
}
}
// Close file.
ffclose(out);
printf("\nThe rcutoff is: %12.8f \n", rcutoff);
printf("The final correlation coefficient is: %12.8f \n", Rcc);
printf("The estimated dimensionality is: %-6i \n", d);
}
// Tests correlation between ISD and Rg.
if (bMRg)
{
fprintf(stderr, "\nCalculating correlation of ISD with Rg.\n");
// Opens the output file.
out = xvgropen(opt2fn("-mrg", NFILE, fnm),
"Correlation of Rg with ISD",
"Radius of Gyration, Rg (nm)",
"ISD",
oenv);
/* Main calculation loop.
*/
printf("\nCalculating Rg matrix. \n");
percent_calcs = 1;
// Loop through reference frames.
for (i = 0; i < nframes; i++)
{
// Loop through fitting frames.
for (j = 0; j < nframes; j++)
{
// Skip for i == j (comparing structure with self).
if (i == j)
{
EISD[i][j] = 0.0;
continue;
}
EISD[i][j] = call_ISDM(iatoms, frames[j], frames[i], "MRG");
// Write to file.
fprintf(out, "%12.8f %12.8f \n", EISD[i][j], ISDmat[i][j]);
}
// Update progress output.
while ((double)(i+1)/nframes >= (double)percent_calcs/100)
{
fprintf(stderr, "Approximately %i percent complete. \r",
percent_calcs);
percent_calcs++;
}
}
Rcc = calc_rcc(ISDmat, EISD, nframes);
printf("The Rg vs ISD correlation coefficient is: %12.8f \n", Rcc);
// Close file.
ffclose(out);
}
// Tests correlation between ISD and Rg difference.
if (bDRg)
{
fprintf(stderr, "\nCalculating correlation of ISD with Rg difference."
"\n");
// Opens the output file.
out = xvgropen(opt2fn("-drg", NFILE, fnm),
"Correlation of Rg difference with ISD",
"Radius of Gyration Difference (nm)",
"ISD",
oenv);
/* Main calculation loop.
*/
printf("\nCalculating Rg difference matrix. \n");
percent_calcs = 1;
// Loop through reference frames.
for (i = 0; i < nframes; i++)
{
// Loop through fitting frames.
for (j = 0; j < nframes; j++)
{
// Skip for i == j (comparing structure with self).
if (i == j)
{
EISD[i][j] = 0;
continue;
}
EISD[i][j] = call_ISDM(iatoms, frames[j], frames[i], "RG");
// Write to file.
fprintf(out, "%12.8f %12.8f \n", EISD[i][j], ISDmat[i][j]);
}
// Update progress output.
while ((double)(i+1)/nframes >= (double)percent_calcs/100)
{
fprintf(stderr, "Approximately %i percent complete. \r",
percent_calcs);
fflush(stderr);
percent_calcs++;
}
}
Rcc = calc_rcc(ISDmat, EISD, nframes);
printf("The Rg difference vs ISD correlation coefficient is: "
"%12.8f \n", Rcc);
// Close file.
ffclose(out);
}
// Closing.
thanx(stderr);
return 0;
}
|
lister.c | /*
* `Finding path motifs in large temporal graphs using algebraic fingerprints`
*
* This experimental source code is supplied to accompany the
* aforementioned paper.
*
* The source code is configured for a gcc build to a native
* microarchitecture that must support the AVX2 and PCLMULQDQ
* instruction set extensions. Other builds are possible but
* require manual configuration of 'Makefile' and 'builds.h'.
*
* The source code is subject to the following license.
*
* The MIT License (MIT)
*
* Copyright (c) 2020 S. Thejaswi, A. Gionis, J. Lauri
* Copyright (c) 2019 S. Thejaswi, A. Gionis
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
#include<stdio.h>
#include<stdlib.h>
#include<assert.h>
#include<time.h>
#include<sys/utsname.h>
#include<string.h>
#include<stdarg.h>
#include<assert.h>
#include<ctype.h>
#include<omp.h>
/************************************************************* Configuration. */
#define MAX_K 32
#define MAX_SHADES 32
#define PREFETCH_PAD 32
#define MAX_THREADS 128
#define UNDEFINED -1
#define MATH_INF ((index_t)0x3FFFFFFF)
#include"builds.h" // get build config
typedef long int index_t; // default to 64-bit indexing
#include"gf.h" // finite fields
#include"ffprng.h" // fast-forward pseudorandom number generator
/* Fully parenthesized min/max. Without the outer parentheses,
 * expressions such as `2*MIN(a,b)` or `MIN(a,b)+1` parse incorrectly
 * because `?:` binds very loosely. Arguments are still evaluated
 * twice, so avoid side effects in x and y. */
#define MIN(x,y) ((x)<(y) ? (x) : (y))
#define MAX(x,y) ((x)>(y) ? (x) : (y))
/********************************************************************* Flags. */
index_t flag_bin_input = 0; // default to ASCII input
/************************************************************* Common macros. */
/* Doubly linked circular list navigation; `to` is the anchor node.
 * pnlinknext/prev insert `el` after/before `to`; pnunlink removes `el`
 * (leaving its own pointers intact); pnrelink re-attaches a node whose
 * neighbors still point elsewhere. */
#define pnlinknext(to,el) { (el)->next = (to)->next; (el)->prev = (to); (to)->next->prev = (el); (to)->next = (el); }
#define pnlinkprev(to,el) { (el)->prev = (to)->prev; (el)->next = (to); (to)->prev->next = (el); (to)->prev = (el); }
#define pnunlink(el) { (el)->next->prev = (el)->prev; (el)->prev->next = (el)->next; }
#define pnrelink(el) { (el)->next->prev = (el); (el)->prev->next = (el); }
/*********************************************************** Error reporting. */
#define ERROR(...) error(__FILE__,__LINE__,__func__,__VA_ARGS__);
/* Print a fatal diagnostic (file, line, function, printf-style message)
 * to stderr and abort the process. Never returns. */
static void error(const char *fn, int line, const char *func,
                  const char *format, ...)
{
    va_list ap;
    va_start(ap, format);
    fprintf(stderr, "ERROR [file = %s, line = %d]\n%s: ", fn, line, func);
    vfprintf(stderr, format, ap);
    fputc('\n', stderr);
    va_end(ap);
    abort();
}
/********************************************************* Get the host name. */
#define MAX_HOSTNAME 256
/* Return the node (host) name in a static buffer.
 * Fix: the original strcpy'd uname's nodename unbounded and ignored
 * uname() failure; now the copy is bounded and failure yields "unknown". */
const char *sysdep_hostname(void)
{
    static char hn[MAX_HOSTNAME];
    struct utsname undata;
    if(uname(&undata) != 0) {
        snprintf(hn, sizeof hn, "unknown");
    } else {
        snprintf(hn, sizeof hn, "%s", undata.nodename);
    }
    return hn;
}
/********************************************************* Available threads. */
/* Number of worker threads: the OpenMP maximum when built parallel,
 * otherwise a single thread. */
index_t num_threads(void)
{
    index_t nt = 1;
#ifdef BUILD_PARALLEL
    nt = omp_get_max_threads();
#endif
    return nt;
}
/********************************************** Memory allocation & tracking. */
/* All project allocations go through these wrappers so that leaks and
 * peak memory usage can be tracked. */
#define MALLOC(x) malloc_wrapper(x)
#define FREE(x) free_wrapper(x)
index_t malloc_balance = 0;   // outstanding MALLOCs minus FREEs
/* One tracking node per live allocation, kept in a circular list
 * anchored at malloc_track_root. */
struct malloc_track_struct
{
    void *p;        // the tracked user pointer
    size_t size;    // its allocation size in bytes
    struct malloc_track_struct *prev;
    struct malloc_track_struct *next;
};
typedef struct malloc_track_struct malloc_track_t;
malloc_track_t malloc_track_root;
size_t malloc_total = 0;      // total bytes currently allocated
/* Stack of peak-usage watermarks; see push_memtrack/pop_memtrack. */
#define MEMTRACK_STACK_CAPACITY 256
size_t memtrack_stack[MEMTRACK_STACK_CAPACITY];
index_t memtrack_stack_top = -1;
/* Allocate `size` bytes, abort on failure, and register the block in
 * the tracking list; updates the running total and every active
 * peak-usage watermark. */
void *malloc_wrapper(size_t size)
{
    /* Lazily initialize the circular tracking list on the first
     * allocation (balance 0 means no live allocations). */
    if(malloc_balance == 0) {
        malloc_track_root.prev = &malloc_track_root;
        malloc_track_root.next = &malloc_track_root;
    }
    void *p = malloc(size);
    if(p == NULL)
        ERROR("malloc fails");
    malloc_balance++;
    /* The tracking node itself uses raw malloc so it does not perturb
     * the accounting. Fix: this allocation was previously unchecked. */
    malloc_track_t *t = (malloc_track_t *) malloc(sizeof(malloc_track_t));
    if(t == NULL)
        ERROR("malloc fails");
    t->p = p;
    t->size = size;
    pnlinkprev(&malloc_track_root, t);
    malloc_total += size;
    /* Raise the watermark of every open memtrack frame. */
    for(index_t i = 0; i <= memtrack_stack_top; i++)
        if(memtrack_stack[i] < malloc_total)
            memtrack_stack[i] = malloc_total;
    return p;
}
/* Release a pointer obtained from MALLOC: unregister it from the
 * tracking list and free both the block and its tracking node.
 * Aborts if p was never tracked. */
void free_wrapper(void *p)
{
    malloc_track_t *node = malloc_track_root.next;
    while(node != &malloc_track_root && node->p != p)
        node = node->next;
    if(node == &malloc_track_root)
        ERROR("FREE issued on a non-tracked pointer %p", p);
    malloc_total -= node->size;
    pnunlink(node);
    free(node);
    free(p);
    malloc_balance--;
}
/* Convenience allocator for an index array of length n (tracked). */
index_t *alloc_idxtab(index_t n)
{
    return (index_t *) MALLOC(sizeof(index_t)*n);
}
/* Open a new peak-memory frame, seeded with current usage. */
void push_memtrack(void)
{
    assert(memtrack_stack_top + 1 < MEMTRACK_STACK_CAPACITY);
    memtrack_stack_top++;
    memtrack_stack[memtrack_stack_top] = malloc_total;
}
/* Close the innermost memory frame; return its peak usage in bytes. */
size_t pop_memtrack(void)
{
    assert(memtrack_stack_top >= 0);
    size_t peak = memtrack_stack[memtrack_stack_top];
    memtrack_stack_top--;
    return peak;
}
/* Bytes currently held by tracked allocations. */
size_t current_mem(void)
{
    return malloc_total;
}
/* Convert a byte count to binary gigabytes (GiB). */
double inGiB(size_t s)
{
    const double gib = (double) (1 << 30);
    return ((double) s) / gib;
}
/* Report current tracked allocation, e.g. "{curr: 1.23GiB}". */
void print_current_mem(void)
{
    double gib = inGiB(current_mem());
    fprintf(stdout, "{curr: %.2lfGiB}", gib);
    fflush(stdout);
}
/* Pop the innermost memory frame and report its peak, e.g.
 * "{peak: 1.23GiB}". */
void print_pop_memtrack(void)
{
    double gib = inGiB(pop_memtrack());
    fprintf(stdout, "{peak: %.2lfGiB}", gib);
    fflush(stdout);
}
/******************************************************** Timing subroutines. */
/* Stack of wall-clock start times (seconds) for nested timing scopes. */
#define TIME_STACK_CAPACITY 256
double start_stack[TIME_STACK_CAPACITY];
index_t start_stack_top = -1;
/* Push the current wall-clock time onto the timer stack. */
void push_time(void)
{
    assert(start_stack_top + 1 < TIME_STACK_CAPACITY);
    start_stack_top++;
    start_stack[start_stack_top] = omp_get_wtime();
}
/* Pop the most recent start time; return elapsed milliseconds. */
double pop_time(void)
{
    double end = omp_get_wtime();
    assert(start_stack_top >= 0);
    double begin = start_stack[start_stack_top];
    start_stack_top--;
    return 1000.0*(end-begin);
}
/******************************************************************* Sorting. */
/* In-place shellsort of a[0..n-1] into ascending order, using the
 * Knuth gap sequence 1, 4, 13, 40, ... */
void shellsort(index_t n, index_t *a)
{
    index_t gap = 1;
    while(gap < n/3)
        gap = 3*gap + 1;
    do {
        /* Gapped insertion sort with the current gap. */
        for(index_t i = gap; i < n; i++) {
            index_t key = a[i];
            index_t j = i;
            while(j >= gap && a[j-gap] > key) {
                a[j] = a[j-gap];
                j -= gap;
            }
            a[j] = key;
        }
        gap /= 3;
    } while(gap > 0);
}
/* 1-based binary-heap navigation. */
#define LEFT(x) (x<<1)
#define RIGHT(x) ((x<<1)+1)
#define PARENT(x) (x>>1)
/* In-place heapsort of a[0..n-1] into ascending order. */
void heapsort_indext(index_t n, index_t *a)
{
    /* Shift index origin from 0 to 1 for convenience. */
    a--;
    /* Build a max-heap by sifting each new element up. */
    for(index_t i = 2; i <= n; i++) {
        index_t x = i;
        while(x > 1) {
            index_t y = PARENT(x);
            if(a[x] <= a[y]) {
                /* heap property ok */
                break;
            }
            /* Exchange a[x] and a[y] to enforce heap property */
            index_t t = a[x];
            a[x] = a[y];
            a[y] = t;
            x = y;
        }
    }
    /* Repeat delete max and insert */
    for(index_t i = n; i > 1; i--) {
        index_t t = a[i];
        /* Delete max */
        a[i] = a[1];
        /* Insert t by sifting down from the root. */
        index_t x = 1;
        index_t y, z;
        while((y = LEFT(x)) < i) {
            z = RIGHT(x);
            /* Make y point at the larger child. */
            if(z < i && a[y] < a[z]) {
                index_t s = z;
                z = y;
                y = s;
            }
            /* Invariant: a[y] >= a[z] */
            if(t >= a[y]) {
                /* ok to insert here without violating heap property */
                break;
            }
            /* Move a[y] up the heap */
            a[x] = a[y];
            x = y;
        }
        /* Insert here */
        a[x] = t;
    }
}
/******************************************************* Bitmap manipulation. */
/* Set bit j of the bitmap to value (which must be 0 or 1). */
void bitset(index_t *map, index_t j, index_t value)
{
    assert((value & (~1UL)) == 0);
    /* Fix: shift the value bit in unsigned arithmetic — left-shifting
     * a signed 1 into the sign bit (j%64 == 63) is undefined behavior. */
    map[j/64] = (map[j/64] & ~(1UL << (j%64)))
              | ((index_t)(((unsigned long)(value&1)) << (j%64)));
}
/* Return bit j of the bitmap (0 or 1). */
index_t bitget(index_t *map, index_t j)
{
    index_t word = map[j/64];
    return (word >> (j%64)) & 1UL;
}
/*************************************************** Random numbers and such. */
index_t irand(void)
{
return (((index_t) rand())<<31)^((index_t) rand());
}
/* Fisher–Yates shuffle of p[0..n-1], drawing randomness from the given
 * fast-forward PRNG state (passed by value, so the caller's state is
 * not advanced). */
void randshuffle_seq(index_t n, index_t *p, ffprng_t gen)
{
    for(index_t i = 0; i < n-1; i++) {
        ffprng_scalar_t rnd;
        FFPRNG_RAND(rnd, gen);
        index_t x = i+(rnd%(n-i));  // uniform-ish position in p[i..n-1]
        index_t t = p[x];
        p[x] = p[i];
        p[i] = t;
    }
}
/* Generate a pseudorandom permutation p[0..n-1] of {0,...,n-1}.
 * Three passes: (1) count, per producer block, how many indices fall
 * into each of nt destination bins; (2) after a prefix sum over the
 * bin counts, replay the identical random bin assignment (same seed,
 * fast-forwarded) and scatter indices into their bins; (3) shuffle
 * each bin independently with a per-bin generator. */
void randperm(index_t n, index_t seed, index_t *p)
{
#ifdef BUILD_PARALLEL
    index_t nt = 64;
#else
    index_t nt = 1;
#endif
    index_t block_size = n/nt;
    index_t f[128][128];  // f[t][bin]: count of thread t's indices hashing to bin
    assert(nt < 128);
    ffprng_t base;
    FFPRNG_INIT(base, seed);
    /* Pass 1: count bin occupancies. */
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t t = 0; t < nt; t++) {
        for(index_t j = 0; j < nt; j++)
            f[t][j] = 0;
        index_t start = t*block_size;
        index_t stop = (t == nt-1) ? n-1 : (start+block_size-1);
        ffprng_t gen;
        FFPRNG_FWD(gen, start, base);  // one random draw per index i
        for(index_t i = start; i <= stop; i++) {
            ffprng_scalar_t rnd;
            FFPRNG_RAND(rnd, gen);
            index_t bin = (index_t) ((unsigned long) rnd)%((unsigned long)nt);
            f[t][bin]++;
        }
    }
    /* Reduce counts into row 0, then prefix-sum to get bin offsets. */
    for(index_t bin = 0; bin < nt; bin++) {
        for(index_t t = 1; t < nt; t++) {
            f[0][bin] += f[t][bin];
        }
    }
    index_t run = 0;
    for(index_t j = 1; j <= nt; j++) {
        index_t fp = f[0][j-1];
        f[0][j-1] = run;
        run += fp;
    }
    f[0][nt] = run;  // total == n
    /* Pass 2: replay the bin assignment; thread t scatters the indices
     * belonging to bin t (every thread scans the full range). */
    FFPRNG_INIT(base, seed);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t t = 0; t < nt; t++) {
        ffprng_t gen;
        index_t start = 0;
        index_t stop = n-1;
        index_t pos = f[0][t];
        FFPRNG_FWD(gen, start, base);
        for(index_t i = start; i <= stop; i++) {
            ffprng_scalar_t rnd;
            FFPRNG_RAND(rnd, gen);
            index_t bin = (index_t) ((unsigned long) rnd)%((unsigned long)nt);
            if(bin == t)
                p[pos++] = i;
        }
        assert(pos == f[0][t+1]);
    }
    /* Pass 3: shuffle each bin with an independently seeded generator. */
    FFPRNG_INIT(base, (seed^0x9078563412EFDCABL));
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t t = 0; t < nt; t++) {
        ffprng_t fwd, gen;
        index_t start = f[0][t];
        index_t stop = f[0][t+1]-1;
        index_t u;
        FFPRNG_FWD(fwd, (1234567890123456L*t), base);
        FFPRNG_RAND(u, fwd);
        FFPRNG_INIT(gen, u);
        randshuffle_seq(stop-start+1, p + start, gen);
    }
}
/********************************** Initialize an array with random scalars. */
/* Fill a[0..s-1] with pseudorandom scalars derived from `seed`.
 * Deterministic regardless of thread count: each thread fast-forwards
 * the shared generator to its block's start offset. */
void randinits_scalar(scalar_t *a, index_t s, ffprng_scalar_t seed)
{
    ffprng_t base;
    FFPRNG_INIT(base, seed);
    index_t nt = num_threads();
    index_t block_size = s/nt;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t t = 0; t < nt; t++) {
        ffprng_t gen;
        index_t start = t*block_size;
        index_t stop = (t == nt-1) ? s-1 : (start+block_size-1);
        FFPRNG_FWD(gen, start, base);  // one draw consumed per element
        for(index_t i = start; i <= stop; i++) {
            ffprng_scalar_t rnd;
            FFPRNG_RAND(rnd, gen);
            scalar_t rs = (scalar_t) rnd;
            a[i] = rs;
        }
    }
}
/***************************************************** (Parallel) prefix sum. */
/* Exclusive prefix sum of a[0..n-1] in place, adding a constant `k`
 * per element (i.e. new a[u] = sum_{v<u} (old a[v] + k)); returns the
 * grand total. Parallel build: per-thread partial sums, a serial scan
 * over thread totals, then a per-thread rescan. */
index_t prefixsum(index_t n, index_t *a, index_t k)
{
#ifdef BUILD_PARALLEL
    index_t s[MAX_THREADS];
    index_t nt = num_threads();
    assert(nt < MAX_THREADS);  // s[nt] below must stay in bounds
    index_t length = n;
    index_t block_size = length/nt;
#pragma omp parallel for
    for(index_t t = 0; t < nt; t++) {
        index_t start = t*block_size;
        index_t stop = (t == nt-1) ? length-1 : (start+block_size-1);
        index_t tsum = (stop-start+1)*k;
        for(index_t u = start; u <= stop; u++)
            tsum += a[u];
        s[t] = tsum;
    }
    /* Serial exclusive scan over the per-thread totals. */
    index_t run = 0;
    for(index_t t = 1; t <= nt; t++) {
        index_t v = s[t-1];
        s[t-1] = run;
        run += v;
    }
    s[nt] = run;
#pragma omp parallel for
    for(index_t t = 0; t < nt; t++) {
        index_t start = t*block_size;
        index_t stop = (t == nt-1) ? length-1 : (start+block_size-1);
        index_t trun = s[t];  // this thread's starting offset
        for(index_t u = start; u <= stop; u++) {
            index_t tv = a[u];
            a[u] = trun;
            trun += tv + k;
        }
        assert(trun == s[t+1]);
    }
#else
    index_t run = 0;
    for(index_t u = 0; u < n; u++) {
        index_t tv = a[u];
        a[u] = run;
        run += tv + k;
    }
#endif
    return run;
}
/************************************************************* Parallel sum. */
/* Sum of a[0..n-1], computed block-parallel when built with OpenMP. */
index_t parallelsum(index_t n, index_t *a)
{
    index_t sum = 0;
#ifdef BUILD_PARALLEL
    index_t s[MAX_THREADS];  // per-thread partial sums
    index_t nt = num_threads();
    assert(nt < MAX_THREADS);
    index_t length = n;
    index_t block_size = length/nt;
#pragma omp parallel for
    for(index_t t = 0; t < nt; t++) {
        index_t start = t*block_size;
        index_t stop = (t == nt-1) ? length-1 : (start+block_size-1);
        index_t tsum = 0;
        for(index_t u = start; u <= stop; u++)
            tsum += a[u];
        s[t] = tsum;
    }
    for(index_t t = 0; t < nt; t++)
        sum += s[t];
#else
    for(index_t i = 0; i < n; i++) {
        sum += a[i];
    }
#endif
    return sum;
}
/* Count the number of non-zero entries in a[0..n-1], block-parallel
 * when built with OpenMP. */
index_t parallelcount(index_t n, index_t *a)
{
    index_t total_cnt = 0;
#ifdef BUILD_PARALLEL
    index_t nt = num_threads();
    index_t block_size = n/nt;
    index_t *cnt_nt = alloc_idxtab(nt);  // per-thread counters
#pragma omp parallel for
    for(index_t th = 0; th <nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
        index_t cnt = 0;
        for(index_t i = start; i <= stop; i++)
            cnt += (a[i] ? 1 : 0);
        cnt_nt[th] = cnt;
    }
    for(index_t th = 0; th < nt; th++)
        total_cnt += cnt_nt[th];
    FREE(cnt_nt);  /* fix: this scratch array was leaked on every call */
#else
    for(index_t i = 0; i < n; i++)
        total_cnt += (a[i] ? 1 : 0);
#endif
    return total_cnt;
}
/************************ Search for an interval of values in a sorted array. */
/* In sorted array a[0..n-1], locate the run of values v with
 * lo_val <= v <= hi_val. On success stores the first and last index in
 * *iv_start/*iv_end and returns the run length; returns 0 when the
 * interval is empty.
 * NOTE(review): when n == 0 only *iv_start is set; *iv_end is left
 * unassigned — callers should not read it on a 0 return. */
index_t get_interval(index_t n, index_t *a,
                     index_t lo_val, index_t hi_val,
                     index_t *iv_start, index_t *iv_end)
{
    assert(n >= 0);
    if(n == 0) {
        *iv_start = 0;
        return 0;
    }
    assert(lo_val <= hi_val);
    // find first element in interval (if any) with binary search
    index_t lo = 0;
    index_t hi = n-1;
    // at or above lo, and at or below hi (if any)
    while(lo < hi) {
        index_t mid = (lo+hi)/2; // lo <= mid < hi
        index_t v = a[mid];
        if(hi_val < v) {
            hi = mid-1; // at or below hi (if any)
        } else {
            if(v < lo_val)
                lo = mid+1; // at or above lo (if any), lo <= hi
            else
                hi = mid; // at or below hi (exists)
        }
        // 0 <= lo <= n-1
    }
    if(a[lo] < lo_val || a[lo] > hi_val) {
        // array contains no values in interval
        if(a[lo] < lo_val) {
            lo++;
            /* NOTE(review): a[lo+1] here looks like it should be a[lo];
             * as written the assert can read one past the run — confirm. */
            assert(lo == n || a[lo+1] > hi_val);
        } else {
            assert(lo == 0 || a[lo-1] < lo_val);
        }
        *iv_start = lo;
        *iv_end = hi;
        return 0;
    }
    assert(lo_val <= a[lo] && a[lo] <= hi_val);
    *iv_start = lo;
    // find interval end (last index in interval) with binary search
    lo = 0;
    hi = n-1;
    // last index (if any) is at or above lo, and at or below hi
    while(lo < hi) {
        index_t mid = (lo+hi+1)/2; // lo < mid <= hi
        index_t v = a[mid];
        if(hi_val < v) {
            hi = mid-1; // at or below hi, lo <= hi
        } else {
            if(v < lo_val)
                lo = mid+1; // at or above lo
            else
                lo = mid; // at or above lo, lo <= hi
        }
    }
    assert(lo == hi);
    *iv_end = lo; // lo == hi
    return 1+*iv_end-*iv_start; // return cut size
}
/******************************************************************** Stack. */
/* One stack element: a vertex u, a level/shade l, and a timestamp t. */
typedef struct stack_node {
    index_t u;
    index_t l;
    index_t t;
} stack_node_t;
/* Fixed-capacity stack of stack_node_t. */
typedef struct stack {
    index_t size; // size of stack (capacity)
    index_t n;    // number of elements
    stack_node_t *a;
} stk_t;
/* Allocate an empty stack with the given capacity. Aborts on
 * allocation failure. In DEBUG builds the storage is poisoned with
 * UNDEFINED. */
stk_t * stack_alloc(index_t size)
{
    stk_t *s = (stk_t *) malloc(sizeof(stk_t));
    if(s == NULL)
        ERROR("malloc fails");           /* fix: was unchecked */
    s->size = size;
    s->n = 0;
    s->a = (stack_node_t *) malloc(s->size*sizeof(stack_node_t));
    if(s->a == NULL)
        ERROR("malloc fails");           /* fix: was unchecked */
#ifdef DEBUG
    /* fix: poison the whole capacity; the original looped to s->n,
     * which is always 0 here, so nothing was ever initialized. */
    for(index_t i = 0; i < s->size; i++) {
        stack_node_t *e = s->a + i;
        e->u = UNDEFINED;
        e->l = UNDEFINED;
        e->t = UNDEFINED;
    }
#endif
    return s;
}
/* Release a stack and its element storage. */
void stack_free(stk_t *s)
{
    free(s->a);
    free(s);
}
/* Push a copy of *e_in onto the stack.
 * NOTE(review): only u and t are copied; the l field is deliberately
 * skipped (see the commented-out line), yet stack_top reads l — confirm
 * callers never rely on l for pushed elements. */
void stack_push(stk_t *s, stack_node_t *e_in)
{
    assert(s->n < s->size);
    stack_node_t *e = s->a + s->n;
    e->u = e_in->u;
    //e->l = e_in->l;
    e->t = e_in->t;
    s->n++;
}
/* Pop the top element into *e_out (u and t only; l is intentionally
 * skipped, mirroring stack_push). DEBUG builds poison the vacated slot. */
void stack_pop(stk_t *s, stack_node_t *e_out)
{
    assert(s->n > 0);
    s->n--;
    stack_node_t *e = s->a + s->n;
    e_out->u = e->u;
    //e_out->l = e->l;
    e_out->t = e->t;
#ifdef DEBUG
    e->u = UNDEFINED;
    //e->l = UNDEFINED;
    e->t = UNDEFINED;
#endif
}
/* Copy the top element into *e_out without popping. */
void stack_top(stk_t *s, stack_node_t *e_out)
{
    assert(s->n > 0);   /* fix: was >= 0, which permitted reading a[-1]
                         * from an empty stack */
    stack_node_t *e = s->a + s->n-1;
    e_out->u = e->u;
    e_out->l = e->l;    /* NOTE(review): stack_push never writes l — confirm */
    e_out->t = e->t;
}
/* Discard all elements (capacity and storage are retained). */
void stack_empty(stk_t *s)
{
    s->n = 0;
}
/* Export the vertex of each element, bottom-to-top, into uu. */
void stack_get_vertices(stk_t *s, index_t *uu)
{
    stack_node_t *base = s->a;
    for(index_t i = 0; i < s->n; i++)
        uu[i] = base[i].u;
}
/* Export the timestamp of each element, bottom-to-top, into tt. */
void stack_get_timestamps(stk_t *s, index_t *tt)
{
    stack_node_t *base = s->a;
    for(index_t i = 0; i < s->n; i++)
        tt[i] = base[i].t;
}
#ifdef DEBUG
/* DEBUG helper: dump the stack header and all elements to stdout. */
void print_stack(stk_t *s)
{
    fprintf(stdout, "-----------------------------------------------\n");
    fprintf(stdout, "print stack\n");
    fprintf(stdout, "-----------------------------------------------\n");
    fprintf(stdout, "size: %ld\n", s->size);
    fprintf(stdout, "n: %ld\n", s->n);
    fprintf(stdout, "a: ");
    for(index_t i = 0; i < s->n; i++) {
        stack_node_t *e = s->a + i;
        fprintf(stdout, "[%ld, %ld, %ld]%s", e->u, e->l, e->t, (i==s->n-1)?"\n":" ");
    }
    fprintf(stdout, "-----------------------------------------------\n");
}
/* DEBUG helper: dump a single stack node as [u, l, t]. */
void print_stacknode(stack_node_t *e)
{
    fprintf(stdout, "print stack-node: [%ld, %ld, %ld]\n", e->u, e->l, e->t);
}
#endif
/****************************************************************** Sieving. */
/* Performance counters accumulated by the sieve kernels. */
long long int num_muls;     // finite-field multiplications performed
long long int trans_bytes;  // bytes transferred to/from memory
/* Number of SIMD lines needed to hold one scalar per shade. */
#define SHADE_LINES ((MAX_SHADES+SCALARS_IN_LINE-1)/SCALARS_IN_LINE)
typedef unsigned int shade_map_t;  // bit mask of shades present at a vertex
/* Precompute the per-vertex sieve lines d_x[u] for constrained
 * multilinear sieving: random weights over the k labels and the vertex
 * shades are combined so that, for lane a, the stored scalar encodes
 * the label subset pfx+a (lanes beyond 2^k get subset 0). d_s gives
 * each vertex's shade mask; randomness is replayed deterministically
 * from `seed` via fast-forwarding. */
void constrained_sieve_pre(index_t n,
                           index_t k,
                           index_t g,
                           index_t pfx,
                           index_t num_shades,
                           shade_map_t *d_s,
                           ffprng_scalar_t seed,
                           line_array_t *d_x)
{
    assert(g == SCALARS_IN_LINE);
    assert(num_shades <= MAX_SHADES);
    /* Random weight lines w[j][shade] shared by all vertices. */
    line_t wdj[SHADE_LINES*MAX_K];
    ffprng_t base;
    FFPRNG_INIT(base, seed);
    for(index_t j = 0; j < k; j++) {
        for(index_t dl = 0; dl < SHADE_LINES; dl++) {
            index_t jsdl = j*SHADE_LINES+dl;
            LINE_SET_ZERO(wdj[jsdl]);
            for(index_t a = 0; a < SCALARS_IN_LINE; a++) {
                ffprng_scalar_t rnd;
                FFPRNG_RAND(rnd, base);
                scalar_t rs = (scalar_t) rnd;
                LINE_STORE_SCALAR(wdj[jsdl], a, rs); // W: [cached]
            }
        }
    }
    index_t nt = num_threads();
    index_t length = n;
    index_t block_size = length/nt;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t t = 0; t < nt; t++) {
        ffprng_t gen;
        index_t start = t*block_size;
        index_t stop = (t == nt-1) ? length-1 : (start+block_size-1);
        /* Each vertex consumes SHADE_LINES*SCALARS_IN_LINE draws. */
        FFPRNG_FWD(gen, SHADE_LINES*SCALARS_IN_LINE*start, base);
        line_t vd[SHADE_LINES];
        for(index_t j = 0; j < SHADE_LINES; j++) {
            LINE_SET_ZERO(vd[j]); // to cure an annoying compiler warning
        }
        for(index_t u = start; u <= stop; u++) {
            scalar_t uu[MAX_K];
            shade_map_t shades_u = d_s[u]; // R: n shade_map_t
            /* Random per-vertex scalars, masked to u's shades. */
            for(index_t dl = 0; dl < SHADE_LINES; dl++) {
                for(index_t a = 0; a < SCALARS_IN_LINE; a++) {
                    index_t d = dl*SCALARS_IN_LINE + a;
                    ffprng_scalar_t rnd;
                    FFPRNG_RAND(rnd, gen);
                    scalar_t rs = (scalar_t) rnd;
                    /* All-ones mask iff shade d is present and valid. */
                    rs = rs & (-((scalar_t)((shades_u >> d)&(d < num_shades))));
                    LINE_STORE_SCALAR(vd[dl], a, rs); // W: [cached]
                }
            }
            /* uu[j] = <w[j], v_u> over the shade lines. */
            for(index_t j = 0; j < k; j++) {
                scalar_t uj;
                SCALAR_SET_ZERO(uj);
                for(index_t dl = 0; dl < SHADE_LINES; dl++) {
                    index_t jsdl = j*SHADE_LINES+dl;
                    line_t ln;
                    LINE_MUL(ln, wdj[jsdl], vd[dl]); // R: [cached]
                                                     // MUL: n*SHADE_LINES*g*k
                    scalar_t lns;
                    LINE_SUM(lns, ln);
                    SCALAR_ADD(uj, uj, lns);
                }
                uu[j] = uj;
            }
            /* Assemble the output line: lane a holds the sum of uu[j]
             * over labels j in the subset ap = pfx+a. */
            line_t ln;
            LINE_SET_ZERO(ln);
            for(index_t a = 0; a < SCALARS_IN_LINE; a++) {
                index_t ap = a < (1L << k) ? pfx+a : 0;
                scalar_t xua;
                SCALAR_SET_ZERO(xua);
                for(index_t j = 0; j < k; j++) {
                    scalar_t z_uj = uu[j]; // R: [cached]
                    z_uj = z_uj & (-((scalar_t)(((ap) >> j)&1)));
                    SCALAR_ADD(xua, xua, z_uj);
                }
                LINE_STORE_SCALAR(ln, a, xua);
            }
            LINE_STORE(d_x, u, ln); // W: ng scalar_t
        }
    }
    num_muls += n*SHADE_LINES*g*k;
    trans_bytes += sizeof(scalar_t)*n*g + sizeof(shade_map_t)*n;
}
/***************************************************************** Line sum. */
/* Sum all scalars in the l lines of d_s: per-thread line accumulators
 * are reduced to one scalar at the end. Returns the total. */
scalar_t line_sum(index_t l,
                  index_t g,
                  line_array_t *d_s)
{
    index_t nt = num_threads();
    index_t block_size = l/nt;
    assert(nt < MAX_THREADS);
    scalar_t ts[MAX_THREADS];  // per-thread partial sums
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t t = 0; t < nt; t++) {
        SCALAR_SET_ZERO(ts[t]);
        index_t start = t*block_size;
        index_t stop = (t == nt-1) ? l-1 : (start+block_size-1);
        line_t ln;
        line_t acc;
        LINE_SET_ZERO(acc);
        for(index_t i = start; i <= stop; i++) {
            LINE_LOAD(ln, d_s, i); // R: lg scalar_t
            LINE_ADD(acc, acc, ln);
        }
        scalar_t lsum;
        LINE_SUM(lsum, acc);
        ts[t] = lsum;
    }
    scalar_t sum;
    SCALAR_SET_ZERO(sum);
    for(index_t t = 0; t < nt; t++) {
        SCALAR_ADD(sum, sum, ts[t]);
    }
    trans_bytes += sizeof(scalar_t)*l*g;
    return sum;
}
/* For each of the l lines of d_s, sum its g scalars and store the
 * result in out[i] (per-vertex accumulation).
 * NOTE(review): the `stride` parameter is unused here (contrast
 * vertex_acc_stride) — kept for a uniform call signature, confirm. */
void vertex_acc(index_t l,
                index_t g,
                index_t stride,
                line_array_t *d_s,
                scalar_t *out)
{
    index_t nt = num_threads();
    index_t block_size = l/nt;
    assert(nt < MAX_THREADS);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t th = 0; th < nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? l-1 : (start+block_size-1);
        line_t ln;
        scalar_t lsum;
        for(index_t i = start; i <= stop; i++) {
            LINE_LOAD(ln, d_s, i); // R: lg scalar_t
            LINE_SUM(lsum, ln);
            out[i] = lsum; // R: scalar_t, W: scalar_t
        }
    }
    trans_bytes += sizeof(scalar_t)*(l*g+2);
}
/* Like line_sum, but sums the l lines at indices 0, stride, 2*stride,
 * ... of d_s (used to pick one layer of an interleaved layout). */
scalar_t line_sum_stride(index_t l,
                         index_t g,
                         index_t stride,
                         line_array_t *d_s)
{
    index_t nt = num_threads();
    index_t block_size = l/nt;
    assert(nt < MAX_THREADS);
    scalar_t ts[MAX_THREADS];  // per-thread partial sums
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t th = 0; th < nt; th++) {
        SCALAR_SET_ZERO(ts[th]);
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? l-1 : (start+block_size-1);
        line_t ln;
        line_t acc;
        LINE_SET_ZERO(acc);
        for(index_t i = start; i <= stop; i++) {
            index_t ii = i*stride;
            LINE_LOAD(ln, d_s, ii); // R: lg scalar_t
            LINE_ADD(acc, acc, ln);
        }
        scalar_t lsum;
        LINE_SUM(lsum, acc);
        ts[th] = lsum;
    }
    scalar_t sum;
    SCALAR_SET_ZERO(sum);
    for(index_t th = 0; th < nt; th++) {
        SCALAR_ADD(sum, sum, ts[th]);
    }
    trans_bytes += sizeof(scalar_t)*l*g;
    return sum;
}
/* Like vertex_acc, but reads the line for entry i at index i*stride
 * of d_s (interleaved layout); out[i] receives that line's scalar sum. */
void vertex_acc_stride(index_t l,
                       index_t g,
                       index_t stride,
                       line_array_t *d_s,
                       scalar_t *out)
{
    index_t nt = num_threads();
    index_t block_size = l/nt;
    assert(nt < MAX_THREADS);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t th = 0; th < nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? l-1 : (start+block_size-1);
        line_t ln;
        scalar_t lsum;
        for(index_t i = start; i <= stop; i++) {
            index_t ii = i*stride;
            LINE_LOAD(ln, d_s, ii); // R: lg scalar_t
            LINE_SUM(lsum, ln);
            out[i] = lsum; // R: scalar_t, W: scalar_t
        }
    }
    trans_bytes += sizeof(scalar_t)*(l*g+2);
}
/********************************** k-temppath generating function (mark 1). */
#if BUILD_GENF == 1
/* Line index of vertex u at layer l (1-based) and time t in the dense
 * genf1 layout: layers are outermost, then times, then vertices. */
#define TEMP_PATH_LINE_IDX(n, k, tmax, l, t, u) (((l-1)*(tmax+1)*(n))+((n)*(t))+(u))
#ifdef DEBUG
/* Print all g scalars of a line on one line of output. */
#define PRINT_LINE(source) \
{ \
scalar_t *s = (scalar_t *)&source; \
for(index_t i = 0; i < SCALARS_IN_LINE; i++) { \
fprintf(stdout, SCALAR_FORMAT_STRING"%s", \
(long) s[i], \
i==SCALARS_IN_LINE-1 ? "\n":" "); \
} \
}
/* DEBUG: dump the per-vertex sieve lines d_x. */
void print_dx(index_t n,
              line_array_t *d_x)
{
    fprintf(stdout, "d_x:\n");
    for(index_t u = 0; u < n; u ++) {
        line_t xu;
        LINE_LOAD(xu, d_x, u);
        fprintf(stdout, "%ld: ", u);
        PRINT_LINE(xu);
    }
}
/* DEBUG: dump the full genf1 state array d_s, layer by layer and time
 * by time, with per-line sums. */
void print_ds(index_t n,
              index_t k,
              index_t tmax,
              line_array_t *d_s)
{
    fprintf(stdout, "d_s: \n");
    for(index_t l = 1; l <= k; l++) {
        fprintf(stdout, "--------------------------------------------------\n");
        fprintf(stdout, "--------------------------------------------------\n");
        fprintf(stdout, "l: %ld\n", l);
        fprintf(stdout, "--------------------------------------------------\n");
        fprintf(stdout, "--------------------------------------------------\n");
        for(index_t t = 0; t <= tmax; t++) {
            fprintf(stdout, "--------------------------------------------------\n");
            fprintf(stdout, "t: %ld\n", t);
            fprintf(stdout, "--------------------------------------------------\n");
            for(index_t u = 0; u < n; u++) {
                fprintf(stdout, "%ld: ", u+1);
                index_t i_ult = TEMP_PATH_LINE_IDX(n, k, tmax, l, t, u);
                line_t p_ult;
                LINE_LOAD(p_ult, d_s, i_ult);
                PRINT_LINE(p_ult);
                scalar_t sum;
                LINE_SUM(sum, p_ult);
                fprintf(stdout, "line sum: "SCALAR_FORMAT_STRING"\n",sum);
            }
        }
    }
}
#endif
/* Zero the entire genf1 state array d_s (k layers, tmax+1 times,
 * n vertices; one line each). */
void init_ds_genf1(index_t n,
                   index_t k,
                   index_t tmax,
                   line_array_t *d_s)
{
    line_t p_zero;
    LINE_SET_ZERO(p_zero);
    for(index_t l = 1; l <= k; l++) {
        for(index_t t = 0; t <= tmax; t++) {
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
            for(index_t u = 0; u < n; u++) {
                index_t i_ult = TEMP_PATH_LINE_IDX(n, k, tmax, l, t, u);
                LINE_STORE(d_s, i_ult, p_zero); // W: ng scalar_t
            }
        }
    }
}
/* One round of the genf1 temporal-path recurrence: for every vertex u,
 *   p[u,l,t] = x[u] * sum_{v in N_t(u)} y_{l,u,v,t} * p[v,l-1,t-1]
 *            + p[u,l,t-1],
 * where the random y weights are replayed deterministically from
 * yl_seed by fast-forwarding to each thread's edge offset. d_pos/d_adj
 * encode per-time adjacency lists (degree followed by neighbors). */
void k_temp_path_genf1_round(index_t n,
                             index_t m,
                             index_t k,
                             index_t tmax,
                             index_t t,
                             index_t g,
                             index_t l,
                             index_t *d_pos,
                             index_t *d_adj,
                             index_t yl_seed,
                             line_array_t *d_x,
                             line_array_t *d_s)
{
    assert(g == SCALARS_IN_LINE);
    index_t nt = num_threads();
    index_t length = n;
    index_t block_size = length/nt;
    ffprng_t y_base;
    FFPRNG_INIT(y_base, yl_seed);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t th = 0; th < nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? length-1 : (start+block_size-1);
        ffprng_t y_gen;
        /* One y draw per edge: fast-forward past edges of earlier vertices. */
        index_t y_pos = d_pos[(t-1)*n+start]-((t-1)*n+start);
        FFPRNG_FWD(y_gen, y_pos, y_base);
        for(index_t u = start; u <= stop; u++) {
            index_t pu = d_pos[n*(t-1)+u];
            index_t deg = d_adj[pu];
            line_t p_ult;
            LINE_SET_ZERO(p_ult);
            for(index_t j = 1; j <= deg; j++) {
                index_t v = d_adj[pu+j];
                line_t p_vl1t1;
                index_t i_vl1t1 = TEMP_PATH_LINE_IDX(n, k, tmax, l-1, t-1, v);
                LINE_LOAD(p_vl1t1, d_s, i_vl1t1);
#ifdef BUILD_PREFETCH
                // prefetch next line
                index_t nv = d_adj[pu+j+(j < deg ? 1 : 2)];
                index_t i_nvl1t1 = TEMP_PATH_LINE_IDX(n, k, tmax, l-1, t-1, nv);
                LINE_PREFETCH(d_s, i_nvl1t1);
#endif
                ffprng_scalar_t rnd;
                FFPRNG_RAND(rnd, y_gen);
                scalar_t y_luvt = (scalar_t) rnd;
                line_t sy;
                LINE_MUL_SCALAR(sy, p_vl1t1, y_luvt);
                LINE_ADD(p_ult, p_ult, sy);
            }
            line_t xu;
            LINE_LOAD(xu, d_x, u);
            LINE_MUL(p_ult, p_ult, xu);
            /* Carry forward the same layer from the previous time step. */
            line_t p_ult1;
            index_t i_ult1 = TEMP_PATH_LINE_IDX(n, k, tmax, l, t-1, u);
            LINE_LOAD(p_ult1, d_s, i_ult1);
            LINE_ADD(p_ult, p_ult, p_ult1);
            index_t i_ult = TEMP_PATH_LINE_IDX(n, k, tmax, l, t, u);
            LINE_STORE(d_s, i_ult, p_ult); // W: ng scalar_t
        }
    }
    // total edges at time `t`
    index_t m_t = d_pos[n*(t-1) + n-1] - d_pos[n*(t-1)] - (n-1) +
                  d_adj[d_pos[n*(t-1)+(n-1)]];
    trans_bytes += ((2*n*tmax)+m_t)*sizeof(index_t) + (2*n+m_t)*g*sizeof(scalar_t);
    num_muls += (n*g+m_t);
}
/* Evaluate the k-temporal-path generating function (dense variant):
 * allocates the full k x (tmax+1) x n state, seeds layer 1 with the
 * sieve lines d_x, runs the recurrence for layers 2..k, and returns
 * the scalar sum of the final layer at time tmax. When vert_loc is
 * set, per-vertex line sums are also written to vs. */
scalar_t k_temp_path_genf1(index_t n,
                           index_t m,
                           index_t k,
                           index_t tmax,
                           index_t g,
                           index_t vert_loc,
                           index_t *d_pos,
                           index_t *d_adj,
                           ffprng_scalar_t y_seed,
                           line_array_t *d_x,
                           scalar_t *vs)
{
    assert( g == SCALARS_IN_LINE);
    assert( k >= 1);
    line_array_t *d_s= (line_array_t *) MALLOC(LINE_ARRAY_SIZE(k*(tmax+1)*n*g));
    init_ds_genf1(n, k, tmax, d_s);
    // initialise: l = 1
    for(index_t u = 0; u < n; u++) {
        for(index_t t = 0; t <= tmax; t++) {
            line_t xu;
            LINE_LOAD(xu, d_x, u);
            index_t i_u_1 = TEMP_PATH_LINE_IDX(n, k, tmax, 1, t, u);
            LINE_STORE(d_s, i_u_1, xu);
        }
    }
    srand(y_seed);
    for(index_t l = 2; l <= k; l++) {
        ffprng_scalar_t yl_seed = irand(); // new seed for each l
        /* A path with l vertices needs at least l-1 time steps. */
        for(index_t t = l-1; t <= tmax; t++) {
            k_temp_path_genf1_round(n, m, k, tmax, t, g, l,
                                    d_pos, d_adj, yl_seed, d_x, d_s);
        }
    }
    // sum up over the (l = k, t = tmax) slice
    index_t ii = TEMP_PATH_LINE_IDX(n, k, tmax, k, tmax, 0);
    scalar_t sum = line_sum(n, g, ((line_array_t *)(((line_t *) d_s)+ii)));
    // vertex-localisation
    if(vert_loc) {
        vertex_acc(n, g, k, ((line_array_t *)(((line_t *) d_s)+ii)), vs);
    }
    // free memory
    FREE(d_s);
    return sum;
}
#endif
/*********************************** k-temppath generating function (mark 2) */
#if BUILD_GENF == 2
/* genf2 keeps only two rolling layers, so the layer index l is ignored
 * in the line index: only time t and vertex u address a line. */
#define TEMP_PATH_LINE_IDX2(n, k, tmax, l, t, u) (((n)*(t))+(u))
/* Zero one rolling layer of the genf2 state ((tmax+1) x n lines). */
void init_ds_genf2(index_t n,
                   index_t k,
                   index_t tmax,
                   line_array_t *d_s)
{
    line_t p_zero;
    LINE_SET_ZERO(p_zero);
    for(index_t t = 0; t <= tmax; t++) {
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
        for(index_t u = 0; u < n; u++) {
            index_t i_ult = TEMP_PATH_LINE_IDX2(n, k, tmax, 1, t, u);
            LINE_STORE(d_s, i_ult, p_zero); // W: ng scalar_t
        }
    }
}
/* One round of the genf2 recurrence — identical arithmetic to
 * k_temp_path_genf1_round, but layer l-1 is read from d_s1 and layer l
 * is accumulated in d_s2 (two rolling buffers instead of the full
 * k-layer state). */
void k_temp_path_genf2_round(index_t n,
                             index_t m,
                             index_t k,
                             index_t tmax,
                             index_t t,
                             index_t g,
                             index_t l,
                             index_t *d_pos,
                             index_t *d_adj,
                             index_t yl_seed,
                             line_array_t *d_x,
                             line_array_t *d_s1,
                             line_array_t *d_s2)
{
    assert(g == SCALARS_IN_LINE);
    index_t nt = num_threads();
    index_t length = n;
    index_t block_size = length/nt;
    ffprng_t y_base;
    FFPRNG_INIT(y_base, yl_seed);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t th = 0; th < nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? length-1 : (start+block_size-1);
        ffprng_t y_gen;
        /* One y draw per edge: fast-forward past edges of earlier vertices. */
        index_t y_pos = d_pos[(t-1)*n+start]-((t-1)*n+start);
        FFPRNG_FWD(y_gen, y_pos, y_base);
        for(index_t u = start; u <= stop; u++) {
            index_t pu = d_pos[n*(t-1)+u];
            index_t deg = d_adj[pu];
            line_t p_ult;
            LINE_SET_ZERO(p_ult);
            for(index_t j = 1; j <= deg; j++) {
                index_t v = d_adj[pu+j];
                line_t p_vl1t1;
                index_t i_vl1t1 = TEMP_PATH_LINE_IDX2(n, k, tmax, l-1, t-1, v);
                LINE_LOAD(p_vl1t1, d_s1, i_vl1t1);
#ifdef BUILD_PREFETCH
                // prefetch next line
                index_t nv = d_adj[pu+j+(j < deg ? 1 : 2)];
                index_t i_nvl1t1 = TEMP_PATH_LINE_IDX2(n, k, tmax, l-1, t-1, nv);
                LINE_PREFETCH(d_s1, i_nvl1t1);
#endif
                ffprng_scalar_t rnd;
                FFPRNG_RAND(rnd, y_gen);
                scalar_t y_luvt = (scalar_t) rnd;
                line_t sy;
                LINE_MUL_SCALAR(sy, p_vl1t1, y_luvt);
                LINE_ADD(p_ult, p_ult, sy);
            }
            line_t xu;
            LINE_LOAD(xu, d_x, u);
            LINE_MUL(p_ult, p_ult, xu);
            /* Carry forward layer l from the previous time step. */
            line_t p_ult1;
            index_t i_ult1 = TEMP_PATH_LINE_IDX2(n, k, tmax, l, t-1, u);
            LINE_LOAD(p_ult1, d_s2, i_ult1);
            LINE_ADD(p_ult, p_ult, p_ult1);
            index_t i_ult = TEMP_PATH_LINE_IDX2(n, k, tmax, l, t, u);
            LINE_STORE(d_s2, i_ult, p_ult); // W: ng scalar_t
        }
    }
    // total edges at time `t`
    index_t m_t = d_pos[n*(t-1) + n-1] - d_pos[n*(t-1)] - (n-1) +
                  d_adj[d_pos[n*(t-1)+(n-1)]];
    trans_bytes += ((2*n*tmax)+m_t)*sizeof(index_t) + (2*n+m_t)*g*sizeof(scalar_t);
    num_muls += (n*g+m_t);
}
/* Evaluate the k-temporal-path generating function with two rolling
 * layer buffers (memory O((tmax+1)*n) lines instead of k times that).
 * After each layer l, the buffers are swapped and the new target layer
 * is zeroed; the final result lives in d_s1 at time tmax. */
scalar_t k_temp_path_genf2(index_t n,
                           index_t m,
                           index_t k,
                           index_t tmax,
                           index_t g,
                           index_t vert_loc,
                           index_t *d_pos,
                           index_t *d_adj,
                           ffprng_scalar_t y_seed,
                           line_array_t *d_x,
                           scalar_t *vs)
{
    assert( g == SCALARS_IN_LINE);
    assert( k >= 1);
    line_array_t *d_s1= (line_array_t *) MALLOC(LINE_ARRAY_SIZE((tmax+1)*n*g));
    line_array_t *d_s2= (line_array_t *) MALLOC(LINE_ARRAY_SIZE((tmax+1)*n*g));
    init_ds_genf2(n, 1, tmax, d_s2);
    // initialise: l = 1
    for(index_t u = 0; u < n; u++) {
        for(index_t t = 0; t <= tmax; t++) {
            line_t xu;
            LINE_LOAD(xu, d_x, u);
            index_t i_u_1 = TEMP_PATH_LINE_IDX2(n, k, tmax, 1, t, u);
            LINE_STORE(d_s1, i_u_1, xu);
        }
    }
    srand(y_seed);
    for(index_t l = 2; l <= k; l++) {
        ffprng_scalar_t yl_seed = irand(); // new seed for each l
        /* A path with l vertices needs at least l-1 time steps. */
        for(index_t t = l-1; t <= tmax; t++) {
            k_temp_path_genf2_round(n, m, k, tmax, t, g, l,
                                    d_pos, d_adj, yl_seed, d_x,
                                    d_s1, d_s2);
        }
        // swap and initialise the next target layer
        line_array_t *d_temp = d_s1;
        d_s1 = d_s2;
        d_s2 = d_temp;
        init_ds_genf2(n, 1, tmax, d_s2);
    }
    // sum up over the t = tmax slice of the result buffer
    index_t ii = TEMP_PATH_LINE_IDX2(n, k, tmax, 1, tmax, 0);
    scalar_t sum = line_sum(n, g, ((line_array_t *)(((line_t *) d_s1)+ii)));
    // vertex-localisation
    if(vert_loc) {
        vertex_acc(n, g, k, ((line_array_t *)(((line_t *) d_s1)+ii)), vs);
    }
    // free memory
    FREE(d_s1);
    FREE(d_s2);
    return sum;
}
#endif
/************************************************************ The oracle(s). */
/* Decision oracle for a k-vertex temporal path compatible with the
 * shade constraints h_s: runs the constrained sieve over all 2^k label
 * subsets in batches of g lanes, accumulates the generating-function
 * sums, prints timing/bandwidth statistics, and returns nonzero iff
 * the accumulated sum is nonzero (evidence a witness exists). When
 * vert_loc is set, per-vertex sums accumulate into master_vsum. */
index_t temppath_oracle(index_t n,
                        index_t k,
                        index_t tmax,
                        index_t *h_pos,
                        index_t *h_adj,
                        index_t num_shades,
                        shade_map_t *h_s,
                        ffprng_scalar_t y_seed,
                        ffprng_scalar_t z_seed,
                        index_t vert_loc,
                        scalar_t *master_vsum)
{
    push_memtrack();
    assert(k >= 1 && k < 31);
    /* Edge count recovered from the last adjacency list at time tmax. */
    //index_t m = h_pos[n-1]+h_adj[h_pos[n-1]]+1-n;
    index_t m = h_pos[n*(tmax-1)+n-1]+h_adj[h_pos[n*(tmax-1)+n-1]]+1-(n*tmax);
    index_t sum_size = 1 << k;
    index_t g = SCALARS_IN_LINE;
    index_t outer = (sum_size + g-1) / g;
    // number of iterations for outer loop
    num_muls = 0;
    trans_bytes = 0;
    index_t *d_pos = h_pos;
    index_t *d_adj = h_adj;
    line_array_t *d_x = (line_array_t *) MALLOC(LINE_ARRAY_SIZE(n*g));
    /* Run the work & time it. */
    push_time();
    scalar_t master_sum;
    SCALAR_SET_ZERO(master_sum);
    if(vert_loc) {
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
        for(index_t i = 0; i < n; i++)
            master_vsum[i] = 0;
    }
    /* One batch of g label subsets per outer iteration. */
    for(index_t out = 0; out < outer; out++) {
        constrained_sieve_pre(n, k, g, g*out, num_shades, h_s, z_seed, d_x);
#if BUILD_GENF == 1
#define GENF_TYPE "k_temp_path_genf1"
        scalar_t sum = k_temp_path_genf1(n, m, k, tmax, g, vert_loc, d_pos, d_adj, y_seed, d_x, master_vsum);
#elif BUILD_GENF == 2
#define GENF_TYPE "k_temp_path_genf2"
        scalar_t sum = k_temp_path_genf2(n, m, k, tmax, g, vert_loc, d_pos, d_adj, y_seed, d_x, master_vsum);
#else
#error BUILD_GENF should be either 1 or 2
#endif
        SCALAR_ADD(master_sum, master_sum, sum);
    }
    double time = pop_time();
    double trans_rate = trans_bytes / (time/1000.0);
    double mul_rate = num_muls / time;
    FREE(d_x);
    fprintf(stdout,
            SCALAR_FORMAT_STRING
            " %.2lf ms [%.2lfGiB/s, %.2lfGHz] %d",
            (long) master_sum,
            time,
            trans_rate/((double) (1 << 30)),
            mul_rate/((double) 1e6),
            master_sum != 0);
    fprintf(stdout, " ");
    print_pop_memtrack();
    fprintf(stdout, " ");
    print_current_mem();
    fflush(stdout);
    return master_sum != 0;
}
/************************************** k-path generating function (mark 1). */
#if BUILD_GENF == 1
/* Line index of vertex u at layer l (1-based) in the static k-path
 * layout; layers for one vertex are contiguous.
 * NOTE(review): the first macro argument is never expanded — callers
 * pass an undeclared identifier `b`, which compiles only because the
 * macro ignores it. Confirm before refactoring. */
#define PATH_LINE_IDX(b, k, l, u) ((k)*(u)+(l)-1)
#ifdef DEBUG
/* DEBUG: dump the k-path state array d_s, layer by layer, with sums. */
void print_kpath_ds(index_t n,
                    index_t k,
                    line_array_t *d_s)
{
    for(index_t l = 1; l <= k; l++) {
        fprintf(stdout,"-------------------------------------------------\n");
        fprintf(stdout, "l: %ld\n", l);
        fprintf(stdout,"-------------------------------------------------\n");
        for(index_t u = 0; u < n; u++) {
            fprintf(stdout, "%ld: ", u+1);
            index_t i_u_l = PATH_LINE_IDX(b, k, l, u);
            line_t pul;
            LINE_LOAD(pul, d_s, i_u_l);
            PRINT_LINE(pul);
            scalar_t sum;
            LINE_SUM(sum, pul);
            fprintf(stdout, "line sum: "SCALAR_FORMAT_STRING"\n",sum);
        }
    }
}
#endif
/* One round of the static k-path recurrence: for every vertex u,
 *   p[u,l] = x[u] * sum_{v in N(u)} y_{l,u,v} * p[v,l-1],
 * with x[u] read from layer 1 of d_s. Random y weights are replayed
 * from yl_seed by fast-forwarding one draw per edge. */
void k_path_genf1_round(index_t n,
                        index_t m,
                        index_t k,
                        index_t g,
                        index_t l,
                        index_t *d_pos,
                        index_t *d_adj,
                        ffprng_scalar_t yl_seed,
                        line_array_t *d_s)
{
    assert(g == SCALARS_IN_LINE);
    index_t nt = num_threads();
    index_t length = n;
    index_t block_size = length/nt;
    ffprng_t y_base;
    FFPRNG_INIT(y_base, yl_seed);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t t = 0; t < nt; t++) {
        index_t start = t*block_size;
        index_t stop = (t == nt-1) ? length-1 : (start+block_size-1);
        ffprng_t y_gen;
        index_t y_pos = d_pos[start]-start;  // edge offset of this block
        FFPRNG_FWD(y_gen, y_pos, y_base);
        for(index_t u = start; u <= stop; u++) {
            index_t pu = d_pos[u]; // R: n index_t [hw pref]
            index_t deg = d_adj[pu]; // R: n index_t [hw pref]
            line_t pul;
            LINE_SET_ZERO(pul);
            for(index_t j = 1; j <= deg; j++) {
                index_t v = d_adj[pu+j]; // R: m index_t [hw pref]
                line_t pvl1;
                index_t i_v_l1 = PATH_LINE_IDX(b, k, l-1, v);
                LINE_LOAD(pvl1, d_s, i_v_l1);
#ifdef BUILD_PREFETCH
                // prefetch next line
                index_t nv = d_adj[pu+j+(j < deg ? 1 : 2)];
                index_t i_nv_l1 = PATH_LINE_IDX(b, k, l-1, nv);
                LINE_PREFETCH(d_s, i_nv_l1);
#endif
                ffprng_scalar_t rnd;
                FFPRNG_RAND(rnd, y_gen);
                scalar_t y_luv = (scalar_t) rnd;
                line_t sy;
                LINE_MUL_SCALAR(sy, pvl1, y_luv); // MUL: ng
                LINE_ADD(pul, pul, sy);
            }
            /* Multiply by this vertex's base-case line (layer 1). */
            line_t pul0;
            index_t i_u_l0 = PATH_LINE_IDX(b, k, 1, u);
            LINE_LOAD(pul0, d_s, i_u_l0);
            LINE_MUL(pul, pul, pul0);
            index_t i_u_l = PATH_LINE_IDX(b, k, l, u);
            LINE_STORE(d_s, i_u_l, pul); // W: ng scalar_t
        }
    }
    trans_bytes += (2*n+m)*sizeof(index_t) + (m+n)*g*sizeof(scalar_t);
    num_muls += (n*g+m);
}
// Evaluate the mark-1 k-path generating function: seed the DP table with the
// per-vertex lines in d_x, run k-1 recurrence rounds with fresh per-round
// PRNG seeds, then return the total line sum of the length-k layer and
// accumulate per-vertex sums into vs for vertex localisation.
scalar_t k_path_genf1(index_t n,
                      index_t m,
                      index_t k,
                      index_t g,
                      index_t *d_pos,
                      index_t *d_adj,
                      ffprng_scalar_t y_seed,
                      line_array_t *d_x,
                      scalar_t *vs)
{
    assert(g == SCALARS_IN_LINE);
    assert(k >= 1);
    // full DP table: one line per (vertex, length) pair
    line_array_t *d_s = (line_array_t *) MALLOC(LINE_ARRAY_SIZE(k*n*g));
    // Save the base case to d_s
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++) {
        line_t xu;
        LINE_LOAD(xu, d_x, u); // R: ng scalar_t [hw prefetched]
        index_t i_u_1 = PATH_LINE_IDX(b, k, 1, u);
        LINE_STORE(d_s, i_u_1, xu); // W: ng scalar_t
    }
    // Run the recurrence
    srand(y_seed);
    for(index_t l = 2; l <= k; l++) {
        ffprng_scalar_t yl_seed = irand(); // different y-values for every round
        k_path_genf1_round(n,m,k,g,l,d_pos,d_adj,yl_seed,d_s);
    }
    // Sum up
    // Lines for one vertex are interleaved with stride k; offsetting the
    // base pointer by k-1 lines starts the strided scan at length k.
    scalar_t sum = line_sum_stride(n, g, k,
                                   ((line_array_t *)(((line_t *) d_s) + k-1)));
    // vertex localisation
    vertex_acc_stride(n,
                      g,
                      k,
                      ((line_array_t *)(((line_t *) d_s) + k-1)),
                      vs);
    FREE(d_s);
    trans_bytes += 2*n*g*sizeof(scalar_t);
    num_muls += 0;
    return sum;
}
#endif
/************************************** k-path generating function (mark 2). */
#if BUILD_GENF == 2
// Mark 2 keeps only one DP layer at a time, so a line's index is just the
// vertex; k and l are accepted for symmetry with mark 1 but ignored.
#define PATH_LINE_IDX2(k, l, u) ((u))
#ifdef DEBUG
// Debug dump of the DP lines in d_s with their scalar sums.
// NOTE(review): since PATH_LINE_IDX2 ignores l, every iteration of the
// outer loop prints the same (single) layer — confirm this is intended.
void print_kpath_ds_genf2(index_t n,
                          index_t k,
                          line_array_t *d_s)
{
    for(index_t l = 1; l <= k; l++) {
        fprintf(stdout,"-------------------------------------------------\n");
        fprintf(stdout, "l: %ld\n", l);
        fprintf(stdout,"-------------------------------------------------\n");
        for(index_t u = 0; u < n; u++) {
            fprintf(stdout, "%ld: ", u+1);
            index_t i_u_l = PATH_LINE_IDX2(k, l, u);
            line_t pul;
            LINE_LOAD(pul, d_s, i_u_l);
            PRINT_LINE(pul);
            scalar_t sum;
            LINE_SUM(sum, pul);
            fprintf(stdout, "line sum: "SCALAR_FORMAT_STRING"\n",sum);
        }
    }
}
#endif
// One round of the mark-2 k-path recurrence: reads the previous layer from
// d_s1, weights each neighbour line with a fresh PRNG draw, multiplies by the
// base-case line from d_x, and writes the new layer into d_s2 (the caller
// ping-pongs the two buffers between rounds).
void k_path_genf2_round(index_t n,
                        index_t m,
                        index_t k,
                        index_t g,
                        index_t l,
                        index_t *d_pos,
                        index_t *d_adj,
                        ffprng_scalar_t yl_seed,
                        line_array_t *d_x,
                        line_array_t *d_s1,
                        line_array_t *d_s2)
{
    assert(g == SCALARS_IN_LINE);
    index_t nt = num_threads();
    index_t length = n;
    index_t block_size = length/nt;  // last thread also absorbs the remainder
    ffprng_t y_base;
    FFPRNG_INIT(y_base, yl_seed);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t t = 0; t < nt; t++) {
        index_t start = t*block_size;
        index_t stop = (t == nt-1) ? length-1 : (start+block_size-1);
        ffprng_t y_gen;
        // Fast-forward the PRNG to the draws this thread's range consumes;
        // presumably d_pos[start]-start counts prior draws — TODO confirm.
        index_t y_pos = d_pos[start]-start;
        FFPRNG_FWD(y_gen, y_pos, y_base);
        for(index_t u = start; u <= stop; u++) {
            index_t pu = d_pos[u]; // R: n index_t [hw pref]
            index_t deg = d_adj[pu]; // R: n index_t [hw pref]
            line_t pul;
            LINE_SET_ZERO(pul);
            for(index_t j = 1; j <= deg; j++) {
                index_t v = d_adj[pu+j]; // R: m index_t [hw pref]
                line_t pvl1;
                index_t i_v_l1 = PATH_LINE_IDX2(k, l-1, v);
                LINE_LOAD(pvl1, d_s1, i_v_l1);
#ifdef BUILD_PREFETCH
                // prefetch next line
                // NOTE(review): when j == deg this reads d_adj[pu+deg+2],
                // past this adjacency list — presumably the next vertex's
                // first neighbour; verify allocation slack at the array end.
                index_t nv = d_adj[pu+j+(j < deg ? 1 : 2)];
                index_t i_nv_l1 = PATH_LINE_IDX2(k, l-1, nv);
                LINE_PREFETCH(d_s1, i_nv_l1);
#endif
                ffprng_scalar_t rnd;
                FFPRNG_RAND(rnd, y_gen);
                scalar_t y_luv = (scalar_t) rnd;
                line_t sy;
                LINE_MUL_SCALAR(sy, pvl1, y_luv); // MUL: ng
                LINE_ADD(pul, pul, sy);
            }
            line_t xu;
            LINE_LOAD(xu, d_x, u);     // base-case line of u
            LINE_MUL(pul, pul, xu);
            index_t i_u_l = PATH_LINE_IDX2(k, l, u);
            LINE_STORE(d_s2, i_u_l, pul); // W: ng scalar_t
        }
    }
    // bookkeeping for the throughput report printed by the oracle
    trans_bytes += (2*n+m)*sizeof(index_t) + (m+n)*g*sizeof(scalar_t);
    num_muls += (n*g+m);
}
// Evaluate the mark-2 k-path generating function using two ping-pong layer
// buffers (O(n*g) memory instead of mark 1's O(k*n*g)): seed one layer from
// d_x, run k-1 recurrence rounds swapping buffers after each, then return the
// total line sum of the final layer and accumulate per-vertex sums into vs.
scalar_t k_path_genf2(index_t n,
                      index_t m,
                      index_t k,
                      index_t g,
                      index_t *d_pos,
                      index_t *d_adj,
                      ffprng_scalar_t y_seed,
                      line_array_t *d_x,
                      scalar_t *vs)
{
    assert(g == SCALARS_IN_LINE);
    assert(k >= 1);
    line_array_t *d_s1 = (line_array_t *) MALLOC(LINE_ARRAY_SIZE(n*g));
    line_array_t *d_s2 = (line_array_t *) MALLOC(LINE_ARRAY_SIZE(n*g));
    // initialise x_u to d_s1
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++) {
        line_t xu;
        LINE_LOAD(xu, d_x, u); // R: ng scalar_t [hw prefetched]
        index_t i_u_1 = PATH_LINE_IDX2(k, 1, u);
        LINE_STORE(d_s1, i_u_1, xu); // W: ng scalar_t
    }
    // Run the recurrence
    srand(y_seed);
    for(index_t l = 2; l <= k; l++) {
        ffprng_scalar_t yl_seed = irand(); // different y-values for every round
        k_path_genf2_round(n, m, k, g, l, d_pos, d_adj, yl_seed, d_x, d_s1, d_s2);
        // swap array pointers
        line_array_t *d_temp = d_s1;
        d_s1 = d_s2;
        d_s2 = d_temp;
    }
    // Sum up (after the final swap, d_s1 holds the length-k layer)
    scalar_t sum = line_sum(n, g, ((line_array_t *)(((line_t *) d_s1))));
    // vertex localisation
    vertex_acc(n, g, k, ((line_array_t *)(((line_t *) d_s1))), vs);
    // free memory
    FREE(d_s1);
    FREE(d_s2);
    trans_bytes += 2*n*g*sizeof(scalar_t);
    num_muls += 0;
    return sum;
}
#endif
/************************************************************ The oracle(s). */
// Decision oracle for the static k-path query: iterates over ceil(2^k / g)
// groups of g sieve shades, evaluates the generating function for each group,
// and returns nonzero iff the accumulated sum is nonzero.  Per-vertex sums
// are accumulated into master_vsum for localisation; a throughput report is
// printed to stdout.
index_t path_oracle(index_t n,
                    index_t k,
                    index_t *h_pos,
                    index_t *h_adj,
                    index_t num_shades,
                    shade_map_t *h_s,
                    ffprng_scalar_t y_seed,
                    ffprng_scalar_t z_seed,
                    scalar_t *master_vsum)
{
    push_memtrack();
    assert(k >= 1 && k < 31);    // 1 << k below must not overflow
    // edge count recovered from the tail of the position/adjacency arrays
    index_t m = h_pos[n-1]+h_adj[h_pos[n-1]]+1-n;
    index_t sum_size = 1 << k;
    index_t g = SCALARS_IN_LINE;
    index_t outer = (sum_size + g-1) / g;
    // number of iterations for outer loop
    num_muls = 0;
    trans_bytes = 0;
    index_t *d_pos = h_pos;
    index_t *d_adj = h_adj;
    line_array_t *d_x = (line_array_t *) MALLOC(LINE_ARRAY_SIZE(n*g));
    /* Run the work & time it. */
    push_time();
    scalar_t master_sum;
    SCALAR_SET_ZERO(master_sum);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t i = 0; i < n; i++)
        master_vsum[i] = 0;
    for(index_t out = 0; out < outer; out++) {
        constrained_sieve_pre(n, k, g, g*out, num_shades, h_s, z_seed, d_x);
#if BUILD_GENF == 1
        scalar_t sum = k_path_genf1(n, m, k, g, d_pos, d_adj, y_seed, d_x, master_vsum);
#elif BUILD_GENF == 2
        scalar_t sum = k_path_genf2(n, m, k, g, d_pos, d_adj, y_seed, d_x, master_vsum);
#else
#error BUILD_GENF should be either 1 or 2
#endif
        SCALAR_ADD(master_sum, master_sum, sum);
    }
    double time = pop_time();
    double trans_rate = trans_bytes / (time/1000.0);   // bytes per second
    double mul_rate = num_muls / time;
    FREE(d_x);
    fprintf(stdout,
            SCALAR_FORMAT_STRING
            " %.2lf ms [%.2lfGiB/s, %.2lfGHz] %d",
            (long) master_sum,
            time,
            trans_rate/((double) (1 << 30)),
            mul_rate/((double) 1e6),
            master_sum != 0);
    fprintf(stdout, " ");
    print_pop_memtrack();
    fprintf(stdout, " ");
    print_current_mem();
    fflush(stdout);
    return master_sum != 0;
}
/************************************************* Rudimentary graph builder. */
// Edge-list graph used while reading input; edges are stored as consecutive
// (u, v, t) triples in 'edges'.
typedef struct
{
    index_t is_directed;    // 1 if directed, 0 if undirected
    index_t num_vertices;
    index_t num_edges;      // number of (u,v,t) triples currently stored
    index_t max_time;       // number of time steps (set via graph_set_max_time)
    index_t edge_capacity;  // number of triples allocated in 'edges'
    index_t *edges;         // 3*edge_capacity entries
    index_t *colors;        // one color per vertex, UNDEFINED if unset
} graph_t;
// Allocate a fresh buffer of m index_t entries; when an old buffer is
// supplied, carry over its first m_was entries and release it.
// Caller owns the returned buffer.
static index_t *enlarge(index_t m, index_t m_was, index_t *was)
{
    assert(m >= 0 && m_was >= 0);
    index_t *grown = (index_t *) MALLOC(sizeof(index_t)*m);
    if(was != (index_t *) 0) {
        for(index_t j = 0; j < m_was; j++)
            grown[j] = was[j];
        FREE(was);
    }
    return grown;
}
// Allocate a graph with n vertices, no edges, and all colors undefined.
// Caller owns the result and must release it with graph_free().
graph_t *graph_alloc(index_t n)
{
    assert(n >= 0);
    index_t i;
    graph_t *g = (graph_t *) MALLOC(sizeof(graph_t));
    g->is_directed = 0; // default: undirected graph
    g->num_vertices = n;
    g->num_edges = 0;
    g->max_time = 0;    // fix: was left uninitialized; set via graph_set_max_time()
    g->edge_capacity = 100;
    g->edges = enlarge(3*g->edge_capacity, 0, (void *) 0);
    g->colors = (index_t *) MALLOC(sizeof(index_t)*n);
    for(i = 0; i < n; i++)
        g->colors[i] = UNDEFINED;
    return g;
}
// Release a graph and all of its internal buffers.
void graph_free(graph_t *g)
{
    FREE(g->colors);
    FREE(g->edges);
    FREE(g);
}
// Append the time-stamped edge (u, v, t), doubling the edge buffer when full.
void graph_add_edge(graph_t *g, index_t u, index_t v, index_t t)
{
    assert(u >= 0 &&
           v >= 0 &&
           u < g->num_vertices &&
           v < g->num_vertices);
    assert(t>=0);
    //assert(t>=0 && t < g->max_time);
    if(g->num_edges == g->edge_capacity) {
        // grow to twice the capacity, copying the existing triples
        g->edges = enlarge(6*g->edge_capacity, 3*g->edge_capacity, g->edges);
        g->edge_capacity *= 2;
    }
    assert(g->num_edges < g->edge_capacity);
    index_t *e = g->edges + 3*g->num_edges;   // next free triple
    e[0] = u;
    e[1] = v;
    e[2] = t;
    g->num_edges++;
}
// Reserve space for 'cap' additional edge triples and return a pointer to
// the start of the reserved region; the caller fills the triples in place
// (the edges are counted as present immediately).
index_t *graph_edgebuf(graph_t *g, index_t cap)
{
    g->edges = enlarge(3*g->edge_capacity+3*cap, 3*g->edge_capacity, g->edges);
    index_t *e = g->edges + 3*g->num_edges;
    g->edge_capacity += cap;
    g->num_edges += cap;
    return e;
}
// Assign the nonnegative color c to vertex u.
void graph_set_color(graph_t *g, index_t u, index_t c)
{
    assert(u >= 0);
    assert(u < g->num_vertices);
    assert(c >= 0);
    g->colors[u] = c;
}
// Flag the graph as directed (1) or undirected (0).
void graph_set_is_directed(graph_t *g, index_t is_dir)
{
    assert((is_dir & ~1) == 0);   // only 0 or 1 accepted
    g->is_directed = is_dir;
}
// Record the number of time steps; must be positive.
void graph_set_max_time(graph_t *g, index_t tmax)
{
    assert(tmax >= 1);
    g->max_time = tmax;
}
#ifdef DEBUG
// Dump the graph in the input "p motif / e / n" format (ids printed 1-based).
void print_graph(graph_t *g)
{
    index_t n = g->num_vertices;
    index_t m = g->num_edges;
    index_t tmax = g->max_time;
    fprintf(stdout, "p motif %ld %ld %ld\n", n, m, tmax);
    index_t *e = g->edges;
    for(index_t i = 0; i < 3*m; i+=3) {
        fprintf(stdout, "e %ld %ld %ld\n",
                e[i]+1, e[i+1]+1, e[i+2]+1);
    }
    index_t *c = g->colors;
    for(index_t i = 0; i < n; i++)
        fprintf(stdout, "n %ld %ld\n", i+1, c[i]+1);
}
#endif
/************************************* Basic motif query processing routines. */
// A temporal-path query instance; 'pos' holds tmax position arrays of length
// n (one per time step), each indexing into the shared degree-prefixed
// adjacency array 'adj'.
struct temppathq_struct
{
    index_t is_stub;       // 1: trivial NO instance, owns no buffers
    index_t n;             // number of vertices
    index_t k;             // number of vertices on the sought path
    index_t tmax;          // number of time steps
    index_t *pos;          // tmax*n positions into adj
    index_t *adj;          // adjacency lists, each prefixed by its degree
    index_t nl;            // number of forced vertices in l
    index_t *l;            // vertices that must appear in a match
    index_t ns;            // number of shades
    shade_map_t *shade;    // per-vertex shade bitmaps
    index_t vert_loc;      // vertex-localisation flag passed to the oracle — presumably 0/1; confirm
    scalar_t *vsum;        // per-vertex sums written by the oracle
};
typedef struct temppathq_struct temppathq_t;
// Sort every (degree-prefixed) adjacency list in place, one vertex per task.
void adjsort(index_t n, index_t *pos, index_t *adj)
{
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++) {
        index_t pu = pos[u];
        index_t deg = adj[pu];
        heapsort_indext(deg, adj + pu + 1);
    }
}
// Release a temporal-path query; stub instances own no internal buffers.
void temppathq_free(temppathq_t *q)
{
    if(q->is_stub == 0) {
        FREE(q->vsum);
        FREE(q->shade);
        FREE(q->l);
        FREE(q->adj);
        FREE(q->pos);
    }
    FREE(q);
}
// Run the temporal-path decision oracle on q with fresh random seeds;
// stub queries are trivial NOs.
index_t temppathq_execute(temppathq_t *q)
{
    if(q->is_stub)
        return 0;
    return temppath_oracle(q->n, q->k, q->tmax, q->pos, q->adj, q->ns, q->shade,
                           irand(), irand(), q->vert_loc, q->vsum);
}
#ifdef DEBUG
// Debug dump of a temporal-path query: scalar fields, per-time positions,
// adjacency lists, forced-vertex list, shades, and per-vertex sums
// (vertex ids printed 1-based).
void print_temppathq(temppathq_t *q)
{
    index_t n = q->n;
    index_t k = q->k;
    index_t tmax = q->tmax;
    index_t *pos = q->pos;
    index_t *adj = q->adj;
    fprintf(stdout, "-----------------------------------------------\n");
    fprintf(stdout, "printing temppathq\n");
    fprintf(stdout, "is_stub = %ld\n", q->is_stub);
    fprintf(stdout, "n = %ld\n", n);
    fprintf(stdout, "k = %ld\n", k);
    fprintf(stdout, "tmax = %ld\n", tmax);
    fprintf(stdout, "pos\n");
    fprintf(stdout, "----\n ");
    for(index_t i = 0; i < n*tmax; i++) {
        fprintf(stdout, "%ld%s", pos[i], i%n==n-1 ? "\n ":" ");
    }
    fprintf(stdout, "adjacency list:\n");
    fprintf(stdout, "---------------\n");
    for(index_t t = 0; t < tmax; t++) {
        fprintf(stdout, "t: %ld\n", t+1);
        fprintf(stdout, "---------------\n");
        index_t *pos_t = pos + n*t;
        for(index_t u = 0; u < n; u++) {
            index_t pu = pos_t[u];
            index_t nu = adj[pu];
            index_t *adj_u = adj + pu + 1;
            fprintf(stdout, "%4ld:", u+1);
            for(index_t i = 0; i < nu; i++) {
                fprintf(stdout, " %4ld", adj_u[i]+1);
            }
            fprintf(stdout, "\n");
        }
    }
    index_t nl = q->nl;
    index_t *l = q->l;
    fprintf(stdout, "nl = %ld\n", nl);
    fprintf(stdout, "l:\n");
    for(index_t i = 0; i < nl; i++)
        fprintf(stdout, "%8ld : %8ld\n", i, l[i]);   // fix: printed nl for every row instead of the index i
    index_t ns = q ->ns;
    shade_map_t *shade = q->shade;
    fprintf(stdout, "ns : %ld\n", ns);
    fprintf(stdout, "shades:\n");
    for(index_t u = 0; u < n; u++) {
        fprintf(stdout, "%10ld : 0x%08X\n", u+1, shade[u]);
    }
    scalar_t *vsum = q->vsum;
    fprintf(stdout, "vert_loc: %ld\n", q->vert_loc);
    fprintf(stdout, "vsum:\n");
    for(index_t u = 0; u < n; u++)
        fprintf(stdout, "%10ld : "SCALAR_FORMAT_STRING"\n", u+1, vsum[u]);
    fprintf(stdout, "-----------------------------------------------\n");
}
// Debug helper: print the n entries of a, shifting each by offset;
// entries equal to -1 (undefined) are printed as-is.
void print_array(const char *name, index_t n, index_t *a, index_t offset)
{
    fprintf(stdout, "%s (%ld):", name, n);
    for(index_t i = 0; i < n; i++) {
        index_t v = a[i];
        fprintf(stdout, " %ld", v == -1 ? -1 : v + offset);
    }
    fprintf(stdout, "\n");
}
#endif
/*************************************************** basic path query routine */
// A static (non-temporal) path query; the layout mirrors temppathq_t minus
// the time dimension and the vertex-localisation flag.
struct pathq_struct
{
    index_t is_stub;       // 1: trivial NO instance, owns no buffers
    index_t n;             // number of vertices
    index_t k;             // number of vertices on the sought path
    index_t *pos;          // n positions into adj
    index_t *adj;          // adjacency lists, each prefixed by its degree
    index_t nl;            // number of forced vertices in l
    index_t *l;            // vertices that must appear in a match
    index_t ns;            // number of shades
    shade_map_t *shade;    // per-vertex shade bitmaps
    scalar_t *vsum;        // per-vertex sums written by the oracle
};
typedef struct pathq_struct pathq_t;
// Release a static path query; stub instances own no internal buffers.
void pathq_free(pathq_t *q)
{
    if(q->is_stub == 0) {
        FREE(q->vsum);
        FREE(q->shade);
        FREE(q->l);
        FREE(q->adj);
        FREE(q->pos);
    }
    FREE(q);
}
#ifdef DEBUG
// Debug dump of a static path query: scalar fields, positions, adjacency
// lists, forced-vertex list, shades, and per-vertex sums (1-based ids).
void print_pathq(pathq_t *q)
{
    index_t n = q->n;
    index_t k = q->k;
    index_t *pos = q->pos;
    index_t *adj = q->adj;
    index_t nl = q->nl;
    index_t *l = q->l;
    index_t ns = q ->ns;
    shade_map_t *shade = q->shade;
    scalar_t *vsum = q->vsum;
    fprintf(stdout, "-----------------------------------------------\n");
    fprintf(stdout, "printing pathq\n");
    fprintf(stdout, "is_stub : %ld\n", q->is_stub);
    fprintf(stdout, "n : %ld\n", n);
    fprintf(stdout, "k : %ld\n", k);
    fprintf(stdout, "pos\n");
    fprintf(stdout, "----\n ");
    for(index_t i = 0; i < n; i++) {
        fprintf(stdout, "%4ld%s", pos[i], i%n==n-1 ? "\n ":" ");
    }
    fprintf(stdout, "adjacency list:\n");
    fprintf(stdout, "---------------\n");
    for(index_t u = 0; u < n; u++) {
        index_t pu = pos[u];
        index_t nu = adj[pu];
        index_t *adj_u = adj + pu + 1;
        fprintf(stdout, "%4ld:", u+1);
        for(index_t i = 0; i < nu; i++) {
            fprintf(stdout, " %4ld", adj_u[i]+1);
        }
        fprintf(stdout, "\n");
    }
    fprintf(stdout, "nl = %ld\n", nl);
    fprintf(stdout, "l:\n");
    for(index_t i = 0; i < nl; i++)
        fprintf(stdout, "%8ld : %8ld\n", i, l[i]);   // fix: printed nl for every row instead of the index i
    fprintf(stdout, "ns : %ld\n", ns);
    fprintf(stdout, "shades:\n");
    for(index_t u = 0; u < n; u++) {
        fprintf(stdout, "%10ld : 0x%08X\n", u+1, shade[u]);
    }
    fprintf(stdout, "vsum:\n");
    for(index_t u = 0; u < n; u++)
        fprintf(stdout, "%10ld : "SCALAR_FORMAT_STRING"\n", u+1, vsum[u]);
    fprintf(stdout, "-----------------------------------------------\n");
}
#endif
// Flatten a temporal-path query into a static path query: union the tmax
// per-time adjacency lists of every vertex, sort, and de-duplicate; shades
// are copied through unchanged.  Caller frees the result with pathq_free().
pathq_t * build_pathq(temppathq_t *in)
{
    push_memtrack();
    index_t n = in->n;
    index_t tmax = in->tmax;
    index_t *i_pos = in->pos;
    index_t *i_adj = in->adj;
    shade_map_t *i_shade = in->shade;
    push_time();
    fprintf(stdout, "build pathq: ");
    fflush(stdout);
    push_time();
    // output position list
    // c_pos[u] starts as the total (multiset) degree of u over all time steps
    index_t *c_pos = (index_t *) MALLOC(sizeof(index_t)*n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++)
        c_pos[u] = 0;
    for(index_t t = 0; t < tmax; t++) {
        index_t *i_pos_t = i_pos + t*n;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
        for(index_t u = 0; u < n; u++) {
            index_t i_pu = i_pos_t[u];
            index_t i_nu = i_adj[i_pu];
            c_pos[u] += i_nu;
        }
    }
    index_t c_m = parallelsum(n, c_pos);
    index_t c_run = prefixsum(n, c_pos, 1);  // degrees -> positions (1 extra slot per list for the degree)
    assert(c_run == n+c_m);
    fprintf(stdout, "[pos: %.2lf ms] ", pop_time());
    fflush(stdout);
    push_time();
    index_t *c_adj = (index_t *) MALLOC(sizeof(index_t)*(n+c_m));
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++)
        c_adj[c_pos[u]] = 0;   // head slot doubles as a fill counter below
    // sequential over t, parallel over u: each u owns its output list,
    // so the counter increment is race-free
    for(index_t t = 0; t < tmax; t++) {
        index_t *i_pos_t = i_pos + t*n;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
        for(index_t u = 0; u < n; u++) {
            index_t i_pu = i_pos_t[u];
            index_t i_nu = i_adj[i_pu];
            index_t *i_adj_u = i_adj + i_pu;
            index_t o_pu = c_pos[u];
            for(index_t j = 1; j <= i_nu; j++) {
                index_t v = i_adj_u[j];
                c_adj[o_pu + 1 + c_adj[o_pu]++] = v;
            }
        }
    }
    adjsort(n, c_pos, c_adj);
    // count distinct neighbours per vertex into o_pos
    index_t *o_pos = (index_t *) MALLOC(sizeof(index_t)*n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++)
        o_pos[u] = 0;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++) {
        index_t c_pu = c_pos[u];
        index_t c_nu = c_adj[c_pu];
        if(c_nu == 0 || c_nu == 1) {
            o_pos[u] = c_nu;
            continue;
        }
        o_pos[u] = 1;
        index_t *c_adj_u = c_adj + c_pu;
        for(index_t j = 2; j <= c_nu; j++) {
            if(c_adj_u[j-1] != c_adj_u[j]) {
                o_pos[u]++;
            }
        }
    }
    index_t o_m = parallelsum(n, o_pos);
    index_t o_run = prefixsum(n, o_pos, 1);
    assert(o_run==n+o_m);
    // copy the sorted lists, skipping duplicates
    index_t *o_adj = (index_t *) MALLOC(sizeof(index_t)*(n+o_m));
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++)
        o_adj[o_pos[u]] = 0;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++) {
        index_t c_pu = c_pos[u];
        index_t c_nu = c_adj[c_pu];
        if(c_nu == 0) continue;
        index_t o_pu = o_pos[u];
        index_t *c_adj_u = c_adj + c_pu;
        o_adj[o_pu + 1 + o_adj[o_pu]++] = c_adj_u[1];
        for(index_t j = 2; j <= c_nu; j++) {
            if(c_adj_u[j-1] != c_adj_u[j]) {
                o_adj[o_pu + 1 + o_adj[o_pu]++] = c_adj_u[j];
            }
        }
    }
    fprintf(stdout, "[adj: %.2lf ms] ", pop_time());
    fflush(stdout);
    push_time();
    shade_map_t *o_shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++)
        o_shade[u] = i_shade[u];
    fprintf(stdout, "[shade: %.2lf ms] ", pop_time());
    fprintf(stdout, "done. [%.2lf ms] ", pop_time());
    print_pop_memtrack();
    fprintf(stdout, " ");
    print_current_mem();
    fprintf(stdout, "\n");
    fflush(stdout);
    FREE(c_pos);
    FREE(c_adj);
    pathq_t *out = (pathq_t *) MALLOC(sizeof(pathq_t));
    out->is_stub = 0;
    out->n = n;
    out->k = in->k;
    out->pos = o_pos;
    out->adj = o_adj;
    out->nl = 0;
    out->l = (index_t *) MALLOC(sizeof(index_t)*out->nl);  // zero-length; kept so pathq_free can FREE unconditionally
    out->ns = in->ns;
    out->shade = o_shade;
    out->vsum = (scalar_t *) MALLOC(sizeof(scalar_t)*n);
    return out;
}
// A quick fix to support vertex localised sieving for undirected graphs
// Like build_pathq, but after flattening and de-duplicating the temporal
// lists it symmetrises the result: for every directed arc (u,v) the reverse
// arc (v,u) is added, producing an undirected adjacency structure.
pathq_t * build_pathq_dir(temppathq_t *in)
{
    push_memtrack();
    index_t n = in->n;
    index_t tmax = in->tmax;
    index_t *i_pos = in->pos;
    index_t *i_adj = in->adj;
    shade_map_t *i_shade = in->shade;
    push_time();
    fprintf(stdout, "build pathq: ");
    fflush(stdout);
    push_time();
    // output position list
    // c_pos[u] starts as the total (multiset) degree of u over all time steps
    index_t *c_pos = (index_t *) MALLOC(sizeof(index_t)*n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++)
        c_pos[u] = 0;
    for(index_t t = 0; t < tmax; t++) {
        index_t *i_pos_t = i_pos + t*n;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
        for(index_t u = 0; u < n; u++) {
            index_t i_pu = i_pos_t[u];
            index_t i_nu = i_adj[i_pu];
            c_pos[u] += i_nu;
        }
    }
    index_t c_m = parallelsum(n, c_pos);
    index_t c_run = prefixsum(n, c_pos, 1);  // degrees -> positions
    assert(c_run == n+c_m);
    fprintf(stdout, "[pos: %.2lf ms] ", pop_time());
    fflush(stdout);
    push_time();
    index_t *c_adj = (index_t *) MALLOC(sizeof(index_t)*(n+c_m));
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++)
        c_adj[c_pos[u]] = 0;   // head slot doubles as a fill counter
    // sequential over t, parallel over u: each u owns its output list
    for(index_t t = 0; t < tmax; t++) {
        index_t *i_pos_t = i_pos + t*n;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
        for(index_t u = 0; u < n; u++) {
            index_t i_pu = i_pos_t[u];
            index_t i_nu = i_adj[i_pu];
            index_t *i_adj_u = i_adj + i_pu;
            index_t o_pu = c_pos[u];
            for(index_t j = 1; j <= i_nu; j++) {
                index_t v = i_adj_u[j];
                c_adj[o_pu + 1 + c_adj[o_pu]++] = v;
            }
        }
    }
    adjsort(n, c_pos, c_adj);
    // count distinct neighbours per vertex into o_pos
    index_t *o_pos = (index_t *) MALLOC(sizeof(index_t)*n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++)
        o_pos[u] = 0;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++) {
        index_t c_pu = c_pos[u];
        index_t c_nu = c_adj[c_pu];
        if(c_nu == 0 || c_nu == 1) {
            o_pos[u] = c_nu;
            continue;
        }
        o_pos[u] = 1;
        index_t *c_adj_u = c_adj + c_pu;
        for(index_t j = 2; j <= c_nu; j++) {
            if(c_adj_u[j-1] != c_adj_u[j]) {
                o_pos[u]++;
            }
        }
    }
    index_t o_m = parallelsum(n, o_pos);
    index_t o_run = prefixsum(n, o_pos, 1);
    assert(o_run==n+o_m);
    // copy the sorted lists, skipping duplicates
    index_t *o_adj = (index_t *) MALLOC(sizeof(index_t)*(n+o_m));
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++)
        o_adj[o_pos[u]] = 0;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++) {
        index_t c_pu = c_pos[u];
        index_t c_nu = c_adj[c_pu];
        if(c_nu == 0) continue;
        index_t o_pu = o_pos[u];
        index_t *c_adj_u = c_adj + c_pu;
        o_adj[o_pu + 1 + o_adj[o_pu]++] = c_adj_u[1];
        for(index_t j = 2; j <= c_nu; j++) {
            if(c_adj_u[j-1] != c_adj_u[j]) {
                o_adj[o_pu + 1 + o_adj[o_pu]++] = c_adj_u[j];
            }
        }
    }
    // convert directed to undirected graph
    // o_pos_ud[u] = out-degree(u) + in-degree(u)
    index_t *o_pos_ud = (index_t *) MALLOC(n*sizeof(index_t));
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++)
        o_pos_ud[u] = o_adj[o_pos[u]];
    index_t nt = num_threads();
    index_t block_size = n/nt;
    // each thread scans all arcs but only counts targets in its own
    // vertex range [start, stop], so the increments are race-free
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t th = 0; th < nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
        for(index_t u = 0; u < n; u++) {
            index_t o_pu = o_pos[u];
            index_t *o_adj_u = o_adj + o_pu;
            index_t o_nu = o_adj_u[0];
            for(index_t j = 1; j <= o_nu; j++) {
                index_t v = o_adj_u[j];
                if(start <= v && v <= stop)
                    o_pos_ud[v]++;
            }
        }
    }
    index_t o_m_ud = parallelsum(n, o_pos_ud);
    index_t o_run_ud = prefixsum(n, o_pos_ud, 1);
    assert(o_run_ud == n+o_m_ud);
    assert(o_m_ud == 2*o_m);   // every arc appears once per direction
    index_t *o_adj_ud = (index_t *) MALLOC((n+o_m_ud)*sizeof(index_t));
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++)
        o_adj_ud[o_pos_ud[u]] = 0;
    // first copy the adjacency list as it is
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++) {
        index_t o_pu = o_pos[u];
        index_t *o_adj_u = o_adj + o_pu;
        index_t o_nu = o_adj_u[0];
        index_t o_pu_ud = o_pos_ud[u];
        index_t *o_adj_ud_u = o_adj_ud + o_pu_ud;
        o_adj_ud_u[0] = o_nu;
        for(index_t j = 1; j <= o_nu; j++) {
            index_t v = o_adj_u[j];
            o_adj_ud_u[j] = v;
        }
    }
    // add edges in other direction now
    // again range-partitioned by target vertex to avoid write races
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t th = 0; th < nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
        for(index_t u = 0; u < n; u++) {
            index_t o_pu = o_pos[u];
            index_t *o_adj_u = o_adj + o_pu;
            index_t o_nu = o_adj_u[0];
            for(index_t j = 1; j <= o_nu; j++) {
                index_t v = o_adj_u[j];
                if(start <= v && v <= stop) {
                    index_t o_pv_ud = o_pos_ud[v];
                    o_adj_ud[o_pv_ud + 1 + o_adj_ud[o_pv_ud]++] = u;
                }
            }
        }
    }
    FREE(o_pos);
    FREE(o_adj);
    fprintf(stdout, "[adj: %.2lf ms] ", pop_time());
    fflush(stdout);
    push_time();
    shade_map_t *o_shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++)
        o_shade[u] = i_shade[u];
    fprintf(stdout, "[shade: %.2lf ms] ", pop_time());
    fprintf(stdout, "done. [%.2lf ms] ", pop_time());
    print_pop_memtrack();
    fprintf(stdout, " ");
    print_current_mem();
    fprintf(stdout, "\n");
    fflush(stdout);
    FREE(c_pos);
    FREE(c_adj);
    pathq_t *out = (pathq_t *) MALLOC(sizeof(pathq_t));
    out->is_stub = 0;
    out->n = n;
    out->k = in->k;
    out->pos = o_pos_ud;
    out->adj = o_adj_ud;
    out->nl = 0;
    out->l = (index_t *) MALLOC(sizeof(index_t)*out->nl);  // zero-length; kept so pathq_free can FREE unconditionally
    out->ns = in->ns;
    out->shade = o_shade;
    out->vsum = (scalar_t *) MALLOC(sizeof(scalar_t)*n);
    return out;
}
// Run the static k-path decision oracle on q with fresh random seeds;
// stub queries are trivial NOs.
// NOTE(review): declared scalar_t while path_oracle returns an index_t 0/1 —
// harmless in practice, but the types disagree; confirm intent.
scalar_t pathq_execute(pathq_t *q)
{
    if(q->is_stub)
        return 0;
    return path_oracle(q->n, q->k, q->pos, q->adj, q->ns, q->shade, irand(), irand(), q->vsum);
}
/*************** Project a query by cutting out a given interval of vertices. */
// Compute the position array for the query obtained by deleting vertices
// [lo_v, hi_v]: each surviving vertex's new degree is its old degree minus
// the number of neighbours falling inside the cut interval.  Returns the
// total length of the resulting (degree-prefixed) adjacency buffer.
index_t get_poscut(index_t n, index_t tmax,
                   index_t *pos, index_t *adj,
                   index_t lo_v, index_t hi_v,
                   index_t *poscut)
{
    // Note: assumes the adjacency lists are sorted
    assert(lo_v <= hi_v);
    index_t ncut = n - (hi_v-lo_v+1);
    // vertices below the cut keep their indices
    for(index_t t = 0; t < tmax; t++) {
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
        for(index_t u = 0; u < lo_v; u++) {
            index_t pu = pos[t*n + u];
            index_t deg = adj[pu];
            index_t cs, ce;
            index_t l = get_interval(deg, adj + pu + 1,
                                     lo_v, hi_v,
                                     &cs, &ce);
            poscut[t*ncut + u] = deg - l;
        }
    }
    // vertices above the cut shift down by the interval length
    for(index_t t = 0; t < tmax; t++) {
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
        for(index_t u = hi_v+1; u < n; u++) {
            index_t pu = pos[t*n + u];
            index_t deg = adj[pu];
            index_t cs, ce;
            index_t l = get_interval(deg, adj + pu + 1,
                                     lo_v, hi_v,
                                     &cs, &ce);
            poscut[t*ncut + (u-hi_v-1+lo_v)] = deg - l;
        }
    }
    index_t run = prefixsum(tmax*ncut, poscut, 1);
    return run;
}
// Produce the query obtained from q by deleting the vertex interval
// [lo_v, hi_v]; surviving vertices are re-indexed downward past the gap.
// Returns a stub (trivial NO) when a forced vertex falls inside the cut.
// Caller frees the result with temppathq_free().
temppathq_t *temppathq_cut(temppathq_t *q, index_t lo_v, index_t hi_v)
{
    // Note: assumes the adjacency lists are sorted
    //fprintf(stdout, "-------------------------------\n");
    //fprintf(stdout, "low: %ld, high: %ld\n", lo_v, hi_v);
    //print_temppathq(q);
    index_t n = q->n;
    index_t tmax = q->tmax;
    index_t *pos = q->pos;
    index_t *adj = q->adj;
    assert(0 <= lo_v && lo_v <= hi_v && hi_v < n);
    // Fast-forward a stub NO when the interval
    // [lo_v,hi_v] contains an element in q->l
    for(index_t i = 0; i < q->nl; i++) {
        if(q->l[i] >= lo_v && q->l[i] <= hi_v) {
            temppathq_t *qs = (temppathq_t *) MALLOC(sizeof(temppathq_t));
            qs->is_stub = 1;
            return qs;
        }
    }
    index_t ncut = n - (hi_v-lo_v+1); // number of vertices after cut
    index_t *poscut = alloc_idxtab(tmax*ncut);
    index_t bcut = get_poscut(n, tmax, pos, adj, lo_v, hi_v, poscut);
    index_t *adjcut = alloc_idxtab(bcut);
    index_t gap = hi_v-lo_v+1;
    //print_array("poscut", tmax*ncut, poscut, 0);
    for(index_t t = 0; t < tmax; t++) {
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
        for(index_t v = 0; v < ncut; v++) {
            index_t u = v;       // v is the new index, u the old one
            if(u >= lo_v)
                u += gap;
            index_t pu = pos[t*n + u];
            index_t degu = adj[pu];
            index_t cs, ce;
            index_t l = get_interval(degu, adj + pu + 1,
                                     lo_v, hi_v,
                                     &cs, &ce);
            index_t pv = poscut[t*ncut + v];
            index_t degv = degu - l;
            adjcut[pv] = degv;
            // could parallelize this too
            for(index_t i = 0; i < cs; i++)
                adjcut[pv + 1 + i] = adj[pu + 1 + i];
            // could parallelize this too
            for(index_t i = cs; i < degv; i++)
                adjcut[pv + 1 + i] = adj[pu + 1 + i + l] - gap;
        }
    }
    //print_array("adj_cut", bcut, adjcut, 0);
    temppathq_t *qq = (temppathq_t *) MALLOC(sizeof(temppathq_t));
    qq->is_stub = 0;
    qq->n = ncut;
    qq->k = q->k;
    qq->tmax = q->tmax;
    qq->pos = poscut;
    qq->adj = adjcut;
    qq->nl = q->nl;
    qq->l = (index_t *) MALLOC(sizeof(index_t)*qq->nl);
    for(index_t i = 0; i < qq->nl; i++) {
        index_t u = q->l[i];
        assert(u < lo_v || u > hi_v);
        if(u > hi_v)
            u -= gap;           // re-index forced vertices past the cut
        qq->l[i] = u;
    }
    qq->ns = q->ns;
    qq->shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*ncut);
    for(index_t v = 0; v < ncut; v++) {
        index_t u = v;
        if(u >= lo_v)
            u += gap;
        qq->shade[v] = q->shade[u];
    }
    qq->vert_loc = q->vert_loc;  // fix: was left unset, yet temppathq_execute reads it
    qq->vsum = (scalar_t *) MALLOC(sizeof(scalar_t)*ncut);
    //print_temppathq(qq);
    //exit(0);
    return qq;
}
/****************** Project a query with given projection & embedding arrays. */
// Sentinel marking a vertex that is deleted by the projection.
#define PROJ_UNDEF 0xFFFFFFFFFFFFFFFFUL
// Compute the position array of the projected query: proj maps old vertex ->
// new vertex (or PROJ_UNDEF), embed maps new vertex -> old vertex.  The new
// degree of v counts only neighbours that survive the projection.  Returns
// the total length of the (degree-prefixed) adjacency buffer.
index_t get_posproj(index_t n, index_t *pos, index_t *adj,
                    index_t nproj, index_t *proj, index_t *embed,
                    index_t *posproj)
{
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t v = 0; v < nproj; v++) {
        index_t u = embed[v];
        index_t pu = pos[u];
        index_t deg = adj[pu];
        index_t degproj = 0;
        for(index_t i = 0; i < deg; i++) {
            index_t w = proj[adj[pu + 1 + i]];
            if(w != PROJ_UNDEF)
                degproj++;
        }
        posproj[v] = degproj;
    }
    index_t run = prefixsum(nproj, posproj, 1);
    return run;
}
// Project a query through the proj/embed vertex maps and install the new
// forced-vertex list l (of length nl), giving each forced vertex a unique
// reserved shade while keeping the remaining shades available.
// Caller frees the result with temppathq_free().
temppathq_t *temppathq_project(temppathq_t *q,
                               index_t nproj, index_t *proj, index_t *embed,
                               index_t nl, index_t *l)
{
    index_t n = q->n;
    index_t *pos = q->pos;
    index_t *adj = q->adj;
    index_t *posproj = alloc_idxtab(nproj);
    index_t bproj = get_posproj(n, pos, adj, nproj, proj, embed, posproj);
    index_t *adjproj = alloc_idxtab(bproj);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t v = 0; v < nproj; v++) {
        index_t pv = posproj[v];
        index_t u = embed[v];
        index_t pu = pos[u];
        index_t deg = adj[pu];
        index_t degproj = 0;
        for(index_t i = 0; i < deg; i++) {
            index_t w = proj[adj[pu + 1 + i]];
            if(w != PROJ_UNDEF)
                adjproj[pv + 1 + degproj++] = w;
        }
        adjproj[pv] = degproj;
    }
    temppathq_t *qq = (temppathq_t *) MALLOC(sizeof(temppathq_t));
    qq->is_stub = 0;
    qq->n = nproj;
    qq->k = q->k;
    qq->tmax = q->tmax;          // fix: was left unset, yet temppathq_execute reads it
    qq->pos = posproj;
    qq->adj = adjproj;
    // Now project the l array
    assert(q->nl == 0); // l array comes from lister
    qq->nl = nl;
    qq->l = (index_t *) MALLOC(sizeof(index_t)*nl);
    for(index_t i = 0; i < nl; i++) {
        index_t u = proj[l[i]];
        assert(u != PROJ_UNDEF); // query is a trivial NO !
        qq->l[i] = u;
    }
    // Next set up the projected shades
    qq->ns = q->ns;
    qq->shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*nproj);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++) {
        index_t v = proj[u];
        if(v != PROJ_UNDEF)
            qq->shade[v] = q->shade[u];
    }
    // Reserve a unique shade to every vertex in l
    // while keeping the remaining shades available
    // Reserve shades first ...
    index_t *l_shade = (index_t *) MALLOC(sizeof(index_t)*nl);
    shade_map_t reserved_shades = 0;
    for(index_t i = 0; i < nl; i++) {
        index_t v = qq->l[i];
        index_t j = 0;
        for(; j < qq->ns; j++)
            if(((qq->shade[v] >> j)&1) == 1 &&
               ((reserved_shades >> j)&1) == 0)
                break;
        assert(j < qq->ns);
        reserved_shades |= 1UL << j;
        l_shade[i] = j;
    }
    // ... then clear all reserved shades in one pass
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t v = 0; v < nproj; v++)
        qq->shade[v] &= ~reserved_shades;
    // ... and finally set reserved shades
    for(index_t i = 0; i < nl; i++) {
        index_t v = qq->l[i];
        qq->shade[v] = 1UL << l_shade[i];
    }
    FREE(l_shade);
    qq->vert_loc = q->vert_loc;  // fix: was left unset, yet temppathq_execute reads it
    qq->vsum = (scalar_t *) MALLOC(sizeof(scalar_t)*nproj);  // fix: was never allocated, but temppathq_free FREEs it
    return qq;
}
/**************************************************** The interval extractor. */
// Doubly-linked list node representing a closed vertex interval [start, end].
struct ivlist_struct
{
    index_t start;
    index_t end;
    struct ivlist_struct *prev;
    struct ivlist_struct *next;
};
typedef struct ivlist_struct ivlist_t;
// Extractor state: a fixed pool of interval nodes ('queue'; the first two
// entries serve as the active/spare list sentinels) plus a singly-linked
// list of intervals already cut out, kept for re-embedding indices back
// into the original graph.
typedef struct ivext_struct
{
    index_t n;                    // number of vertices in the original graph
    index_t k;                    // path length, bounds the pool size
    ivlist_t *queue;              // backing storage, k+4 nodes
    ivlist_t *active_queue_head;  // sentinel of the active interval list
    ivlist_t *spare_queue_head;   // sentinel of the free-node list
    ivlist_t *embed_list;         // cut intervals, newest first (heap-allocated)
} ivext_t;
// Return an interval node to the spare (free) list.
void ivext_enqueue_spare(ivext_t *e, ivlist_t *iv)
{
    pnlinknext(e->spare_queue_head,iv);
}
// Append an interval at the tail of the active queue.
void ivext_enqueue_active(ivext_t *e, ivlist_t *iv)
{
    pnlinkprev(e->active_queue_head,iv);
}
// Unlink and return the first active interval containing more than one
// vertex; asserts that such an interval exists.
ivlist_t *ivext_dequeue_first_nonsingleton(ivext_t *e)
{
    ivlist_t *iv = e->active_queue_head->next;
    for(;
        iv != e->active_queue_head;
        iv = iv->next)
        if(iv->end - iv->start + 1 > 1)
            break;
    assert(iv != e->active_queue_head);
    pnunlink(iv);
    return iv;
}
// Take a node from the spare list; asserts the pool is not exhausted.
ivlist_t *ivext_get_spare(ivext_t *e)
{
    assert(e->spare_queue_head->next != e->spare_queue_head);
    ivlist_t *iv = e->spare_queue_head->next;
    pnunlink(iv);
    return iv;
}
// Reset the extractor: make both circular lists empty, return all pool
// nodes to the spare list, and activate one interval covering all n vertices.
void ivext_reset(ivext_t *e)
{
    e->active_queue_head = e->queue + 0;
    e->spare_queue_head = e->queue + 1;
    e->active_queue_head->next = e->active_queue_head;
    e->active_queue_head->prev = e->active_queue_head;
    e->spare_queue_head->prev = e->spare_queue_head;
    e->spare_queue_head->next = e->spare_queue_head;
    e->embed_list = (ivlist_t *) 0;
    for(index_t i = 0; i < e->k + 2; i++)
        ivext_enqueue_spare(e, e->queue + 2 + i); // rot-safe
    ivlist_t *iv = ivext_get_spare(e);
    iv->start = 0;
    iv->end = e->n-1;
    ivext_enqueue_active(e, iv);
}
// Allocate an extractor over n vertices for paths of k vertices;
// release with ivext_free().
ivext_t *ivext_alloc(index_t n, index_t k)
{
    ivext_t *e = (ivext_t *) MALLOC(sizeof(ivext_t));
    e->n = n;
    e->k = k;
    e->queue = (ivlist_t *) MALLOC(sizeof(ivlist_t)*(k+4)); // rot-safe
    ivext_reset(e);
    return e;
}
// Release an extractor, including the heap-allocated embed-list nodes.
void ivext_free(ivext_t *e)
{
    for(ivlist_t *el = e->embed_list; el != (ivlist_t *) 0; ) {
        ivlist_t *next = el->next;
        FREE(el);
        el = next;
    }
    FREE(e->queue);
    FREE(e);
}
// Record that interval iv has been cut out of the graph: shift every active
// interval lying above it down by the cut length (active intervals are
// asserted disjoint from iv) and push a copy of iv onto the embed list so
// indices can later be mapped back to the original graph.
void ivext_project(ivext_t *e, ivlist_t *iv)
{
    for(ivlist_t *z = e->active_queue_head->next;
        z != e->active_queue_head;
        z = z->next) {
        assert(z->end < iv->start ||
               z->start > iv->end);
        if(z->start > iv->end) {
            z->start -= iv->end-iv->start+1;
            z->end -= iv->end-iv->start+1;
        }
    }
    ivlist_t *em = (ivlist_t *) MALLOC(sizeof(ivlist_t));
    em->start = iv->start;
    em->end = iv->end;
    em->next = e->embed_list;
    e->embed_list = em;
}
// Map a vertex index of the projected (cut) graph back to the original
// graph by re-inserting every interval on the embed list.
index_t ivext_embed(ivext_t *e, index_t u)
{
    for(ivlist_t *el = e->embed_list; el != (ivlist_t *) 0; el = el->next) {
        if(u >= el->start)
            u += el->end - el->start + 1;
    }
    return u;
}
// Split iv in two: the returned spare node takes the lower half
// [start, mid] and iv keeps [mid+1, end]; requires at least two vertices.
ivlist_t *ivext_halve(ivext_t *e, ivlist_t *iv)
{
    assert(iv->end - iv->start + 1 >= 2);
    index_t mid = (iv->start + iv->end)/2; // mid < iv->end
    ivlist_t *h = ivext_get_spare(e);
    h->start = iv->start;
    h->end = mid;
    iv->start = mid+1;
    return h;
}
// Total number of vertices covered by the active intervals.
index_t ivext_queue_size(ivext_t *e)
{
    index_t total = 0;
    ivlist_t *head = e->active_queue_head;
    for(ivlist_t *iv = head->next; iv != head; iv = iv->next)
        total += iv->end - iv->start + 1;
    return total;
}
// Number of intervals currently on the active queue.
index_t ivext_num_active_intervals(ivext_t *e)
{
    index_t count = 0;
    ivlist_t *head = e->active_queue_head;
    for(ivlist_t *iv = head->next; iv != head; iv = iv->next)
        count++;
    return count;
}
// Print the active interval queue to 'out' as a bracketed list, with
// endpoints mapped back to the original universe via ivext_embed.
// A nonzero 'rot' tags the line with 'R' (queue rotation in progress).
// NOTE(review): output is staged in fixed 16 KiB buffers; assumes the
// rendered queue always fits -- confirm against maximum queue sizes.
void ivext_queue_print(FILE *out, ivext_t *e, index_t rot)
{
    index_t j = 0;
    char x[16384];
    char y[16384];
    y[0] = '\0';
    sprintf(x, "%c%12ld [",
            rot == 0 ? ' ' : 'R',
            ivext_queue_size(e));
    strcat(y, x);
    for(ivlist_t *iv = e->active_queue_head->next;
        iv != e->active_queue_head;
        iv = iv->next) {
        assert(iv->start <= iv->end);
        if(iv->start < iv->end)
            sprintf(x,
                    "%s[%ld:%ld]",
                    j++ == 0 ? "" : ",",
                    ivext_embed(e, iv->start),
                    ivext_embed(e, iv->end));
        else
            sprintf(x,
                    "%s[%ld]",
                    // bug fix: was "[" for the first item, which doubled
                    // the opening bracket already printed in the header
                    // (the range branch above correctly uses "")
                    j++ == 0 ? "" : ",",
                    ivext_embed(e, iv->start));
        strcat(y, x);
    }
    strcat(y, "] ");
    fprintf(out, "%-120s", y);
    fflush(out);
}
// Extract a single k-vertex match from 'query' (if any) by repeated
// interval halving over the vertex universe.  On success returns 1 and
// stores the matched vertices, mapped back to the original universe,
// in match[0..k-1]; returns 0 if the query has no match.  The query is
// consumed (freed) unless is_root is nonzero (a root query is owned by
// the caller).
index_t extract_match(index_t is_root, temppathq_t *query, index_t *match)
{
    // Assumes adjancency lists of query are sorted.
    fprintf(stdout, "extract: %ld %ld %ld\n", query->n, query->k, query->nl);
    push_time();
    assert(query->k <= query->n);
    ivext_t *e = ivext_alloc(query->n, query->k);
    ivext_queue_print(stdout, e, 0);
    // no match in the full universe -- fail fast
    if(!temppathq_execute(query)) {
        fprintf(stdout, " -- false\n");
        ivext_free(e);
        if(!is_root)
            temppathq_free(query);
        double time = pop_time();
        fprintf(stdout, "extract done [%.2lf ms]\n", time);
        return 0;
    }
    fprintf(stdout, " -- true\n");
    // narrow the active intervals until only k vertices remain covered
    while(ivext_queue_size(e) > e->k) {
        ivlist_t *iv = ivext_dequeue_first_nonsingleton(e);
        ivlist_t *h = ivext_halve(e, iv); // h = lower half, iv = upper half
        ivext_enqueue_active(e, iv);
        // try cutting away the lower half h
        temppathq_t *qq = temppathq_cut(query, h->start, h->end);
        ivext_queue_print(stdout, e, 0);
        if(temppathq_execute(qq)) {
            fprintf(stdout, " -- true\n");
            // a match survives without h -- commit to the cut query
            if(!is_root)
                temppathq_free(query);
            query = qq;
            is_root = 0;
            ivext_project(e, h);
            ivext_enqueue_spare(e, h);
        } else {
            fprintf(stdout, " -- false\n");
            temppathq_free(qq);
            // h is needed; try cutting away the upper half iv instead
            pnunlink(iv);
            ivext_enqueue_active(e, h);
            qq = temppathq_cut(query, iv->start, iv->end);
            ivext_queue_print(stdout, e, 0);
            if(temppathq_execute(qq)) {
                fprintf(stdout, " -- true\n");
                if(!is_root)
                    temppathq_free(query);
                query = qq;
                is_root = 0;
                ivext_project(e, iv);
                ivext_enqueue_spare(e, iv);
            } else {
                fprintf(stdout, " -- false\n");
                temppathq_free(qq);
                // both halves needed -- keep iv active and evict other
                // intervals until at most k intervals remain
                ivext_enqueue_active(e, iv);
                while(ivext_num_active_intervals(e) > e->k) {
                    // Rotate queue until outlier is out ...
                    // (this iv intentionally shadows the outer one)
                    ivlist_t *iv = e->active_queue_head->next;
                    pnunlink(iv);
                    qq = temppathq_cut(query, iv->start, iv->end);
                    ivext_queue_print(stdout, e, 1);
                    if(temppathq_execute(qq)) {
                        fprintf(stdout, " -- true\n");
                        if(!is_root)
                            temppathq_free(query);
                        query = qq;
                        is_root = 0;
                        ivext_project(e, iv);
                        ivext_enqueue_spare(e, iv);
                    } else {
                        fprintf(stdout, " -- false\n");
                        temppathq_free(qq);
                        ivext_enqueue_active(e, iv);
                    }
                }
            }
        }
    }
    // NOTE(review): assumes the k surviving vertices occupy positions
    // 0..k-1 of the projected universe -- verify against ivext invariants
    for(index_t i = 0; i < query->k; i++)
        match[i] = ivext_embed(e, i);
    ivext_free(e);
    if(!is_root)
        temppathq_free(query);
    double time = pop_time();
    fprintf(stdout, "extract done [%.2lf ms]\n", time);
    return 1;
}
/**************************************************************** The lister. */
// Command mnemonics for the lister's explicit work stack.  Each stack
// word packs a mnemonic in the top bits with a 60-bit index payload
// (see command_mnemonic / command_index / to_command_idx below).
#define M_QUERY 0     // run an extraction query under the current bounds
#define M_OPEN 1      // open on element idx of the match stored on the stack
#define M_CLOSE 2     // continue opening at the next smaller index
#define M_REWIND_U 3  // undo: re-insert element idx into the upper bound u
#define M_REWIND_L 4  // undo: pop the last element forced into l
// Extract the mnemonic stored in the top bits of a command word.
index_t command_mnemonic(index_t cmd)
{
    return cmd >> 60;
}
// Extract the 60-bit index payload of a command word.
index_t command_index(index_t cmd)
{
    // keep the low 60 bits; (1UL<<60)-1 == ~(0xFFUL<<60) on 64-bit
    return cmd & ((1UL << 60) - 1);
}
// Pack a mnemonic and a 60-bit index into one command word.
index_t to_command_idx(index_t mnemonic, index_t idx)
{
    assert(idx < (1UL << 60)); // index must fit below the mnemonic bits
    return (mnemonic << 60) | idx;
}
// Pack a mnemonic with a zero index payload.
index_t to_command(index_t mnemonic)
{
    return to_command_idx(mnemonic, 0UL);
}
// State for enumerating all k-element matches via Lawler-style
// branching over an upper-bound set u and a forced lower-bound set l.
typedef struct
{
    index_t n;              // number of elements in universe
    index_t k;              // size of the sets to be listed
    index_t *u;             // upper bound as a bitmap
    index_t u_size;         // size of upper bound
    index_t *l;             // lower bound
    index_t l_size;         // size of lower bound
    index_t *stack;         // a stack for maintaining state
    index_t stack_capacity; // ... the capacity of the stack
    index_t top;            // index of stack top (-1 when empty)
    temppathq_t *root;      // the root query (owned; freed by lister_free)
} lister_t;
// Push one word onto the lister's work stack.
void lister_push(lister_t *t, index_t word)
{
    assert(t->top + 1 < t->stack_capacity); // must not overflow
    t->top++;
    t->stack[t->top] = word;
}
// Pop and return the top word of the lister's work stack.
index_t lister_pop(lister_t *t)
{
    // guard against underflow (mirrors the overflow assert in lister_push)
    assert(t->top >= 0);
    return t->stack[t->top--];
}
// Nonzero while the work stack is nonempty.
index_t lister_have_work(lister_t *t)
{
    return (t->top >= 0) ? 1 : 0;
}
// Membership test: is element j currently in the lower-bound list l?
index_t lister_in_l(lister_t *t, index_t j)
{
    index_t found = 0;
    for(index_t i = 0; !found && i < t->l_size; i++)
        found = (t->l[i] == j);
    return found;
}
// Force element j into the lower-bound list l.
void lister_push_l(lister_t *t, index_t j)
{
    // j must be new to l, and l must have room
    assert(!lister_in_l(t, j) && t->l_size < t->k);
    t->l[t->l_size] = j;
    t->l_size++;
}
// Remove the most recently forced element from the lower-bound list l.
void lister_pop_l(lister_t *t)
{
    assert(t->l_size > 0); // l must be nonempty
    --t->l_size;
}
// Reset the lister: empty l, seed the stack with a root query, and
// re-initialise the upper bound u to the full universe.
void lister_reset(lister_t *t)
{
    t->l_size = 0;
    t->top = -1; // empty stack
    lister_push(t, to_command(M_QUERY));
    t->u_size = t->n;
    for(index_t i = 0; i < t->n; i++)
        bitset(t->u, i, 1);
}
// Allocate a lister over universe {0,...,n-1} for k-element sets,
// taking ownership of 'root' (may be NULL; freed by lister_free).
lister_t *lister_alloc(index_t n, index_t k, temppathq_t *root)
{
    assert(n >= 1 && n < (1UL << 60) && k >= 1 && k <= n);
    lister_t *t = (lister_t *) MALLOC(sizeof(lister_t));
    t->n = n;
    t->k = k;
    t->u = alloc_idxtab((n+63)/64); // bitmap, one bit per element
    t->l = alloc_idxtab(k);
    // capacity bound: n rewinds plus per-match bookkeeping (see lister_run)
    t->stack_capacity = n + k*(k+1+2*k) + 1;
    t->stack = alloc_idxtab(t->stack_capacity);
    lister_reset(t);
    t->root = root;
    if(t->root != (temppathq_t *) 0) {
        assert(t->root->n == t->n);
        assert(t->root->k == t->k);
        assert(t->root->nl == 0); // root query must carry no lower bound
    }
    return t;
}
// Release a lister and the root query it owns (if any).
void lister_free(lister_t *t)
{
    if(t->root != (temppathq_t *) 0)
        temppathq_free(t->root);
    FREE(t->stack);
    FREE(t->l);
    FREE(t->u);
    FREE(t);
}
// Build the projection induced by the current upper bound u:
// proj[i] maps original index i to its compact position (PROJ_UNDEF if
// i is not in u), and embed[] is the inverse map of size u_size.
// Both arrays are freshly allocated; the caller frees them.
void lister_get_proj_embed(lister_t *t, index_t **proj_out, index_t **embed_out)
{
    index_t n = t->n;
    index_t usize = t->u_size;
    index_t *embed = (index_t *) MALLOC(sizeof(index_t)*usize);
    index_t *proj = (index_t *) MALLOC(sizeof(index_t)*n);
    // could parallelize this (needs parallel prefix sum)
    index_t next = 0;
    for(index_t i = 0; i < n; i++) {
        if(!bitget(t->u, i)) {
            proj[i] = PROJ_UNDEF;
            continue;
        }
        proj[i] = next;
        embed[next] = i;
        next++;
    }
    assert(next == usize);
    *proj_out = proj;
    *embed_out = embed;
}
// Instantiate a query restricted to the current bounds (u, l).
// Returns the projected query in *q_out and the compact-to-original
// vertex map in *embed_out (caller frees the map).
void lister_query_setup(lister_t *t, temppathq_t **q_out, index_t **embed_out)
{
    index_t *proj = (index_t *) 0;
    index_t *embed = (index_t *) 0;
    // set up the projection with u and l
    lister_get_proj_embed(t, &proj, &embed);
    *q_out = temppathq_project(t->root,
                               t->u_size, proj, embed,
                               t->l_size, t->l);
    FREE(proj);
    *embed_out = embed;
}
// Extract one match under the lister's current bounds (u, l).
// Returns 1 and writes the match, in root-universe indices, into s;
// returns 0 if no match exists under the current bounds.
index_t lister_extract(lister_t *t, index_t *s)
{
    // assumes t->u contains all elements of t->l
    // (otherwise query is trivial no)
    assert(t->root != (temppathq_t *) 0);
    if(t->u_size == t->n) {
        // rush the root query without setting up a copy
        return extract_match(1, t->root, s);
    } else {
        // a first order of business is to set up the query
        // based on the current t->l and t->u; this includes
        // also setting up the embedding back to the root,
        // in case we are lucky and actually discover a match
        temppathq_t *qq; // will be released by extractor
        index_t *embed;
        lister_query_setup(t, &qq, &embed);
        // now execute the interval extractor ...
        index_t got_match = extract_match(0, qq, s);
        // ... and embed the match (if any)
        if(got_match) {
            for(index_t i = 0; i < t->k; i++)
                s[i] = embed[s[i]];
        }
        FREE(embed);
        return got_match;
    }
}
// Run the lister until the next match is found (returns 1 with the
// match in s[0..k-1]) or the search space is exhausted (returns 0; the
// stack is then re-seeded so a subsequent call restarts the scan).
// Implements the branching as a stack machine over M_* command words.
index_t lister_run(lister_t *t, index_t *s)
{
    while(lister_have_work(t)) {
        index_t cmd = lister_pop(t);
        index_t mnem = command_mnemonic(cmd);
        index_t idx = command_index(cmd);
        switch(mnem) {
        case M_QUERY:
            if(t->k <= t->u_size && lister_extract(t, s)) {
                // we have discovered a match, which we need to
                // put on the stack to continue work when the user
                // requests this
                for(index_t i = 0; i < t->k; i++)
                    lister_push(t, s[i]);
                lister_push(t, to_command_idx(M_OPEN, t->k-1));
                // now report our discovery to user
                return 1;
            }
            break;
        case M_OPEN:
            {
                // the saved match x is the k words below the stack top
                index_t *x = t->stack + t->top - t->k + 1;
                index_t k = 0;
                // find the first element of x[0..idx-1] not already in l
                for(; k < idx; k++)
                    if(!lister_in_l(t, x[k]))
                        break;
                if(k == idx) {
                    // opening on last element of x not in l
                    // so we can dispense with x as long as we remember to
                    // insert x[idx] back to u when rewinding
                    for(index_t j = 0; j < t->k; j++)
                        lister_pop(t); // axe x from stack
                    // NOTE(review): x still points above the new top here;
                    // relies on pops not clearing the stack contents
                    if(!lister_in_l(t, x[idx])) {
                        bitset(t->u, x[idx], 0); // remove x[idx] from u
                        t->u_size--;
                        lister_push(t, to_command_idx(M_REWIND_U, x[idx]));
                        lister_push(t, to_command(M_QUERY));
                    }
                } else {
                    // have still other elements of x that we need to
                    // open on, so must keep x in stack
                    // --
                    // invariant that controls stack size:
                    // each open increases l by at least one
                    lister_push(t, to_command_idx(M_CLOSE, idx));
                    if(!lister_in_l(t, x[idx])) {
                        bitset(t->u, x[idx], 0); // remove x[idx] from u
                        t->u_size--;
                        lister_push(t, to_command_idx(M_REWIND_U, x[idx]));
                        // force x[0],x[1],...,x[idx-1] to l
                        index_t j = 0;
                        for(; j < idx; j++) {
                            if(!lister_in_l(t, x[j])) {
                                if(t->l_size >= t->k)
                                    break;
                                lister_push_l(t, x[j]);
                                lister_push(t,
                                            to_command_idx(M_REWIND_L, x[j]));
                            }
                        }
                        // only query if all of x[0..idx-1] made it into l
                        if(j == idx)
                            lister_push(t, to_command(M_QUERY));
                    }
                }
            }
            break;
        case M_CLOSE:
            assert(idx > 0);
            // resume opening at the next smaller index
            lister_push(t, to_command_idx(M_OPEN, idx-1));
            break;
        case M_REWIND_U:
            // undo a u-removal recorded by M_OPEN
            bitset(t->u, idx, 1);
            t->u_size++;
            break;
        case M_REWIND_L:
            // undo the most recent forced insertion into l
            lister_pop_l(t);
            break;
        }
    }
    // exhausted -- re-arm for a fresh scan and report no further match
    lister_push(t, to_command(M_QUERY));
    return 0;
}
/******************************************************** Root query builder. */
// Query builder for directed graphs
//
// Build the root temporal-path query for a *directed* graph g, with
// motif size k and terminal colors kk[0..k-1].  The temporal adjacency
// structure is bucketed per timestamp: for t in 0..tmax-1 and vertex v,
// pos[n*t+v] indexes into adj, where adj[pos[n*t+v]] holds the number
// of recorded neighbours of v at time t, followed by the neighbours.
// Edges are stored as triples (u,v,t) in g->edges; only the v-side
// entry (in-neighbour u of v) is recorded -- the u-side bookkeeping is
// deliberately commented out below.
temppathq_t *build_temppathq_dir(graph_t *g, index_t k, index_t *kk)
{
    push_memtrack();
    index_t n = g->num_vertices;
    index_t m = g->num_edges;
    index_t tmax = g->max_time;
    index_t *pos = alloc_idxtab(n*tmax);
    // NOTE(review): 2*m slots allocated but the directed build stores
    // only m entries (see the run == n*tmax+m assert below)
    index_t *adj = alloc_idxtab(n*tmax+2*m);
    index_t ns = k;
    shade_map_t *shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*n);
    temppathq_t *root = (temppathq_t *) MALLOC(sizeof(temppathq_t));
    root->is_stub = 0;
    root->n = g->num_vertices;
    root->k = k;
    root->tmax = tmax;
    root->pos = pos;
    root->adj = adj;
    root->nl = 0; // no forced lower-bound vertices at the root
    root->l = (index_t *) MALLOC(sizeof(index_t)*root->nl);
    root->ns = ns;
    root->shade = shade;
    root->vert_loc = 0;
    root->vsum = (scalar_t *) MALLOC(sizeof(scalar_t)*root->n);
    assert(tmax >= k-1); // presumably a k-path needs k-1 timestamps
    push_time();
    fprintf(stdout, "build query: ");
    fflush(stdout);
    push_time();
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n*tmax; u++)
        pos[u] = 0;
    double time = pop_time();
    fprintf(stdout, "[zero: %.2lf ms] ", time);
    fflush(stdout);
    push_time();
    index_t *e = g->edges;
#ifdef BUILD_PARALLEL
    // Parallel occurrence count
    // -- each thread is responsible for a group of bins,
    // all threads scan the entire list of edges
    index_t nt = num_threads();
    index_t block_size = n/nt;
#pragma omp parallel for
    for(index_t th = 0; th < nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
        for(index_t j = 0; j < 3*m; j+=3) {
            //index_t u = e[j];
            index_t v = e[j+1];
            index_t t = e[j+2];
            index_t *pos_t = (pos + (n*t));
            //if(start <= u && u <= stop) {
            //    // I am responsible for u, record adjacency to u
            //    pos_t[u]++;
            //}
            if(start <= v && v <= stop) {
                // I am responsible for v, record adjacency to v
                pos_t[v]++;
            }
        }
    }
#else
    for(index_t j = 0; j < 3*m; j+=3) {
        //index_t u = e[j];
        index_t v = e[j+1];
        index_t t = e[j+2];
        index_t *pos_t = pos + n*t;
        //pos_t[u]++;
        pos_t[v]++;
    }
#endif
    // turn per-bin counts into start offsets (one slack slot per bin)
    index_t run = prefixsum(n*tmax, pos, 1);
    assert(run == (n*tmax+m));
    time = pop_time();
    fprintf(stdout, "[pos: %.2lf ms] ", time);
    fflush(stdout);
    push_time();
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n*tmax; u++)
        adj[pos[u]] = 0; // reset per-bin fill counters
    e = g->edges;
#ifdef BUILD_PARALLEL
    // Parallel aggregation to bins
    // -- each thread is responsible for a group of bins,
    // all threads scan the entire list of edges
    nt = num_threads();
    block_size = n/nt;
#pragma omp parallel for
    for(index_t th = 0; th < nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
        for(index_t j = 0; j < 3*m; j+=3) {
            index_t u = e[j+0];
            index_t v = e[j+1];
            index_t t = e[j+2];
            //if(start <= u && u <= stop) {
            //    // I am responsible for u, record adjacency to u
            //    index_t pu = pos[n*t+u];
            //    adj[pu + 1 + adj[pu]++] = v;
            //}
            if(start <= v && v <= stop) {
                // I am responsible for v, record adjacency to v
                index_t pv = pos[n*t+v];
                adj[pv + 1 + adj[pv]++] = u;
            }
        }
    }
#else
    for(index_t j = 0; j < 3*m; j+=3) {
        index_t u = e[j+0];
        index_t v = e[j+1];
        index_t t = e[j+2];
        //index_t pu = pos[n*t+u];
        index_t pv = pos[n*t+v];
        //adj[pu + 1 + adj[pu]++] = v;
        adj[pv + 1 + adj[pv]++] = u;
    }
#endif
    time = pop_time();
    fprintf(stdout, "[adj: %.2lf ms] ", time);
    fflush(stdout);
    //print_temppathq(root);
    push_time();
    // sort each adjacency bucket (extract_match assumes sorted lists)
    adjsort(n*tmax, pos, adj);
    time = pop_time();
    fprintf(stdout, "[adjsort: %.2lf ms] ", time);
    fflush(stdout);
    push_time();
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++) {
        // shade bit j is set iff u's color matches terminal color kk[j]
        shade_map_t s = 0;
        for(index_t j = 0; j < k; j++)
            if(g->colors[u] == kk[j])
                s |= 1UL << j;
        shade[u] = s;
        //fprintf(stdout, "%4ld: 0x%08X\n", u, shade[u]);
    }
    time = pop_time();
    fprintf(stdout, "[shade: %.2lf ms] ", time);
    fflush(stdout);
    time = pop_time();
    fprintf(stdout, "done. [%.2lf ms] ", time);
    print_pop_memtrack();
    fprintf(stdout, " ");
    print_current_mem();
    fprintf(stdout, "\n");
    fflush(stdout);
    return root;
}
// Query builder for undirected graphs
//
// Build the root temporal-path query for an *undirected* graph g, with
// motif size k and terminal colors kk[0..k-1].  Layout matches
// build_temppathq_dir, except that each edge (u,v,t) is recorded in
// both endpoint buckets, so the adjacency arena holds 2*m entries.
temppathq_t *build_temppathq(graph_t *g, index_t k, index_t *kk)
{
    push_memtrack();
    index_t n = g->num_vertices;
    index_t m = g->num_edges;
    index_t tmax = g->max_time;
    index_t *pos = alloc_idxtab(n*tmax);
    index_t *adj = alloc_idxtab(n*tmax+2*m);
    index_t ns = k;
    shade_map_t *shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*n);
    temppathq_t *root = (temppathq_t *) MALLOC(sizeof(temppathq_t));
    root->is_stub = 0;
    root->n = g->num_vertices;
    root->k = k;
    root->tmax = tmax;
    root->pos = pos;
    root->adj = adj;
    root->nl = 0; // no forced lower-bound vertices at the root
    root->l = (index_t *) MALLOC(sizeof(index_t)*root->nl);
    root->ns = ns;
    root->shade = shade;
    root->vert_loc = 0;
    // bug fix: was sizeof(index_t) -- vsum is a scalar_t array (the
    // directed builder above allocates with sizeof(scalar_t))
    root->vsum = (scalar_t *) MALLOC(sizeof(scalar_t)*root->n);
    assert(tmax >= k-1); // presumably a k-path needs k-1 timestamps
    push_time();
    fprintf(stdout, "build query: ");
    fflush(stdout);
    push_time();
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n*tmax; u++)
        pos[u] = 0;
    double time = pop_time();
    fprintf(stdout, "[zero: %.2lf ms] ", time);
    fflush(stdout);
    push_time();
    index_t *e = g->edges;
#ifdef BUILD_PARALLEL
    // Parallel occurrence count
    // -- each thread is responsible for a group of bins,
    // all threads scan the entire list of edges
    index_t nt = num_threads();
    index_t block_size = n/nt;
#pragma omp parallel for
    for(index_t th = 0; th < nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
        for(index_t j = 0; j < 3*m; j+=3) {
            index_t u = e[j];
            index_t v = e[j+1];
            index_t t = e[j+2];
            index_t *pos_t = (pos + (n*t));
            if(start <= u && u <= stop) {
                // I am responsible for u, record adjacency to u
                pos_t[u]++;
            }
            if(start <= v && v <= stop) {
                // I am responsible for v, record adjacency to v
                pos_t[v]++;
            }
        }
    }
#else
    for(index_t j = 0; j < 3*m; j+=3) {
        index_t u = e[j];
        index_t v = e[j+1];
        index_t t = e[j+2];
        index_t *pos_t = pos + n*t;
        pos_t[u]++;
        pos_t[v]++;
    }
#endif
    // turn per-bin counts into start offsets (one slack slot per bin)
    index_t run = prefixsum(n*tmax, pos, 1);
    assert(run == (n*tmax+2*m));
    time = pop_time();
    fprintf(stdout, "[pos: %.2lf ms] ", time);
    fflush(stdout);
    push_time();
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n*tmax; u++) {
        adj[pos[u]] = 0; // reset per-bin fill counters
    }
    e = g->edges;
#ifdef BUILD_PARALLEL
    // Parallel aggregation to bins
    // -- each thread is responsible for a group of bins,
    // all threads scan the entire list of edges
    nt = num_threads();
    block_size = n/nt;
#pragma omp parallel for
    for(index_t th = 0; th < nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
        for(index_t j = 0; j < 3*m; j+=3) {
            index_t u = e[j+0];
            index_t v = e[j+1];
            index_t t = e[j+2];
            if(start <= u && u <= stop) {
                // I am responsible for u, record adjacency to u
                index_t pu = pos[n*t+u];
                adj[pu + 1 + adj[pu]++] = v;
            }
            if(start <= v && v <= stop) {
                // I am responsible for v, record adjacency to v
                index_t pv = pos[n*t+v];
                adj[pv + 1 + adj[pv]++] = u;
            }
        }
    }
#else
    for(index_t j = 0; j < 3*m; j+=3) {
        index_t u = e[j+0];
        index_t v = e[j+1];
        index_t t = e[j+2];
        index_t pu = pos[n*t+u];
        index_t pv = pos[n*t+v];
        adj[pu + 1 + adj[pu]++] = v;
        adj[pv + 1 + adj[pv]++] = u;
    }
#endif
    time = pop_time();
    fprintf(stdout, "[adj: %.2lf ms] ", time);
    fflush(stdout);
    push_time();
    // sort each adjacency bucket (extract_match assumes sorted lists)
    adjsort(n*tmax, pos, adj);
    time = pop_time();
    fprintf(stdout, "[adjsort: %.2lf ms] ", time);
    fflush(stdout);
    push_time();
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < n; u++) {
        // shade bit j is set iff u's color matches terminal color kk[j]
        shade_map_t s = 0;
        for(index_t j = 0; j < k; j++)
            if(g->colors[u] == kk[j])
                s |= 1UL << j;
        shade[u] = s;
        // fprintf(stdout, "%4ld: 0x%08X\n", u, shade[u]);
    }
    time = pop_time();
    fprintf(stdout, "[shade: %.2lf ms] ", time);
    fflush(stdout);
    time = pop_time();
    fprintf(stdout, "done. [%.2lf ms] ", time);
    print_pop_memtrack();
    fprintf(stdout, " ");
    print_current_mem();
    fprintf(stdout, "\n");
    fflush(stdout);
    //print_temppathq(root);
    return root;
}
void query_pre_mk1(temppathq_t *in, temppathq_t **out_q, index_t **out_map)
{
push_memtrack();
index_t nt = num_threads();
index_t i_n = in->n;
index_t k = in->k;
index_t tmax = in->tmax;
index_t *i_pos = in->pos;
index_t *i_adj = in->adj;
index_t ns = in->ns;
shade_map_t *i_shade = in->shade;
push_time();
fprintf(stdout, "query pre [1]: ");
fflush(stdout);
push_time();
// input-to-output vertex map
index_t *v_map_i2o = (index_t *) MALLOC(sizeof(index_t)*i_n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < i_n; u++)
v_map_i2o[u] = UNDEFINED;
index_t v_cnt = 0;
#ifdef BUILD_PARALLEL
// parallely construct input-to-output vertex map
index_t block_size = i_n/nt;
index_t t_vcnt[nt];
#pragma omp parallel for
for(index_t th = 0; th < nt; th++) {
index_t start = th*block_size;
index_t stop = (th == nt-1) ? i_n-1 : (start+block_size-1);
t_vcnt[th] = 0;
for(index_t u = start; u <= stop; u++) {
if(i_shade[u])
v_map_i2o[u] = t_vcnt[th]++;
}
}
// prefix sum
for(index_t th = 1; th < nt; th++)
t_vcnt[th] += t_vcnt[th-1];
#pragma omp parallel for
for(index_t th = 0; th < nt; th++) {
index_t start = th*block_size;
index_t stop = (th == nt-1) ? i_n-1 : (start+block_size-1);
index_t tsum = (th==0 ? 0 : t_vcnt[th-1]);
for(index_t u = start; u <= stop; u++) {
if(i_shade[u])
v_map_i2o[u] += tsum;
}
}
v_cnt = t_vcnt[nt-1];
#else
// serially construct input-to-output vertex map
for(index_t u = 0; u < i_n; u++) {
if(i_shade[u])
v_map_i2o[u] = v_cnt++;
}
#endif
// output-to-input vertex map
// required to reconstruct solution in original graph
index_t o_n = v_cnt;
index_t *v_map_o2i = (index_t *) MALLOC(sizeof(index_t)*o_n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < i_n; u++) {
index_t o_u = v_map_i2o[u];
if(o_u != UNDEFINED)
v_map_o2i[o_u] = u;
}
fprintf(stdout, "[map: %.2lf ms] ", pop_time());
fflush(stdout);
push_time();
// output position list
index_t *o_pos = alloc_idxtab(o_n*tmax);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < o_n*tmax; u++)
o_pos[u] = 0;
for(index_t t = 0; t < tmax; t++) {
index_t *o_pos_t = o_pos + o_n*t;
index_t *i_pos_t = i_pos + i_n*t;
index_t block_size = i_n/nt;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t th = 0; th < nt; th++) {
index_t start = th*block_size;
index_t stop = (th == nt-1) ? i_n-1 : (start+block_size-1);
for(index_t u = start; u <= stop; u++) {
index_t o_u = v_map_i2o[u];
if(o_u == UNDEFINED) continue;
index_t i_pu = i_pos_t[u];
index_t i_nu = i_adj[i_pu];
index_t *i_adj_u = i_adj + i_pu;
for(index_t j = 1; j <= i_nu; j++) {
index_t v = i_adj_u[j];
index_t o_v = v_map_i2o[v];
if(o_v == UNDEFINED) continue;
o_pos_t[o_u]++;
}
}
}
}
index_t o_m = parallelsum(o_n*tmax, o_pos);
index_t run = prefixsum(o_n*tmax, o_pos, 1);
assert(run == (o_n*tmax+o_m));
fprintf(stdout, "[pos: %.2lf ms] ", pop_time());
fflush(stdout);
push_time();
// output adjacency list
index_t *o_adj = alloc_idxtab(o_n*tmax + o_m);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < o_n*tmax; u++)
o_adj[o_pos[u]] = 0;
for(index_t t = 0; t < tmax; t++) {
index_t *o_pos_t = o_pos + o_n*t;
index_t *i_pos_t = i_pos + i_n*t;
index_t block_size = i_n/nt;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t th = 0; th < nt; th++) {
index_t start = th*block_size;
index_t stop = (th == nt-1) ? i_n-1 : (start+block_size-1);
for(index_t u = start; u <= stop; u++) {
index_t o_u = v_map_i2o[u];
if(o_u == UNDEFINED) continue;
index_t i_pu = i_pos_t[u];
index_t i_nu = i_adj[i_pu];
index_t *i_adj_u = i_adj + i_pu;
index_t o_pu = o_pos_t[o_u];
for(index_t j = 1; j <= i_nu; j++) {
index_t v = i_adj_u[j];
index_t o_v = v_map_i2o[v];
if(o_v == UNDEFINED) continue;
o_adj[o_pu + 1 + o_adj[o_pu]++] = o_v;
}
}
}
}
fprintf(stdout, "[adj: %.2lf ms] ", pop_time());
fflush(stdout);
push_time();
// output shade map
shade_map_t *o_shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*o_n);
#ifdef BUILD_PARALLEL
#pragma omp parallel
#endif
for(index_t u = 0; u < i_n; u++) {
index_t o_u = v_map_i2o[u];
if(o_u != UNDEFINED)
o_shade[o_u] = i_shade[u];
}
fprintf(stdout, "[shade: %.2lf ms] ", pop_time());
fflush(stdout);
temppathq_t *out = (temppathq_t *) MALLOC(sizeof(temppathq_t));
out->is_stub = 0;
out->n = o_n;
out->k = k;
out->tmax = tmax;
out->pos = o_pos;
out->adj = o_adj;
out->nl = 0;
out->l = (index_t *) MALLOC(sizeof(index_t)*out->nl);
out->ns = ns;
out->shade = o_shade;
out->vert_loc = in->vert_loc;
out->vsum = (scalar_t *) MALLOC(sizeof(scalar_t)*out->n);
*out_q = out;
*out_map = v_map_o2i;
FREE(v_map_i2o);
fprintf(stdout, "done. [%.2lf ms] ", pop_time());
print_pop_memtrack();
fprintf(stdout, " ");
print_current_mem();
fprintf(stdout, "\n");
fflush(stdout);
}
// Preprocess (mark 2): run a (static) path-query sieve over 'in' and
// restrict the query to vertices with a nonzero sieve value, i.e.
// vertices incident to at least one match.  Returns the reduced query
// in *out_q and the output-to-input vertex map in *out_map.
void query_pre_mk2(index_t is_dir, temppathq_t *in, temppathq_t **out_q, index_t **out_map)
{
    push_memtrack();
    index_t nt = num_threads();
    index_t i_n = in->n;
    index_t k = in->k;
    index_t tmax = in->tmax;
    index_t *i_pos = in->pos;
    index_t *i_adj = in->adj;
    index_t ns = in->ns;
    shade_map_t *i_shade = in->shade;
    // Preprocessing steps
    // 1. merge graph temporal graph to a static instance
    // 2. build vertex localised sieve for static graph
    // 3. remove all vertices which are not incident to a match
    push_time();
    // building path query
    pathq_t * pathq = (pathq_t *) 0;
    if(is_dir) {
        pathq = build_pathq_dir(in);
    } else {
        pathq = build_pathq(in);
    }
    // evaluate vertex localised sieve
    scalar_t master_sum = 0;
    scalar_t *master_vsum = (scalar_t *) MALLOC(sizeof(scalar_t)*i_n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < i_n; u++)
        master_vsum[u] = 0;
    // Note: restricting number of repetitions to one. Field size used is
    // GF(2^64) and per-vertex false negative probability is (2k-1)/2^{64}.
    // TODO: cross-verify experimental results
    // DONE: verified, a single run of sieve is sufficient
    index_t repeats = 1;
    for(index_t r = 0; r < repeats; r++) {
        fprintf(stdout, "oracle [path]: ");
        scalar_t sum = pathq_execute(pathq);
        scalar_t *vsum = pathq->vsum;
        // Support size
        index_t support_size = 0;
#ifdef BUILD_PARALLEL
        index_t nt = num_threads(); // shadows outer nt on purpose
        index_t block_size = i_n/nt;
        index_t ts_size[MAX_THREADS];
#pragma omp parallel for
        for(index_t th = 0; th < nt; th++) {
            ts_size[th] = 0;
            index_t start = th*block_size;
            index_t stop = (th == nt-1) ? i_n-1 : (start+block_size-1);
            for(index_t u = start; u <= stop; u++) {
                if(vsum[u] != 0)
                    ts_size[th]++;
            }
        }
        for(index_t th = 0; th < nt; th++){
            support_size += ts_size[th];
        }
#else
        for(index_t u = 0; u < i_n; u++) {
            if(vsum[u] != 0)
                support_size++;
        }
#endif
        fprintf(stdout, " -- %s [%ld]\n", sum!=0?"true":"false", support_size);
        fflush(stdout);
        // update master sum
        master_sum = (master_sum!=0 ? master_sum : sum);
        // update master vsum
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
        for(index_t u = 0; u < i_n; u++)
            master_vsum[u] = (master_vsum[u]!=0 ? master_vsum[u] : vsum[u]);
    }
    // free memory
    pathq_free(pathq);
    //for(index_t u = 0; u < i_n; u++)
    //    fprintf(stdout, "%4ld:"SCALAR_FORMAT_STRING"\n", u+1, master_vsum[u]);
    // retain vertices which are incident to at least one match
    push_time();
    // input-to-output vertex map
    index_t *v_map_i2o = (index_t *) MALLOC(sizeof(index_t)*i_n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < i_n; u++)
        v_map_i2o[u] = UNDEFINED;
    index_t v_cnt = 0;
#ifdef BUILD_PARALLEL
    // parallely construct input-to-output vertex map
    index_t block_size = i_n/nt;
    index_t t_vcnt[nt];
#pragma omp parallel for
    for(index_t th = 0; th < nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? i_n-1 : (start+block_size-1);
        t_vcnt[th] = 0;
        for(index_t u = start; u <= stop; u++) {
            if(master_vsum[u])
                v_map_i2o[u] = t_vcnt[th]++;
        }
    }
    // prefix sum
    for(index_t th = 1; th < nt; th++)
        t_vcnt[th] += t_vcnt[th-1];
#pragma omp parallel for
    for(index_t th = 0; th < nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? i_n-1 : (start+block_size-1);
        index_t tsum = (th==0 ? 0 : t_vcnt[th-1]);
        for(index_t u = start; u <= stop; u++) {
            if(master_vsum[u])
                v_map_i2o[u] += tsum;
        }
    }
    v_cnt = t_vcnt[nt-1];
#else
    // serially construct input-to-output vertex map
    for(index_t u = 0; u < i_n; u++) {
        if(master_vsum[u])
            v_map_i2o[u] = v_cnt++;
    }
#endif
    // output-to-input vertex map
    // required to reconstruct solution in original graph
    index_t o_n = v_cnt;
    index_t *v_map_o2i = (index_t *) MALLOC(sizeof(index_t)*o_n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < i_n; u++) {
        index_t o_u = v_map_i2o[u];
        if(o_u != UNDEFINED)
            v_map_o2i[o_u] = u;
    }
    fprintf(stdout, "query pre [2]: ");
    fprintf(stdout, "[map: %.2lf ms] ", pop_time());
    fflush(stdout);
    push_time();
    // output position list: count surviving arcs per (vertex, timestamp)
    index_t *o_pos = alloc_idxtab(o_n*tmax);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < o_n*tmax; u++)
        o_pos[u] = 0;
    for(index_t t = 0; t < tmax; t++) {
        index_t *o_pos_t = o_pos + o_n*t;
        index_t *i_pos_t = i_pos + i_n*t;
        index_t block_size = i_n/nt;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
        for(index_t th = 0; th < nt; th++) {
            index_t start = th*block_size;
            index_t stop = (th == nt-1) ? i_n-1 : (start+block_size-1);
            for(index_t u = start; u <= stop; u++) {
                index_t o_u = v_map_i2o[u];
                if(o_u == UNDEFINED) continue;
                index_t i_pu = i_pos_t[u];
                index_t i_nu = i_adj[i_pu]; // neighbour count at bucket head
                index_t *i_adj_u = i_adj + i_pu;
                for(index_t j = 1; j <= i_nu; j++) {
                    index_t v = i_adj_u[j];
                    index_t o_v = v_map_i2o[v];
                    if(o_v == UNDEFINED) continue;
                    o_pos_t[o_u]++;
                }
            }
        }
    }
    index_t o_m = parallelsum(o_n*tmax, o_pos);
    index_t run = prefixsum(o_n*tmax, o_pos, 1);
    assert(run == (o_n*tmax+o_m));
    fprintf(stdout, "[pos: %.2lf ms] ", pop_time());
    fflush(stdout);
    push_time();
    // output adjacency list
    index_t *o_adj = alloc_idxtab(o_n*tmax + o_m);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < o_n*tmax; u++)
        o_adj[o_pos[u]] = 0; // reset per-bin fill counters
    for(index_t t = 0; t < tmax; t++) {
        index_t *o_pos_t = o_pos + o_n*t;
        index_t *i_pos_t = i_pos + i_n*t;
        index_t block_size = i_n/nt;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
        for(index_t th = 0; th < nt; th++) {
            index_t start = th*block_size;
            index_t stop = (th == nt-1) ? i_n-1 : (start+block_size-1);
            for(index_t u = start; u <= stop; u++) {
                index_t o_u = v_map_i2o[u];
                if(o_u == UNDEFINED) continue;
                index_t i_pu = i_pos_t[u];
                index_t i_nu = i_adj[i_pu];
                index_t *i_adj_u = i_adj + i_pu;
                index_t o_pu = o_pos_t[o_u];
                for(index_t j = 1; j <= i_nu; j++) {
                    index_t v = i_adj_u[j];
                    index_t o_v = v_map_i2o[v];
                    if(o_v == UNDEFINED) continue;
                    o_adj[o_pu + 1 + o_adj[o_pu]++] = o_v;
                }
            }
        }
    }
    fprintf(stdout, "[adj: %.2lf ms] ", pop_time());
    fflush(stdout);
    push_time();
    // output shade map
    shade_map_t *o_shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*o_n);
#ifdef BUILD_PARALLEL
    // bug fix: was "#pragma omp parallel" (missing "for"), which made
    // every thread execute the entire loop (redundant racy writes)
#pragma omp parallel for
#endif
    for(index_t u = 0; u < i_n; u++) {
        index_t o_u = v_map_i2o[u];
        if(o_u != UNDEFINED)
            o_shade[o_u] = i_shade[u];
    }
    fprintf(stdout, "[shade: %.2lf ms] ", pop_time());
    fprintf(stdout, "done. [%.2lf ms] ", pop_time());
    print_pop_memtrack();
    fprintf(stdout, " ");
    print_current_mem();
    fprintf(stdout, "\n");
    fflush(stdout);
    temppathq_t *out = (temppathq_t *) MALLOC(sizeof(temppathq_t));
    out->is_stub = 0;
    out->n = o_n;
    out->k = k;
    out->tmax = tmax;
    out->pos = o_pos;
    out->adj = o_adj;
    out->nl = 0;
    out->l = (index_t *) MALLOC(sizeof(index_t)*out->nl);
    out->ns = ns;
    out->shade = o_shade;
    out->vert_loc = in->vert_loc;
    out->vsum = (scalar_t *) MALLOC(sizeof(scalar_t)*out->n);
    *out_q = out;
    *out_map = v_map_o2i;
    FREE(master_vsum);
    FREE(v_map_i2o);
}
// Postprocess: restrict query 'in' to the k matched vertices listed in
// uu[0..k-1] (uu is sorted in place here), producing the induced
// temporal subgraph.  Returns the subquery in *out_q and the
// output-to-input vertex map in *out_map (caller frees both).
void query_post_mk1(index_t *uu, temppathq_t *in, temppathq_t **out_q,
                    index_t **out_map)
{
    push_memtrack();
    index_t nt = num_threads();
    index_t i_n = in->n;
    index_t k = in->k;
    index_t tmax = in->tmax;
    index_t *i_pos = in->pos;
    index_t *i_adj = in->adj;
    index_t ns = in->ns;
    shade_map_t *i_shade = in->shade;
    // output graph
    index_t o_n = k;
    push_time();
    fprintf(stdout, "subgraph: ");
    fflush(stdout);
    shellsort(k, uu); // note: sorts the caller's array in place
    push_time();
    // input-to-output vertex map
    index_t *v_map_i2o = (index_t *) MALLOC(sizeof(index_t)*i_n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < i_n; u++)
        v_map_i2o[u] = UNDEFINED;
    // serially construct input-to-output vertex map
    for(index_t i = 0; i < k; i++)
        v_map_i2o[uu[i]] = i;
    // output-to-input vertex map
    // required to reconstruct solution in original graph
    index_t *v_map_o2i = (index_t *) MALLOC(sizeof(index_t)*o_n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t i = 0; i < o_n; i++) {
        v_map_o2i[i] = uu[i];
    }
    fprintf(stdout, "[map: %.2lf ms] ", pop_time());
    fflush(stdout);
    push_time();
    // output position list: count surviving arcs per (vertex, timestamp)
    index_t *o_pos = alloc_idxtab(o_n*tmax);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < o_n*tmax; u++)
        o_pos[u] = 0;
    for(index_t t = 0; t < tmax; t++) {
        index_t *o_pos_t = o_pos + o_n*t;
        index_t *i_pos_t = i_pos + i_n*t;
        index_t block_size = i_n/nt;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
        for(index_t th = 0; th < nt; th++) {
            index_t start = th*block_size;
            index_t stop = (th == nt-1) ? i_n-1 : (start+block_size-1);
            for(index_t u = start; u <= stop; u++) {
                index_t o_u = v_map_i2o[u];
                if(o_u == UNDEFINED) continue;
                index_t i_pu = i_pos_t[u];
                index_t i_nu = i_adj[i_pu]; // neighbour count at bucket head
                index_t *i_adj_u = i_adj + i_pu;
                for(index_t j = 1; j <= i_nu; j++) {
                    index_t v = i_adj_u[j];
                    index_t o_v = v_map_i2o[v];
                    if(o_v == UNDEFINED) continue;
                    o_pos_t[o_u]++;
                }
            }
        }
    }
    index_t o_m = parallelsum(o_n*tmax, o_pos);
    index_t run = prefixsum(o_n*tmax, o_pos, 1);
    assert(run == (o_n*tmax+o_m));
    fprintf(stdout, "[pos: %.2lf ms] ", pop_time());
    fflush(stdout);
    push_time();
    // output adjacency list
    index_t *o_adj = alloc_idxtab(o_n*tmax + o_m);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t u = 0; u < o_n*tmax; u++)
        o_adj[o_pos[u]] = 0; // reset per-bin fill counters
    for(index_t t = 0; t < tmax; t++) {
        index_t *o_pos_t = o_pos + o_n*t;
        index_t *i_pos_t = i_pos + i_n*t;
        index_t block_size = i_n/nt;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
        for(index_t th = 0; th < nt; th++) {
            index_t start = th*block_size;
            index_t stop = (th == nt-1) ? i_n-1 : (start+block_size-1);
            for(index_t u = start; u <= stop; u++) {
                index_t o_u = v_map_i2o[u];
                if(o_u == UNDEFINED) continue;
                index_t i_pu = i_pos_t[u];
                index_t i_nu = i_adj[i_pu];
                index_t *i_adj_u = i_adj + i_pu;
                index_t o_pu = o_pos_t[o_u];
                for(index_t j = 1; j <= i_nu; j++) {
                    index_t v = i_adj_u[j];
                    index_t o_v = v_map_i2o[v];
                    if(o_v == UNDEFINED) continue;
                    o_adj[o_pu + 1 + o_adj[o_pu]++] = o_v;
                }
            }
        }
    }
    fprintf(stdout, "[adj: %.2lf ms] ", pop_time());
    fflush(stdout);
    push_time();
    // output shade map
    shade_map_t *o_shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*o_n);
#ifdef BUILD_PARALLEL
    // bug fix: was "#pragma omp parallel" (missing "for"), which made
    // every thread execute the entire loop (redundant racy writes)
#pragma omp parallel for
#endif
    for(index_t u = 0; u < i_n; u++) {
        index_t o_u = v_map_i2o[u];
        if(o_u != UNDEFINED)
            o_shade[o_u] = i_shade[u];
    }
    fprintf(stdout, "[shade: %.2lf ms] ", pop_time());
    fflush(stdout);
    temppathq_t *out = (temppathq_t *) MALLOC(sizeof(temppathq_t));
    out->is_stub = 0;
    out->n = o_n;
    out->k = k;
    out->tmax = tmax;
    out->pos = o_pos;
    out->adj = o_adj;
    out->nl = 0;
    out->l = (index_t *) MALLOC(sizeof(index_t)*out->nl);
    out->ns = ns;
    out->shade = o_shade;
    out->vert_loc = in->vert_loc;
    out->vsum = (scalar_t *) MALLOC(sizeof(scalar_t)*out->n);
    *out_q = out;
    *out_map = v_map_o2i;
    FREE(v_map_i2o);
    fprintf(stdout, "done. [%.2lf ms] ", pop_time());
    print_pop_memtrack();
    fprintf(stdout, " ");
    print_current_mem();
    fprintf(stdout, "\n");
    fflush(stdout);
}
/****************************************************** Input reader (ASCII). */
/* Consume whitespace and '#'-comment lines from `in`, leaving the stream
 * positioned at the next significant character (or at end-of-file).
 * A '#' starts a comment that runs to the end of the line. */
void skipws(FILE *in)
{
    for(;;) {
        int ch = fgetc(in);
        if(ch == '#') {
            /* discard the remainder of the comment line */
            int rest;
            while((rest = fgetc(in)) != EOF && rest != '\n')
                ;
            ch = rest;
        }
        if(ch == EOF)
            return;
        if(!isspace(ch)) {
            /* first significant character: push it back for the caller */
            ungetc(ch, in);
            return;
        }
    }
}
/* Command codes: either parsed from the input stream ('t' lines / binary
 * stream) or forced from the command line (see main). */
#define CMD_NOP 0
#define CMD_TEST_UNIQUE 1
#define CMD_TEST_COUNT 2
#define CMD_LIST_FIRST 3
#define CMD_LIST_FIRST_VLOC 4
#define CMD_LIST_ALL_VLOC 5
#define CMD_RUN_ORACLE 6
/* Human-readable command names, indexed by the CMD_* codes above. */
char *cmd_legend[] = { "no operation", "test unique", "test count", "list first", "list first (localised)", "list all (localised)", "run oracle" };
/*
 * reader_ascii: parse a temporal-graph problem instance from an ASCII stream.
 *
 * Recognized line types (one tag character each; '#' lines are comments,
 * handled by skipws):
 *   'p'  -- "p motif <n> <m> <tmax> <is_dir>", exactly once, before the rest
 *   'e'  -- "e <i> <j> <t>", a temporal edge (1-based vertices/timestamp)
 *   'n'  -- "n <i> <d>", vertex color (1-based vertex and color)
 *   'k'  -- "k <k> <c_1> ... <c_k>", the motif as a multiset of colors
 *   't'  -- "t unique <v_1> ... <v_k>" or "t count <c>", an embedded test
 *
 * Outputs (ownership passes to the caller):
 *   *g_out        -- the parsed graph
 *   *k_out        -- motif size
 *   *kk_out       -- motif colors (0-based), length k
 *   *cmd_out      -- embedded test command (CMD_*), CMD_NOP if none
 *   *cmd_args_out -- command arguments, or NULL when no command was given
 *
 * BUGFIX: the motif-line error message printed `n` (an index_t/long) with
 * "%d"; it now uses "%ld" like every other message in this reader.
 */
void reader_ascii(FILE *in,
                  graph_t **g_out, index_t *k_out, index_t **kk_out,
                  index_t *cmd_out, index_t **cmd_args_out)
{
    push_time();
    push_memtrack();
    index_t n = 0;
    index_t m = 0;
    index_t tmax = 0;
    index_t is_dir = 0;
    graph_t *g = (graph_t *) 0;
    index_t *kk = (index_t *) 0;
    index_t cmd = CMD_NOP;
    index_t *cmd_args = (index_t *) 0;
    index_t i, j, d, k, t;
    skipws(in);
    while(!feof(in)) {
        skipws(in);
        int c = fgetc(in);
        switch(c) {
        case 'p':
            /* parameter line -- must precede every other line type */
            if(g != (graph_t *) 0)
                ERROR("duplicate parameter line");
            skipws(in);
            if(fscanf(in, "motif %ld %ld %ld %ld", &n, &m, &tmax, &is_dir) != 4)
                ERROR("invalid parameter line");
            if(n <= 0 || m < 0 ) {
                ERROR("invalid input parameters (n = %ld, m = %ld, tmax = %ld)",
                      n, m, tmax);
            }
            g = graph_alloc(n);
            graph_set_is_directed(g, is_dir);
            graph_set_max_time(g, tmax);
            break;
        case 'e':
            /* edge line; converted to 0-based on insertion */
            if(g == (graph_t *) 0)
                ERROR("parameter line must be given before edges");
            skipws(in);
            if(fscanf(in, "%ld %ld %ld", &i, &j, &t) != 3)
                ERROR("invalid edge line");
            /* NOTE(review): range validation of (i, j, t) is disabled;
             * out-of-range edges reach graph_add_edge unchecked. */
            //if(i < 1 || i > n || j < 1 || j > n || t < 1 || t > tmax) {
            //    ERROR("invalid edge (i = %ld, j = %ld t = %ld with n = %ld, tmax = %ld)",
            //          i, j, t, n, tmax);
            //}
            graph_add_edge(g, i-1, j-1, t-1);
            break;
        case 'n':
            /* vertex color line; converted to 0-based on assignment */
            if(g == (graph_t *) 0)
                ERROR("parameter line must be given before vertex colors");
            skipws(in);
            if(fscanf(in, "%ld %ld", &i, &d) != 2)
                ERROR("invalid color line");
            if(i < 1 || i > n || d < 1)
                ERROR("invalid color line (i = %ld, d = %ld with n = %ld)",
                      i, d, n);
            graph_set_color(g, i-1, d-1);
            break;
        case 'k':
            /* motif line: k followed by k colors (1-based in the input) */
            if(g == (graph_t *) 0)
                ERROR("parameter line must be given before motif");
            skipws(in);
            if(fscanf(in, "%ld", &k) != 1)
                ERROR("invalid motif line");
            if(k < 1 || k > n)
                ERROR("invalid motif line (k = %ld with n = %ld)", k, n);
            kk = alloc_idxtab(k);
            for(index_t u = 0; u < k; u++) {
                skipws(in);
                if(fscanf(in, "%ld", &i) != 1)
                    ERROR("error parsing motif line");
                if(i < 1)
                    ERROR("invalid color on motif line (i = %ld)", i);
                kk[u] = i-1;
            }
            break;
        case 't':
            /* embedded test command: "unique" or "count" */
            if(g == (graph_t *) 0 || kk == (index_t *) 0)
                ERROR("parameter and motif lines must be given before test");
            skipws(in);
            {
                char cmdstr[128];
                if(fscanf(in, "%100s", cmdstr) != 1)
                    ERROR("invalid test command");
                if(!strcmp(cmdstr, "unique")) {
                    /* k distinct 1-based vertices, stored sorted 0-based */
                    cmd_args = alloc_idxtab(k);
                    for(index_t u = 0; u < k; u++) {
                        skipws(in);
                        if(fscanf(in, "%ld", &i) != 1)
                            ERROR("error parsing test line");
                        if(i < 1 || i > n)
                            ERROR("invalid test line entry (i = %ld)", i);
                        cmd_args[u] = i-1;
                    }
                    heapsort_indext(k, cmd_args);
                    for(index_t u = 1; u < k; u++)
                        if(cmd_args[u-1] >= cmd_args[u])
                            ERROR("test line contains duplicate entries");
                    cmd = CMD_TEST_UNIQUE;
                } else {
                    if(!strcmp(cmdstr, "count")) {
                        /* a single nonnegative expected solution count */
                        cmd_args = alloc_idxtab(1);
                        skipws(in);
                        if(fscanf(in, "%ld", &i) != 1)
                            ERROR("error parsing test line");
                        if(i < 0)
                            ERROR("count on test line cannot be negative");
                        cmd = CMD_TEST_COUNT;
                        cmd_args[0] = i;
                    } else {
                        ERROR("unrecognized test command \"%s\"", cmdstr);
                    }
                }
            }
            break;
        case EOF:
            break;
        default:
            ERROR("parse error");
        }
    }
    /* a complete instance needs a graph, a motif, and a color per vertex */
    if(g == (graph_t *) 0)
        ERROR("no graph given in input");
    if(kk == (index_t *) 0)
        ERROR("no motif given in input");
    for(index_t i = 0; i < n; i++) {
        if(g->colors[i] == -1)
            ERROR("no color assigned to vertex i = %ld", i);
    }
    double time = pop_time();
    fprintf(stdout,
            "input: n = %ld, m = %ld, k = %ld, t = %ld [%.2lf ms] ",
            g->num_vertices,
            g->num_edges,
            k,
            g->max_time,
            time);
    print_pop_memtrack();
    fprintf(stdout, " ");
    print_current_mem();
    fprintf(stdout, "\n");
    *g_out = g;
    *k_out = k;
    *kk_out = kk;
    *cmd_out = cmd;
    *cmd_args_out = cmd_args;
}
/***************************************************** Input reader (binary). */
#define BIN_MAGIC 0x1234567890ABCDEFUL
/*
 * reader_bin: parse a problem instance from a binary stream.
 *
 * Stream layout (every value is one index_t):
 *   magic (must equal BIN_MAGIC), n, m, edge buffer (m values = m/2 edges),
 *   n vertex colors, has_target flag; then, only if has_target == 1:
 *   k, k motif colors, command code, and the command arguments
 *   (k values for CMD_TEST_UNIQUE, 1 value for CMD_TEST_COUNT).
 *
 * Outputs (*g_out, *k_out, *kk_out, *cmd_out, *cmd_args_out) are owned by
 * the caller; *kk_out and *cmd_args_out stay NULL without a target block.
 */
void reader_bin(FILE *in,
graph_t **g_out, index_t *k_out, index_t **kk_out,
index_t *cmd_out, index_t **cmd_args_out)
{
push_time();
push_memtrack();
index_t magic = 0;
index_t n = 0;
index_t m = 0;
graph_t *g = (graph_t *) 0;
index_t k = 0;
index_t has_target = 0;
index_t *kk = (index_t *) 0;
index_t cmd = CMD_NOP;
index_t *cmd_args = (index_t *) 0;
if(fread(&magic, sizeof(index_t), 1UL, in) != 1UL)
ERROR("error reading input");
if(magic != BIN_MAGIC)
ERROR("error reading input");
if(fread(&n, sizeof(index_t), 1UL, in) != 1UL)
ERROR("error reading input");
if(fread(&m, sizeof(index_t), 1UL, in) != 1UL)
ERROR("error reading input");
assert(n >= 0 && m >= 0 && m%2 == 0);
g = graph_alloc(n);
/* m index_t values describe m/2 edges */
index_t *e = graph_edgebuf(g, m/2);
if(fread(e, sizeof(index_t), m, in) != m)
ERROR("error reading input");
if(fread(g->colors, sizeof(index_t), n, in) != n)
ERROR("error reading input");
if(fread(&has_target, sizeof(index_t), 1UL, in) != 1UL)
ERROR("error reading input");
assert(has_target == 0 || has_target == 1);
if(has_target) {
if(fread(&k, sizeof(index_t), 1UL, in) != 1UL)
ERROR("error reading input");
assert(k >= 0);
kk = alloc_idxtab(k);
if(fread(kk, sizeof(index_t), k, in) != k)
ERROR("error reading input");
if(fread(&cmd, sizeof(index_t), 1UL, in) != 1UL)
ERROR("error reading input");
switch(cmd) {
case CMD_NOP:
break;
case CMD_TEST_UNIQUE:
cmd_args = alloc_idxtab(k);
if(fread(cmd_args, sizeof(index_t), k, in) != k)
ERROR("error reading input");
/* NOTE(review): the ASCII reader sorts with heapsort_indext and rejects
 * duplicate entries; here shellsort is used and duplicates are accepted
 * -- confirm the asymmetry is intended. */
shellsort(k, cmd_args);
break;
case CMD_TEST_COUNT:
cmd_args = alloc_idxtab(1);
if(fread(cmd_args, sizeof(index_t), 1UL, in) != 1UL)
ERROR("error reading input");
break;
default:
ERROR("invalid command in binary input stream");
break;
}
}
double time = pop_time();
fprintf(stdout,
"input: n = %ld, m = %ld, k = %ld [%.2lf ms] ",
g->num_vertices,
g->num_edges,
k,
time);
print_pop_memtrack();
fprintf(stdout, " ");
print_current_mem();
fprintf(stdout, "\n");
*g_out = g;
*k_out = k;
*kk_out = kk;
*cmd_out = cmd;
*cmd_args_out = cmd_args;
}
/************************************************************ Temporal DFS. */
/*
 * temp_dfs: recursive DFS extending the path on top of stack `s` with
 * temporal edges whose timestamps strictly increase along the path.
 * `pos`/`adj` form a time-sliced adjacency structure: pos + t*n indexes the
 * lists active at time t; adj[pu] is the list length, followed by the
 * neighbour vertices. `in_stack` marks vertices already on the path, so the
 * path stays vertex-simple. Returns 1 (leaving the solution on the stack)
 * as soon as the stack holds k vertices, 0 if no extension succeeds.
 */
index_t temp_dfs(index_t n, index_t k, index_t tmax, index_t *pos,
index_t *adj, index_t *in_stack, stk_t *s)
{
stack_node_t e;
stack_top(s, &e);
index_t u = e.u;
index_t l = e.l;
index_t tmin = e.t;
// stack holds k vertices: a complete temporal path
if(s->n == k)
return 1;
// try every outgoing edge of u with timestamp >= tmin
for(index_t t = tmin; t < tmax; t++) {
index_t *pos_t = pos + t*n;
index_t pu = pos_t[u];
index_t nu = adj[pu];
if(nu == 0) continue;
index_t *adj_u = adj + pu;
for(index_t i = 1; i <= nu; i++) {
index_t v = adj_u[i];
if(in_stack[v]) continue; // keep the path vertex-simple
stack_node_t e;
e.u = v;
e.l = l+1;
e.t = t+1; // the next edge must depart strictly later
stack_push(s, &e);
in_stack[v] = 1;
if(temp_dfs(n, k, tmax, pos, adj, in_stack, s))
return 1; // propagate success without unwinding the stack
stack_pop(s, &e);
in_stack[v] = 0;
}
}
return 0; // not found
}
/*
 * find_temppath: extract one temporal path from query `root` by running
 * temp_dfs from every vertex in a random order. On success, uu/tt receive
 * the k path vertices and timestamps in reverse order (popped from the
 * stack, so uu[0] is the path's last vertex).
 * NOTE(review): the return value is always 1, even when no start vertex
 * yields a path; callers ignore it -- do not use it as a found-flag.
 */
index_t find_temppath(temppathq_t *root, index_t *uu, index_t *tt)
{
index_t n = root->n;
index_t k = root->k;
index_t tmax = root->tmax;
index_t *pos = root->pos;
index_t *adj = root->adj;
// alloc memory
index_t *v_rand = alloc_idxtab(n);
// random permutation of vertices: randomizes the start-vertex order
index_t seed = irand();
randperm(n, seed, v_rand);
index_t *in_stack = alloc_idxtab(n);
stk_t *s = stack_alloc(k);
for(index_t j = 0; j < n; j++) {
// reset the on-path markers for each fresh start vertex
for(index_t i = 0; i < n; i++)
in_stack[i] = 0;
index_t u = v_rand[j];
stack_node_t e;
e.u = u;
e.l = 1;
e.t = 0;
stack_push(s, &e);
in_stack[u] = 1;
if(temp_dfs(n, k, tmax, pos, adj, in_stack, s)) {
// unwind the solution into uu/tt (reverse path order)
index_t cnt = 0;
while(s->n) {
stack_node_t e;
stack_pop(s, &e);
index_t u = e.u;
index_t t = e.t;
uu[cnt] = u;
tt[cnt] = t;
cnt++;
}
break;
} else {
stack_empty(s);
}
}
FREE(v_rand);
FREE(in_stack);
stack_free(s);
return 1;
}
/********************************************************** temporal rev-DFS. */
//
/*
 * temp_revdfs: reverse temporal DFS used by exhaustive_search.
 * Grows the path backwards in time: from the vertex on top of `s` it follows
 * edges with timestamps strictly below the current one. When the stack
 * reaches depth k, it checks whether the path's colors match the motif
 * `kk_in` (compared as sorted multisets) and, if so, records the path in
 * uu_out/tt_out whenever its final timestamp improves on *t_opt.
 * Always returns 1; results are communicated solely through *t_opt and the
 * output arrays.
 * NOTE(review): three k-sized scratch buffers are malloc'ed/freed at every
 * depth-k leaf -- plain malloc/free, not the tracked MALLOC/FREE used
 * elsewhere in this file; a per-call scratch area would avoid the hot-path
 * allocation. Confirm before changing, as recursion depth makes the
 * lifetime non-obvious.
 */
index_t temp_revdfs(index_t n, index_t k, index_t tmax, index_t *pos,
index_t *adj, index_t *color, index_t *kk_in,
index_t *in_stack, stk_t *s, index_t *uu_out,
index_t *tt_out, index_t *t_opt)
{
if(s->n >= k) {
// reached depth k
assert(s->n <= k);
// allocate scratch memory for the candidate solution
index_t *uu_sol = (index_t *) malloc(k*sizeof(index_t));
index_t *kk_sol = (index_t *) malloc(k*sizeof(index_t));
index_t *tt_sol = (index_t *) malloc(k*sizeof(index_t));
// get vertices in stack
stack_get_vertices(s, uu_sol);
stack_get_timestamps(s, tt_sol);
// get vertex colors
for(index_t i = 0; i < k; i++)
kk_sol[i] = color[uu_sol[i]];
// sort so the color multisets can be compared elementwise
shellsort(k, kk_sol);
// check if colors match
index_t is_motif = 1;
for(index_t i = 0; i < k; i++) {
if(kk_sol[i] != kk_in[i]) {
is_motif = 0;
break;
}
}
// match found: keep it only if it improves the best timestamp so far
if(is_motif) {
stack_node_t e;
stack_top(s, &e);
if(*t_opt > e.t) {
// copy solution vertices
for(index_t i = 0; i < k; i++)
uu_out[i] = uu_sol[i];
// copy solution timestamps
for(index_t i = 0; i < k; i++)
tt_out[i] = tt_sol[i];
*t_opt = e.t;
}
}
// free memory
free(uu_sol);
free(kk_sol);
free(tt_sol);
return 1;
} else {
stack_node_t e;
stack_top(s, &e);
index_t u = e.u;
//index_t l = e.l;
index_t t_start = e.t;
index_t t_end = 0;
// walk backwards in time: only strictly earlier timestamps qualify
for(index_t t = t_start-1; t >= t_end; t--) {
index_t *pos_t = pos + t*n;
index_t pu = pos_t[u];
index_t nu = adj[pu];
if(nu == 0) continue;
index_t *adj_u = adj + pu;
for(index_t i = 1; i <= nu; i++) {
index_t v = adj_u[i];
if(in_stack[v]) continue; // keep the path vertex-simple
stack_node_t e;
e.u = v;
//e.l = l+1;
e.t = t;
stack_push(s, &e);
in_stack[v] = 1;
// recursive call to depth k (exhaustive: no early exit)
temp_revdfs(n, k, tmax, pos, adj, color, kk_in, in_stack, s,
uu_out, tt_out, t_opt);
stack_pop(s, &e);
in_stack[v] = 0;
}
}
}
return 1; // not found
}
/*
 * exhaustive_search: find a concrete temporal path by randomized reverse
 * DFS (temp_revdfs) from the vertices that survived the algebraic sieve
 * (nonzero entries in root->vsum), in parallel over `nt` thread blocks.
 *
 * Parameters:
 *   root  -- the (vertex-localised) query; vsum selects candidate vertices
 *   kk    -- motif colors, sorted, length root->k
 *   color -- per-vertex colors in the current (preprocessed) numbering
 *   v_map -- maps current vertex indices back to original-graph indices
 *   cmd   -- only CMD_LIST_FIRST_VLOC is implemented
 * Returns 1 if a solution was found and printed, 0 otherwise.
 *
 * BUGFIX: the solution-reporting branch previously called pop_time() twice,
 * popping the function's outer timer as well as the DFS timer and leaving
 * the global timing stack unbalanced (the final pop_time() then consumed
 * the caller's timer). The DFS timer is now popped exactly once per path.
 * Also removed the `t_opt_nt` table, which was allocated and freed but
 * never used (each thread keeps a local t_opt).
 */
index_t exhaustive_search(temppathq_t *root,
                          index_t *kk,
                          index_t *color,
                          index_t *v_map,
                          index_t cmd)
{
    push_time();                     /* total timer */
    push_memtrack();
    index_t nt = num_threads();
    index_t n = root->n;
    index_t k = root->k;
    index_t tmax = root->tmax;
    index_t *pos = root->pos;
    index_t *adj = root->adj;
    scalar_t *vsum = root->vsum;

    /* count, per thread block, the vertices with a nonzero sieve value */
    index_t *vsum_cnt_nt = alloc_idxtab(nt+1);
    push_time();                     /* init timer */
    index_t block_size = n/nt;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t th = 0; th < nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
        index_t cnt = 0;
        for(index_t i = start; i <= stop; i++)
            cnt += (vsum[i] ? 1 : 0);
        vsum_cnt_nt[th] = cnt;
    }
    /* consolidate thread counts into per-thread start offsets */
    vsum_cnt_nt[nt] = 0;
    prefixsum(nt+1, vsum_cnt_nt, 0);
    index_t vsum_cnt = vsum_cnt_nt[nt];
    /* gather the vertices with a nonzero value in `vsum` */
    index_t *vsum_vertices = alloc_idxtab(vsum_cnt);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
    for(index_t th = 0; th < nt; th++) {
        index_t start = th*block_size;
        index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
        index_t j = vsum_cnt_nt[th];
        for(index_t i = start; i <= stop; i++) {
            if(vsum[i]) vsum_vertices[j++] = i;
        }
    }
    /* randomize the order in which candidate start vertices are tried */
    index_t *v_seq = alloc_idxtab(vsum_cnt);
    index_t seed = irand();
    randperm(vsum_cnt, seed, v_seq);
    double init_time = pop_time();

    push_time();                     /* DFS timer */
    index_t *uu_sol_nt = alloc_idxtab(k*nt);
    index_t *tt_sol_nt = alloc_idxtab(k*nt);
    index_t *in_stack_nt = alloc_idxtab(n*nt);
    block_size = vsum_cnt/nt;
    double dfs_time = 0.0;
    volatile index_t found = 0;      /* 0 = none; else 1 + finding thread */
    if(cmd == CMD_LIST_FIRST_VLOC) {
#ifdef BUILD_PARALLEL
#pragma omp parallel for shared(found)
#endif
        for(index_t th = 0; th < nt; th++) {
            index_t start = th*block_size;
            index_t stop = (th == nt-1) ? vsum_cnt-1 : (start+block_size-1);
            index_t *uu_sol = uu_sol_nt + th*k;
            index_t *tt_sol = tt_sol_nt + th*k;
            index_t *in_stack = in_stack_nt + th*n;
            index_t t_opt = MATH_INF;
            stk_t *s = stack_alloc(k);
            for(index_t j = start; j <= stop; j++) {
                if(found) break;     /* another thread already succeeded */
                index_t u = vsum_vertices[v_seq[j]];
                for(index_t i = 0; i < n; i++)
                    in_stack[i] = 0;
                stack_node_t e;
                e.u = u;
                //e.l = k;
                e.t = tmax;          /* search backwards from tmax */
                stack_push(s, &e);
                in_stack[u] = 1;
                temp_revdfs(n, k, tmax, pos, adj, color, kk, in_stack, s,
                            uu_sol, tt_sol, &t_opt);
                if(t_opt != MATH_INF) {
                    found = th+1;
                }
                stack_empty(s);
            }
            stack_free(s);
        }
        /* report the solution in original-graph vertex numbering */
        if(found) {
            index_t th = found-1;
            index_t *uu_sol = uu_sol_nt + th*k;
            index_t *tt_sol = tt_sol_nt + th*k;
            dfs_time = pop_time();   /* pop the DFS timer exactly once */
            fprintf(stdout, "solution [%ld, %.2lfms]: ", tt_sol[0], dfs_time);
            for(index_t i = k-1; i > 0; i--) {
                index_t u = v_map[uu_sol[i]];
                index_t v = v_map[uu_sol[i-1]];
                index_t t = tt_sol[i-1];
                fprintf(stdout, "[%ld, %ld, %ld]%s", u+1, v+1, t, i==1?"\n":" ");
            }
        }
    } else {
        //TODO: implement listing all solutions
        fprintf(stdout, "listing all solutions not supported\n");
    }
    if(!found) dfs_time = pop_time();
    FREE(vsum_cnt_nt);
    FREE(uu_sol_nt);
    FREE(tt_sol_nt);
    FREE(in_stack_nt);
    FREE(v_seq);
    FREE(vsum_vertices);
    fprintf(stdout, "exhaustive-search: [init: %.2lfms] [dfs: %.2lfms] done."
            " [%.2lfms] ", init_time, dfs_time, pop_time());
    print_pop_memtrack();
    fprintf(stdout, " ");
    print_current_mem();
    fprintf(stdout, " -- %s\n", found?"true":"false");
    fflush(stdout);
    return (found ? 1 : 0);
}
/******************************************************* Program entry point. */
/* Preprocessing modes, selected with the `-pre <value>` command-line switch
 * (MK3 runs MK1 followed by MK2). */
#define PRE_NOP 0
#define PRE_MK1 1
#define PRE_MK2 2
#define PRE_MK3 3
/*
 * Program entry point: parse the command line, read the instance, build
 * and (optionally) preprocess the temporal-path query, optionally binary
 * search for the optimal time bound, then dispatch on the command.
 */
int main(int argc, char **argv)
{
GF_PRECOMPUTE;
push_time();
push_memtrack();
// run-time configuration, overridden by command-line switches below
index_t precomp = PRE_NOP;
index_t arg_cmd = CMD_NOP;
index_t flag_help = 0;
index_t have_seed = 0;
index_t have_input = 0;
index_t find_optimal = 0;
index_t seed = 123456789;
char *filename = (char *) 0;
// parse command-line switches (flag_bin_input is file-scope state)
for(index_t f = 1; f < argc; f++) {
if(argv[f][0] == '-') {
if(!strcmp(argv[f], "-h") || !strcmp(argv[f], "-help")) {
flag_help = 1;
break;
}
if(!strcmp(argv[f], "-bin")) {
flag_bin_input = 1;
}
if(!strcmp(argv[f], "-ascii")) {
flag_bin_input = 0;
}
if(!strcmp(argv[f], "-pre")) {
if(f == argc -1)
ERROR("preprocessing argument missing from command line");
precomp = atol(argv[++f]);
}
if(!strcmp(argv[f], "-optimal")) {
find_optimal = 1;
}
if(!strcmp(argv[f], "-oracle")) {
arg_cmd = CMD_RUN_ORACLE;
}
if(!strcmp(argv[f], "-first")) {
arg_cmd = CMD_LIST_FIRST;
}
if(!strcmp(argv[f], "-first-vloc")) {
arg_cmd = CMD_LIST_FIRST_VLOC;
}
if(!strcmp(argv[f], "-all-vloc")) {
arg_cmd = CMD_LIST_ALL_VLOC;
}
if(!strcmp(argv[f], "-seed")) {
if(f == argc - 1)
ERROR("random seed missing from command line");
seed = atol(argv[++f]);
have_seed = 1;
}
if(!strcmp(argv[f], "-in")) {
if(f == argc - 1)
ERROR("input file missing from command line");
have_input = 1;
filename = argv[++f];
}
}
}
// echo the invocation for reproducibility of logged runs
fprintf(stdout, "invoked as:");
for(index_t f = 0; f < argc; f++)
fprintf(stdout, " %s", argv[f]);
fprintf(stdout, "\n");
if(flag_help) {
fprintf(stdout,
"usage: %s -pre <value> -optimal -<command-type> -seed <value> -in <input-file> -<file-type> \n"
" %s -h/help\n"
"\n"
" -pre <value> : <0> - no preprocessing (default)\n"
" <1> - preprocess step-1\n"
" <2> - preprocess step-2\n"
" <3> - preprocess step-1 and step-2\n"
" -optimal : obtain optimal solution (optional)\n"
" -<command-type> : <oracle> - decide existence of a solution\n"
" <first> - extract one solution\n"
" <first-vloc> - extract one solution (vertex localisation)\n"
" -seed <value> : integer value in range 1 to 2^32 -1\n"
" default value `%ld`\n"
" -in <input-file> : read from <input file>\n"
" read from <stdin> by default\n"
" -<file-type> : ascii - ascii input file (default) \n"
" bin - binary input file \n"
" -h or -help : help\n"
"\n"
, argv[0], argv[0], seed);
return 0;
}
if(have_seed == 0) {
fprintf(stdout,
"no random seed given, defaulting to %ld\n", seed);
}
fprintf(stdout, "random seed = %ld\n", seed);
// open the input stream (stdin unless -in was given)
FILE *in = stdin;
if(have_input) {
in = fopen(filename, "r");
if(in == NULL)
ERROR("unable to open file '%s'", filename);
} else {
fprintf(stdout, "no input file specified, defaulting to stdin\n");
}
fflush(stdout);
srand(seed);
// read the instance: graph, motif, and optional embedded command
graph_t *g;
index_t k;
index_t *kk;
index_t input_cmd;
index_t *cmd_args;
if(flag_bin_input) {
reader_bin(in, &g, &k, &kk, &input_cmd, &cmd_args);
} else {
reader_ascii(in, &g, &k, &kk, &input_cmd, &cmd_args);
}
index_t cmd = input_cmd; // by default execute command in input stream
if(arg_cmd != CMD_NOP)
cmd = arg_cmd; // override command in input stream
// build root query
index_t is_dir = 0;
temppathq_t *root = (temppathq_t *) 0;
if(g->is_directed) {
is_dir = 1;
root = build_temppathq_dir(g, k, kk);
} else {
root = build_temppathq(g, k, kk);
}
// keep a copy of colors (needed after the graph is freed below)
index_t *color = (index_t *) 0;
if(cmd == CMD_LIST_FIRST_VLOC || cmd == CMD_LIST_ALL_VLOC) {
color = alloc_idxtab(root->n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < root->n; u++)
color[u] = g->colors[u];
}
//free graph
graph_free(g);
push_time();
push_time();
// preprocess query; v_map_pre1/v_map_pre2 map preprocessed vertex
// indices back to the original numbering (only set for their mode)
index_t *v_map_pre1;
index_t *v_map_pre2;
switch(precomp) {
case PRE_NOP:
{
// no precomputation
fprintf(stdout, "no preprocessing, default execution\n");
break;
}
case PRE_MK1:
{
// preprocess: remove vertices with no matching colors
temppathq_t *root_pre;
query_pre_mk1(root, &root_pre, &v_map_pre1);
temppathq_free(root);
root = root_pre;
// preprocessed graph statistics
index_t o_n = root->n;
index_t tmax = root->tmax;
index_t *o_pos = root->pos;
index_t *o_adj = root->adj;
index_t o_m = (o_pos[o_n*(tmax-1) + o_n-1] +
o_adj[o_pos[o_n*(tmax-1) + o_n-1]] - (o_n*tmax) + 1)/2;
fprintf(stdout, "output pre [1]: n = %ld, m = %ld, k = %ld \n",
o_n, o_m, k);
// required to reconstruct the solution in original graph
//FREE(v_map_pre1);
break;
}
case PRE_MK2:
{
// preprocess: constructing vertex localised sieve in static graph
temppathq_t *root_pre;
query_pre_mk2(is_dir, root, &root_pre, &v_map_pre2);
temppathq_free(root);
root= root_pre;
// preprocessed graph statistics
index_t o_n = root->n;
index_t tmax = root->tmax;
index_t *o_pos = root->pos;
index_t *o_adj = root->adj;
index_t o_m = (o_pos[o_n*(tmax-1) + o_n-1] +
o_adj[o_pos[o_n*(tmax-1) + o_n-1]] - (o_n*tmax)+1)/2;
fprintf(stdout, "output pre [2]: n = %ld, m = %ld, k = %ld \n",
o_n, o_m, k);
// required to reconstruct the solution in original graph
//FREE(v_map_pre2);
break;
}
case PRE_MK3:
{
// -- execute all preprocessing steps --
//
// preprocess: remove vertices with no matching colors
temppathq_t *root_pre1;
query_pre_mk1(root, &root_pre1, &v_map_pre1);
temppathq_free(root);
root = root_pre1;
// preprocessed graph statistics
index_t o_n = root->n;
index_t tmax = root->tmax;
index_t *o_pos = root->pos;
index_t *o_adj = root->adj;
index_t o_m = (o_pos[o_n*(tmax-1) + o_n-1] +
o_adj[o_pos[o_n*(tmax-1) + o_n-1]] - (o_n*tmax)+1)/2;
fprintf(stdout, "output pre [1]: n = %ld, m = %ld, k = %ld \n",
o_n, o_m, k);
// preprocess: constructing vertex localised sieve in static graph
temppathq_t *root_pre2;
query_pre_mk2(is_dir, root, &root_pre2, &v_map_pre2);
temppathq_free(root);
root= root_pre2;
// preprocessed graph statistics
o_n = root->n;
o_pos = root->pos;
o_adj = root->adj;
o_m = (o_pos[o_n*(tmax-1) + o_n-1] +
o_adj[o_pos[o_n*(tmax-1) + o_n-1]] - (o_n*tmax)+1)/2;
fprintf(stdout, "output pre [2]: n = %ld, m = %ld, k = %ld \n",
o_n, o_m, k);
// required to reconstruct the solution in original graph
//FREE(v_map_pre1);
//FREE(v_map_pre2);
break;
}
default:
break;
}
double precomp_time = pop_time();
push_time();
index_t SOLUTION_EXISTS = 0; // default: assume solution do not exists
// find optimal solution: binary search on the time horizon root->tmax
if(find_optimal) {
// --- optimal solution ---
//
fprintf(stdout, "optimal : min = %ld, max = %ld\n", k-1, root->tmax);
// binary search: obtain optimal value of `t`
index_t t_opt = root->tmax;
index_t tmax = root->tmax;
index_t low = k-1;
index_t high = tmax;
while(low < high) {
index_t mid = (low+high)/2;
root->tmax = mid;
fprintf(stdout, "%13ld [%ld:%ld]\t\t", mid, low, high);
if(temppathq_execute(root)) {
if(t_opt > root->tmax)
t_opt = root->tmax;
high = mid;
fprintf(stdout, " -- true\n");
fflush(stdout);
SOLUTION_EXISTS = 1;
} else {
low = mid + 1;
fprintf(stdout, " -- false\n");
fflush(stdout);
}
}
root->tmax = t_opt;
// NOTE(review): CMD_NOP is 0, so this condition is always false and the
// block below is dead code; `cmd == CMD_NOP` was likely intended, but
// enabling it would free `root` while the command dispatch below still
// uses it -- confirm intent before changing.
if(!SOLUTION_EXISTS && CMD_NOP) {
// fprintf(stdout, " -- false\n");
fflush(stdout);
temppathq_free(root);
}
}
double opt_time = pop_time();
fprintf(stdout, "command: %s\n", cmd_legend[cmd]);
fflush(stdout);
push_time();
// execute command
switch(cmd) {
case CMD_NOP:
{
// no operation
temppathq_free(root);
break;
}
case CMD_TEST_UNIQUE:
{
// ---- test unique ---
//
// check that exactly one solution exists and equals cmd_args
index_t n = root->n;
index_t k = root->k;
lister_t *t = lister_alloc(n, k, root);
index_t *get = alloc_idxtab(k);
index_t ct = 0;
while(lister_run(t, get)) {
assert(ct == 0);
fprintf(stdout, "found %ld: ", ct);
for(index_t i = 0; i < k; i++)
fprintf(stdout, "%ld%s", get[i], i == k-1 ? "\n" : " ");
for(index_t l = 0; l < k; l++)
assert(get[l] == cmd_args[l]);
ct++;
}
assert(ct == 1);
FREE(get);
lister_free(t);
}
break;
case CMD_LIST_FIRST:
{
// --- list first solution ---
//
// list vertices: obtain `k` vertices satisfying our constraints
index_t n = root->n;
index_t k = root->k;
lister_t *t = lister_alloc(n, k, root);
index_t *get = alloc_idxtab(k);
index_t ct = 0;
if(lister_run(t, get)) {
fprintf(stdout, "found %ld: ", ct);
// print vertices mapped back through the preprocessing maps
switch(precomp) {
case PRE_NOP:
for(index_t i = 0; i < k; i++)
fprintf(stdout, "%ld%s", get[i]+1, i == k-1 ? "\n" : " ");
break;
case PRE_MK1:
for(index_t i = 0; i < k; i++)
fprintf(stdout, "%ld%s", v_map_pre1[get[i]]+1, i == k-1 ? "\n" : " ");
break;
case PRE_MK2:
for(index_t i = 0; i < k; i++)
fprintf(stdout, "%ld%s", v_map_pre2[get[i]]+1, i == k-1 ? "\n" : " ");
break;
case PRE_MK3:
for(index_t i = 0; i < k; i++)
fprintf(stdout, "%ld%s", v_map_pre1[v_map_pre2[get[i]]]+1, i == k-1 ? "\n" : " ");
break;
default:
break;
}
ct++;
//if(cmd == CMD_LIST_FIRST || CMD_LIST_OPT)
// break;
}
// post-processing: obtain vertex-induced subgraph using
// k-vertices of previous step
if(ct) {
push_time();
index_t *v_map_post = (index_t *) 0;
temppathq_t *root_post = (temppathq_t *) 0;
query_post_mk1(get, root, &root_post, &v_map_post);
// final vertex map: compose post map with the preprocessing maps
index_t *v_map = alloc_idxtab(k);
switch(precomp) {
case PRE_NOP:
for(index_t i = 0; i < k; i++)
v_map[i] = v_map_post[i];
break;
case PRE_MK1:
for(index_t i = 0; i < k; i++)
v_map[i] = v_map_pre1[v_map_post[i]];
break;
case PRE_MK2:
for(index_t i = 0; i < k; i++)
v_map[i] = v_map_pre2[v_map_post[i]];
break;
case PRE_MK3:
for(index_t i = 0; i < k; i++)
v_map[i] = v_map_pre1[v_map_pre2[v_map_post[i]]];
break;
default:
break;
}
// find itenary: temporal-DFS to get travel itenary
index_t *uu_sol = alloc_idxtab(k);
index_t *tt_sol = alloc_idxtab(k);
find_temppath(root_post, uu_sol, tt_sol) ;
fprintf(stdout, "solution [%ld, %.2lfms]: ", tt_sol[0], pop_time());
for(index_t i = k-1; i > 0; i--) {
index_t u = v_map[uu_sol[i]];
index_t v = v_map[uu_sol[i-1]];
index_t t = tt_sol[i-1];
fprintf(stdout, "[%ld, %ld, %ld]%s", u+1, v+1, t, i==1?"\n":" ");
}
FREE(v_map_post);
FREE(v_map);
FREE(uu_sol);
FREE(tt_sol);
temppathq_free(root_post);
}
FREE(get);
lister_free(t);
}
break;
case CMD_LIST_FIRST_VLOC:
case CMD_LIST_ALL_VLOC:
{
// --- vertex-localised listing: oracle first, then exhaustive DFS ---
fprintf(stdout, "oracle [temppath]: ");
fflush(stdout);
root->vert_loc = 1;
if(temppathq_execute(root)) {
fprintf(stdout, " -- true\n");
index_t n = root->n;
index_t *v_map = alloc_idxtab(n);
// build vertex map (current numbering -> original numbering)
switch(precomp) {
case PRE_NOP:
{
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t i = 0; i < n; i++)
v_map[i] = i;
}
break;
case PRE_MK1:
{
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t i = 0; i < n; i++)
v_map[i] = v_map_pre1[i];
}
break;
case PRE_MK2:
{
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t i = 0; i < n; i++)
v_map[i] = v_map_pre2[i];
}
break;
case PRE_MK3:
{
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t i = 0; i < n; i++)
v_map[i] = v_map_pre1[v_map_pre2[i]];
}
break;
default:
break;
}
// build color map in the current (preprocessed) numbering
index_t *color_map = alloc_idxtab(n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t i = 0; i < n; i++)
color_map[i] = color[v_map[i]];
exhaustive_search(root, kk, color_map, v_map, cmd);
FREE(v_map);
FREE(color_map);
} else {
fprintf(stdout, " -- false\n");
}
FREE(color);
temppathq_free(root);
}
break;
case CMD_RUN_ORACLE:
{
//if(!SOLUTION_EXISTS) break;
// --- run oracle ---
fprintf(stdout, "oracle [temppath]: ");
fflush(stdout);
if(temppathq_execute(root))
fprintf(stdout, " -- true\n");
else
fprintf(stdout, " -- false\n");
temppathq_free(root);
}
break;
default:
assert(0);
break;
}
// free vertex map
if(precomp == PRE_MK1)
FREE(v_map_pre1);
if(precomp == PRE_MK2)
FREE(v_map_pre2);
if(precomp == PRE_MK3) {
FREE(v_map_pre1);
FREE(v_map_pre2);
}
FREE(kk);
double cmd_time = pop_time();
double time = pop_time();
fprintf(stdout, "command done [%.2lf ms %.2lfms %.2lf ms %.2lf ms]\n",
precomp_time, opt_time, cmd_time, time);
if(input_cmd != CMD_NOP)
FREE(cmd_args);
time = pop_time();
fprintf(stdout, "grand total [%.2lf ms] ", time);
print_pop_memtrack();
fprintf(stdout, "\n");
fprintf(stdout, "host: %s\n", sysdep_hostname());
fprintf(stdout,
"build: %s, %s, %s, %ld x %s\n",
#ifdef BUILD_PARALLEL
"multithreaded",
#else
"single thread",
#endif
#ifdef BUILD_PREFETCH
"prefetch",
#else
"no prefetch",
#endif
GENF_TYPE,
LIMBS_IN_LINE,
LIMB_TYPE);
fprintf(stdout,
"compiler: gcc %d.%d.%d\n",
__GNUC__,
__GNUC_MINOR__,
__GNUC_PATCHLEVEL__);
fflush(stdout);
// sanity: every tracked allocation freed, timer/memtrack stacks empty
assert(malloc_balance == 0);
assert(memtrack_stack_top < 0);
assert(start_stack_top < 0);
return 0;
}
/***********************************************************************
* Software License Agreement (BSD License)
*
* Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved.
* Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved.
*
* THE BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*************************************************************************/
#ifndef RTABMAP_FLANN_KDTREE_INDEX_H_
#define RTABMAP_FLANN_KDTREE_INDEX_H_
#include <algorithm>
#include <map>
#include <cassert>
#include <cstring>
#include <stdarg.h>
#include <cmath>
#include "rtflann/general.h"
#include "rtflann/algorithms/nn_index.h"
#include "rtflann/util/dynamic_bitset.h"
#include "rtflann/util/matrix.h"
#include "rtflann/util/result_set.h"
#include "rtflann/util/heap.h"
#include "rtflann/util/allocator.h"
#include "rtflann/util/random.h"
#include "rtflann/util/saving.h"
namespace rtflann
{
/**
 * Parameters for KDTreeIndex.
 * "trees": number of randomized k-d trees to build (default 4).
 */
struct KDTreeIndexParams : public IndexParams
{
KDTreeIndexParams(int trees = 4)
{
(*this)["algorithm"] = FLANN_INDEX_KDTREE;
(*this)["trees"] = trees;
}
};
/**
* Randomized kd-tree index
*
* Contains the k-d trees and other information for indexing a set of points
* for nearest-neighbor matching.
*/
template <typename Distance>
class KDTreeIndex : public NNIndex<Distance>
{
public:
typedef typename Distance::ElementType ElementType;
typedef typename Distance::ResultType DistanceType;
typedef NNIndex<Distance> BaseClass;
typedef bool needs_kdtree_distance;
private:
/*--------------------- Internal Data Structures --------------------------*/
// A single k-d tree node. Nodes are allocated by placement-new out of the
// index's memory pool (see serialize()), hence the manual destructor calls
// below instead of `delete`.
struct Node
{
/**
* Dimension used for subdivision.
*/
int divfeat;
/**
* The values used for subdivision.
*/
DistanceType divval;
/**
* Point data
*/
ElementType* point;
/**
* The child nodes (both NULL for a leaf).
*/
Node* child1, *child2;
Node(){
child1 = NULL;
child2 = NULL;
}
~Node() {
// children live in the pool: run their destructors explicitly,
// never delete (the pool owns the storage)
if (child1 != NULL) { child1->~Node(); child1 = NULL; }
if (child2 != NULL) { child2->~Node(); child2 = NULL; }
}
private:
// (De)serialize this subtree; direction is chosen by the Archive type.
template<typename Archive>
void serialize(Archive& ar)
{
typedef KDTreeIndex<Distance> Index;
Index* obj = static_cast<Index*>(ar.getObject());
ar & divfeat;
ar & divval;
bool leaf_node = false;
if (Archive::is_saving::value) {
leaf_node = ((child1==NULL) && (child2==NULL));
}
ar & leaf_node;
if (leaf_node) {
if (Archive::is_loading::value) {
// leaves are not stored with their point data; recover the point
// from the dataset -- divfeat serves as the point index for leaves
point = obj->points_[divfeat];
}
}
if (!leaf_node) {
if (Archive::is_loading::value) {
// interior nodes: allocate children from the index's pool
child1 = new(obj->pool_) Node();
child2 = new(obj->pool_) Node();
}
ar & *child1;
ar & *child2;
}
}
friend struct serialization::access;
};
typedef Node* NodePtr;
typedef BranchStruct<NodePtr, DistanceType> BranchSt;
typedef BranchSt* Branch;
public:
/**
 * KDTree index constructor (no dataset yet; the dataset must be supplied
 * later, e.g. via the dataset constructor overload or addPoints).
 *
 * Params:
 *     params = parameters passed to the kdtree algorithm
 *              ("trees" = number of randomized trees, default 4)
 *     d = distance functor
 */
KDTreeIndex(const IndexParams& params = KDTreeIndexParams(), Distance d = Distance() ) :
BaseClass(params, d), mean_(NULL), var_(NULL)
{
trees_ = get_param(index_params_,"trees",4);
}
/**
 * KDTree constructor with dataset.
 *
 * Params:
 *     inputData = dataset with the input features
 *     params = parameters passed to the kdtree algorithm
 *     d = distance functor
 */
KDTreeIndex(const Matrix<ElementType>& dataset, const IndexParams& params = KDTreeIndexParams(),
Distance d = Distance() ) : BaseClass(params,d ), mean_(NULL), var_(NULL)
{
trees_ = get_param(index_params_,"trees",4);
setDataset(dataset);
}
// Copy constructor: deep-copies every tree via copyTree so the new index
// owns its own nodes.
// NOTE(review): mean_ and var_ are not initialized here (their declarations
// are outside this view) -- confirm they are reset before first use.
KDTreeIndex(const KDTreeIndex& other) : BaseClass(other),
trees_(other.trees_)
{
tree_roots_.resize(other.tree_roots_.size());
for (size_t i=0;i<tree_roots_.size();++i) {
copyTree(tree_roots_[i], other.tree_roots_[i]);
}
}
// Copy-and-swap assignment: `other` is taken by value (the copy has already
// been made), so swapping leaves the old state to be destroyed with `other`.
KDTreeIndex& operator=(KDTreeIndex other)
{
this->swap(other);
return *this;
}
/**
 * Standard destructor: releases the index's data via freeIndex().
 */
virtual ~KDTreeIndex()
{
freeIndex();
}
// Polymorphic deep copy; the caller owns the returned index.
BaseClass* clone() const
{
return new KDTreeIndex(*this);
}
using BaseClass::buildIndex;
/**
 * Incrementally add points to the index. If the dataset has grown past
 * `rebuild_threshold` times its size at the last build, the index is
 * rebuilt from scratch; otherwise each new point is inserted into every
 * tree individually.
 */
void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2)
{
assert(points.cols==veclen_);
size_t old_size = size_;
extendDataset(points);
if (rebuild_threshold>1 && size_at_build_*rebuild_threshold<size_) {
buildIndex();
}
else {
for (size_t i=old_size;i<size_;++i) {
for (int j = 0; j < trees_; j++) {
addPointToTree(tree_roots_[j], i);
}
}
}
}
// Algorithm identifier for this index type (used by serialization).
flann_algorithm_t getType() const
{
return FLANN_INDEX_KDTREE;
}
/**
 * (De)serialize the whole index; direction is chosen by the Archive type.
 * On load, tree roots are placement-new'ed from the memory pool and the
 * index_params_ map is restored to match the loaded state.
 */
template<typename Archive>
void serialize(Archive& ar)
{
ar.setObject(this);
ar & *static_cast<NNIndex<Distance>*>(this);
ar & trees_;
if (Archive::is_loading::value) {
tree_roots_.resize(trees_);
}
for (size_t i=0;i<tree_roots_.size();++i) {
if (Archive::is_loading::value) {
tree_roots_[i] = new(pool_) Node();
}
ar & *tree_roots_[i];
}
if (Archive::is_loading::value) {
index_params_["algorithm"] = getType();
index_params_["trees"] = trees_;
}
}
// Serialize the index to an already-open stream.
void saveIndex(FILE* stream)
{
serialization::SaveArchive sa(stream);
sa & *this;
}
// Load the index from an already-open stream; the current index contents
// are freed first.
void loadIndex(FILE* stream)
{
freeIndex();
serialization::LoadArchive la(stream);
la & *this;
}
/**
 * Computes the index memory usage
 * Returns: memory used by the index
 */
int usedMemory() const
{
return int(pool_.usedMemory+pool_.wastedMemory+size_*sizeof(int)); // pool memory and vind array memory
}
/**
 * Find set of nearest neighbors to vec. Their indices are stored inside
 * the result object.
 *
 * Params:
 *     result = the result object in which the indices of the nearest-neighbors are stored
 *     vec = the vector for which to search the nearest neighbors
 *     searchParams = `checks` bounds the number of leaves examined
 *         (best-bin-first); FLANN_CHECKS_UNLIMITED selects exact search.
 *         `eps` widens the acceptable approximation (epsError = 1 + eps).
 */
void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) const
{
int maxChecks = searchParams.checks;
float epsError = 1+searchParams.eps;
// the <removed_> template flag selects whether deleted points are skipped
if (maxChecks==FLANN_CHECKS_UNLIMITED) {
if (removed_) {
getExactNeighbors<true>(result, vec, epsError);
}
else {
getExactNeighbors<false>(result, vec, epsError);
}
}
else {
if (removed_) {
getNeighbors<true>(result, vec, maxChecks, epsError);
}
else {
getNeighbors<false>(result, vec, maxChecks, epsError);
}
}
}
#ifdef FLANN_KDTREE_MEM_OPT
/**
 * Find set of nearest neighbors to vec. Their indices are stored inside
 * the result object.
 *
 * Params:
 *     result = the result object in which the indices of the nearest-neighbors are stored
 *     vec = the vector for which to search the nearest neighbors
 *     searchParams = search parameters
 *     heap = caller-provided branch heap, reused across queries to avoid
 *            a per-query allocation (not used on the exact-search path)
 */
void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams, Heap<BranchSt>* heap) const
{
int maxChecks = searchParams.checks;
float epsError = 1+searchParams.eps;
if (maxChecks==FLANN_CHECKS_UNLIMITED) {
if (removed_) {
getExactNeighbors<true>(result, vec, epsError);
}
else {
getExactNeighbors<false>(result, vec, epsError);
}
}
else {
if (removed_) {
getNeighbors<true>(result, vec, maxChecks, epsError, heap);
}
else {
getNeighbors<false>(result, vec, maxChecks, epsError, heap);
}
}
}
/**
 * @brief Perform k-nearest neighbor search
 * @param[in] queries The query points for which to find the nearest neighbors
 * @param[out] indices The indices of the nearest neighbors found
 * @param[out] dists Distances to the nearest neighbors found
 * @param[in] knn Number of nearest neighbors to return
 * @param[in] params Search parameters
 * @return Total number of neighbors found over all queries
 *
 * Fix: removed a leftover profiling artifact — a std::vector<double>
 * `times` that was allocated and sorted but never written or read
 * (compare the sibling overload taking std::vector outputs, which has
 * no such code).
 */
virtual int knnSearch(const Matrix<ElementType>& queries,
Matrix<size_t>& indices,
Matrix<DistanceType>& dists,
size_t knn,
const SearchParams& params) const
{
assert(queries.cols == veclen());
assert(indices.rows >= queries.rows);
assert(dists.rows >= queries.rows);
assert(indices.cols >= knn);
assert(dists.cols >= knn);
bool use_heap;
if (params.use_heap==FLANN_Undefined) {
// heuristic: a heap-based result set only pays off for larger knn
use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false;
}
else {
use_heap = (params.use_heap==FLANN_True)?true:false;
}
int count = 0;
// branch heap shared by all queries to avoid per-query allocation
Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
if (use_heap) {
//#pragma omp parallel num_threads(params.cores)
{
KNNResultSet2<DistanceType> resultSet(knn);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = std::min(resultSet.size(), knn);
resultSet.copy(indices[i], dists[i], n, params.sorted);
// translate internal point indices back to user-visible ids
indices_to_ids(indices[i], indices[i], n);
count += n;
}
}
}
else {
//#pragma omp parallel num_threads(params.cores)
{
KNNSimpleResultSet<DistanceType> resultSet(knn);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = std::min(resultSet.size(), knn);
resultSet.copy(indices[i], dists[i], n, params.sorted);
indices_to_ids(indices[i], indices[i], n);
count += n;
}
}
}
delete heap;
return count;
}
/**
 * @brief Perform k-nearest neighbor search
 * @param[in] queries The query points for which to find the nearest neighbors
 * @param[out] indices The indices of the nearest neighbors found (resized per query)
 * @param[out] dists Distances to the nearest neighbors found (resized per query)
 * @param[in] knn Number of nearest neighbors to return
 * @param[in] params Search parameters
 * @return Total number of neighbors found over all queries
 */
virtual int knnSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<size_t> >& indices,
std::vector<std::vector<DistanceType> >& dists,
size_t knn,
const SearchParams& params) const
{
assert(queries.cols == veclen());
bool use_heap;
if (params.use_heap==FLANN_Undefined) {
// heuristic: heap-based result set pays off only for larger knn
use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false;
}
else {
use_heap = (params.use_heap==FLANN_True)?true:false;
}
// grow output containers if the caller passed them undersized
if (indices.size() < queries.rows ) indices.resize(queries.rows);
if (dists.size() < queries.rows ) dists.resize(queries.rows);
// branch heap shared by all queries to avoid per-query allocation
Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
int count = 0;
if (use_heap) {
//#pragma omp parallel num_threads(params.cores)
{
KNNResultSet2<DistanceType> resultSet(knn);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = std::min(resultSet.size(), knn);
indices[i].resize(n);
dists[i].resize(n);
if (n>0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
// translate internal point indices back to user-visible ids
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
count += n;
}
}
}
else {
//#pragma omp parallel num_threads(params.cores)
{
KNNSimpleResultSet<DistanceType> resultSet(knn);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = std::min(resultSet.size(), knn);
indices[i].resize(n);
dists[i].resize(n);
if (n>0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
count += n;
}
}
}
delete heap;
return count;
}
/**
 * @brief Perform radius search
 * @param[in] queries The query points
 * @param[out] indices The indices of the neighbors found within the given radius
 * @param[out] dists The distances to the nearest neighbors found
 * @param[in] radius The radius used for search
 * @param[in] params Search parameters
 * @return Number of neighbors found (counts all matches, even those that
 *         do not fit into the output buffers)
 */
virtual int radiusSearch(const Matrix<ElementType>& queries,
Matrix<size_t>& indices,
Matrix<DistanceType>& dists,
float radius,
const SearchParams& params) const
{
assert(queries.cols == veclen());
int count = 0;
// output capacity per query is bounded by the narrower of the two buffers
size_t num_neighbors = std::min(indices.cols, dists.cols);
int max_neighbors = params.max_neighbors;
if (max_neighbors<0) max_neighbors = num_neighbors;
else max_neighbors = std::min(max_neighbors,(int)num_neighbors);
// branch heap shared by all queries to avoid per-query allocation
Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
if (max_neighbors==0) {
// count-only mode: no indices/distances are written
//#pragma omp parallel num_threads(params.cores)
{
CountRadiusResultSet<DistanceType> resultSet(radius);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
count += resultSet.size();
}
}
}
else {
// explicitly indicated to use unbounded radius result set
// and we know there'll be enough room for resulting indices and dists
if (params.max_neighbors<0 && (num_neighbors>=this->size())) {
//#pragma omp parallel num_threads(params.cores)
{
RadiusResultSet<DistanceType> resultSet(radius);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = resultSet.size();
count += n;
if (n>num_neighbors) n = num_neighbors;
resultSet.copy(indices[i], dists[i], n, params.sorted);
// mark the next element in the output buffers as unused
if (n<indices.cols) indices[i][n] = size_t(-1);
if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
indices_to_ids(indices[i], indices[i], n);
}
}
}
else {
// number of neighbors limited to max_neighbors
//#pragma omp parallel num_threads(params.cores)
{
KNNRadiusResultSet<DistanceType> resultSet(radius, max_neighbors);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = resultSet.size();
count += n;
if ((int)n>max_neighbors) n = max_neighbors;
resultSet.copy(indices[i], dists[i], n, params.sorted);
// mark the next element in the output buffers as unused
if (n<indices.cols) indices[i][n] = size_t(-1);
if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
indices_to_ids(indices[i], indices[i], n);
}
}
}
}
delete heap;
return count;
}
/**
 * @brief Perform radius search
 * @param[in] queries The query points
 * @param[out] indices The indices of the neighbors found within the given radius (resized per query)
 * @param[out] dists The distances to the nearest neighbors found (resized per query)
 * @param[in] radius The radius used for search
 * @param[in] params Search parameters
 * @return Number of neighbors found (counts all matches, even those
 *         truncated by params.max_neighbors)
 */
virtual int radiusSearch(const Matrix<ElementType>& queries,
std::vector< std::vector<size_t> >& indices,
std::vector<std::vector<DistanceType> >& dists,
float radius,
const SearchParams& params) const
{
assert(queries.cols == veclen());
int count = 0;
// branch heap shared by all queries to avoid per-query allocation
Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
// just count neighbors
if (params.max_neighbors==0) {
//#pragma omp parallel num_threads(params.cores)
{
CountRadiusResultSet<DistanceType> resultSet(radius);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
count += resultSet.size();
}
}
}
else {
// grow output containers if the caller passed them undersized
if (indices.size() < queries.rows ) indices.resize(queries.rows);
if (dists.size() < queries.rows ) dists.resize(queries.rows);
if (params.max_neighbors<0) {
// search for all neighbors
//#pragma omp parallel num_threads(params.cores)
{
RadiusResultSet<DistanceType> resultSet(radius);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = resultSet.size();
count += n;
indices[i].resize(n);
dists[i].resize(n);
if (n > 0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
// translate internal point indices back to user-visible ids
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
}
}
}
else {
// number of neighbors limited to max_neighbors
//#pragma omp parallel num_threads(params.cores)
{
KNNRadiusResultSet<DistanceType> resultSet(radius, params.max_neighbors);
//#pragma omp for schedule(static) reduction(+:count)
for (int i = 0; i < (int)queries.rows; i++) {
resultSet.clear();
findNeighbors(resultSet, queries[i], params, heap);
size_t n = resultSet.size();
count += n;
if ((int)n>params.max_neighbors) n = params.max_neighbors;
indices[i].resize(n);
dists[i].resize(n);
if (n > 0) {
resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
indices_to_ids(&indices[i][0], &indices[i][0], n);
}
}
}
}
}
delete heap;
return count;
}
#endif
protected:
/**
 * Builds the index: constructs trees_ randomized kd-trees over all
 * size_ points, each from an independently shuffled index permutation.
 */
void buildIndexImpl()
{
// Create a permutable array of indices to the input vectors.
std::vector<int> ind(size_);
for (size_t i = 0; i < size_; ++i) {
ind[i] = int(i);
}
// scratch buffers used by meanSplit(); released once all trees are built
mean_ = new DistanceType[veclen_];
var_ = new DistanceType[veclen_];
tree_roots_.resize(trees_);
/* Construct the randomized trees. */
for (int i = 0; i < trees_; i++) {
/* Randomize the order of vectors to allow for unbiased sampling. */
// NOTE(review): std::random_shuffle was deprecated in C++14 and
// removed in C++17; consider std::shuffle with an explicit URBG.
std::random_shuffle(ind.begin(), ind.end());
tree_roots_[i] = divideTree(&ind[0], int(size_) );
}
delete[] mean_;
delete[] var_;
}
/**
 * Frees all trees: nodes were created with placement new into the
 * pooled allocator, so destructors are invoked explicitly and the
 * pool's memory is released in one shot.
 */
void freeIndex()
{
for (size_t i=0;i<tree_roots_.size();++i) {
// using placement new, so call destructor explicitly
if (tree_roots_[i]!=NULL) tree_roots_[i]->~Node();
}
pool_.free();
}
private:
/**
 * Recursively deep-copies the tree rooted at src into dst, allocating
 * nodes from this index's pool. Leaves are recognized by both children
 * being NULL; at a leaf, divfeat holds the point index, so the point
 * pointer is rebound to this index's points_ array.
 */
void copyTree(NodePtr& dst, const NodePtr& src)
{
dst = new(pool_) Node();
dst->divfeat = src->divfeat;
dst->divval = src->divval;
if (src->child1==NULL && src->child2==NULL) {
dst->point = points_[dst->divfeat];
dst->child1 = NULL;
dst->child2 = NULL;
}
else {
copyTree(dst->child1, src->child1);
copyTree(dst->child2, src->child2);
}
}
/**
 * Create a tree node that subdivides the list of vecs from ind[0]
 * to ind[count-1]. The routine is called recursively on each sublist.
 *
 * Params: ind = array of point indices to split
 *         count = number of indices in ind
 * Returns: pointer to the newly created (pool-allocated) subtree root
 */
NodePtr divideTree(int* ind, int count)
{
NodePtr node = new(pool_) Node(); // allocate memory
/* If too few exemplars remain, then make this a leaf node. */
if (count == 1) {
node->child1 = node->child2 = NULL; /* Mark as leaf node. */
node->divfeat = *ind; /* Store index of this vec. */
node->point = points_[*ind];
}
else {
int idx;
int cutfeat;
DistanceType cutval;
// choose split dimension/value and partition ind around it
meanSplit(ind, count, idx, cutfeat, cutval);
node->divfeat = cutfeat;
node->divval = cutval;
node->child1 = divideTree(ind, idx);
node->child2 = divideTree(ind+idx, count-idx);
}
return node;
}
/**
 * Choose which feature to use in order to subdivide this set of vectors.
 * Make a random choice among those with the highest variance, and use
 * its mean as the threshold value.
 *
 * On return, index is the split position in ind, cutfeat the chosen
 * dimension and cutval the split threshold.
 */
void meanSplit(int* ind, int count, int& index, int& cutfeat, DistanceType& cutval)
{
memset(mean_,0,veclen_*sizeof(DistanceType));
memset(var_,0,veclen_*sizeof(DistanceType));
/* Compute mean values. Only the first SAMPLE_MEAN values need to be
sampled to get a good estimate.
*/
int cnt = std::min((int)SAMPLE_MEAN+1, count);
for (int j = 0; j < cnt; ++j) {
ElementType* v = points_[ind[j]];
for (size_t k=0; k<veclen_; ++k) {
mean_[k] += v[k];
}
}
DistanceType div_factor = DistanceType(1)/cnt;
for (size_t k=0; k<veclen_; ++k) {
mean_[k] *= div_factor;
}
/* Compute variances (no need to divide by count). */
for (int j = 0; j < cnt; ++j) {
ElementType* v = points_[ind[j]];
for (size_t k=0; k<veclen_; ++k) {
DistanceType dist = v[k] - mean_[k];
var_[k] += dist * dist;
}
}
/* Select one of the highest variance indices at random. */
cutfeat = selectDivision(var_);
cutval = mean_[cutfeat];
int lim1, lim2;
planeSplit(ind, count, cutfeat, cutval, lim1, lim2);
// choose the split point closest to the middle within [lim1, lim2]
if (lim1>count/2) index = lim1;
else if (lim2<count/2) index = lim2;
else index = count/2;
/* If either list is empty, it means that all remaining features
* are identical. Split in the middle to maintain a balanced tree.
*/
if ((lim1==count)||(lim2==0)) index = count/2;
}
/**
 * Select the top RAND_DIM largest values from v and return the index of
 * one of these selected at random.
 *
 * Maintains topind as a descending-sorted list of the indices of the
 * largest variances seen so far (simple insertion by swapping).
 */
int selectDivision(DistanceType* v)
{
int num = 0;
size_t topind[RAND_DIM];
/* Create a list of the indices of the top RAND_DIM values. */
for (size_t i = 0; i < veclen_; ++i) {
// note: when num==0 the first operand short-circuits, so
// topind[num-1] is never read uninitialized
if ((num < RAND_DIM)||(v[i] > v[topind[num-1]])) {
/* Put this element at end of topind. */
if (num < RAND_DIM) {
topind[num++] = i; /* Add to list. */
}
else {
topind[num-1] = i; /* Replace last element. */
}
/* Bubble end value down to right location by repeated swapping. */
int j = num - 1;
while (j > 0 && v[topind[j]] > v[topind[j-1]]) {
std::swap(topind[j], topind[j-1]);
--j;
}
}
}
/* Select a random integer in range [0,num-1], and return that index. */
int rnd = rand_int(num);
return (int)topind[rnd];
}
/**
 * Subdivide the list of points by a plane perpendicular on axe corresponding
 * to the 'cutfeat' dimension at 'cutval' position.
 *
 * Two Hoare-style partition passes: the first separates <cutval from
 * >=cutval, the second separates ==cutval from >cutval.
 *
 * On return:
 * dataset[ind[0..lim1-1]][cutfeat]<cutval
 * dataset[ind[lim1..lim2-1]][cutfeat]==cutval
 * dataset[ind[lim2..count]][cutfeat]>cutval
 */
void planeSplit(int* ind, int count, int cutfeat, DistanceType cutval, int& lim1, int& lim2)
{
/* Move vector indices for left subtree to front of list. */
int left = 0;
int right = count-1;
for (;; ) {
while (left<=right && points_[ind[left]][cutfeat]<cutval) ++left;
while (left<=right && points_[ind[right]][cutfeat]>=cutval) --right;
if (left>right) break;
std::swap(ind[left], ind[right]); ++left; --right;
}
lim1 = left;
right = count-1;
for (;; ) {
while (left<=right && points_[ind[left]][cutfeat]<=cutval) ++left;
while (left<=right && points_[ind[right]][cutfeat]>cutval) --right;
if (left>right) break;
std::swap(ind[left], ind[right]); ++left; --right;
}
lim2 = left;
}
/**
 * Performs an exact nearest neighbor search. The exact search performs a full
 * traversal of the tree. Only the first tree is searched: with exact
 * traversal, additional trees cannot add new results.
 */
template<bool with_removed>
void getExactNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, float epsError) const
{
// checkID -= 1; /* Set a different unique ID for each search. */
if (trees_ > 1) {
fprintf(stderr,"It doesn't make any sense to use more than one tree for exact search");
}
if (trees_>0) {
searchLevelExact<with_removed>(result, vec, tree_roots_[0], 0.0, epsError);
}
}
/**
 * Performs the approximate nearest-neighbor search. The search is approximate
 * because the tree traversal is abandoned after a given number of descends in
 * the tree. Allocates a fresh branch heap per call; see the
 * FLANN_KDTREE_MEM_OPT overload for the heap-reusing variant.
 */
template<bool with_removed>
void getNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, int maxCheck, float epsError) const
{
int i;
BranchSt branch;
int checkCount = 0;
Heap<BranchSt>* heap = new Heap<BranchSt>((int)size_);
// tracks leaves already scored, so multiple trees don't re-check a point
DynamicBitset checked(size_);
/* Search once through each tree down to root. */
for (i = 0; i < trees_; ++i) {
searchLevel<with_removed>(result, vec, tree_roots_[i], 0, checkCount, maxCheck, epsError, heap, checked);
}
/* Keep searching other branches from heap until finished. */
while ( heap->popMin(branch) && (checkCount < maxCheck || !result.full() )) {
searchLevel<with_removed>(result, vec, branch.node, branch.mindist, checkCount, maxCheck, epsError, heap, checked);
}
delete heap;
}
#ifdef FLANN_KDTREE_MEM_OPT
/**
 * Performs the approximate nearest-neighbor search. The search is approximate
 * because the tree traversal is abandoned after a given number of descends in
 * the tree. This variant reuses a caller-provided heap (cleared on entry)
 * instead of allocating one per call.
 */
template<bool with_removed>
void getNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, int maxCheck, float epsError, Heap<BranchSt>* heap) const
{
int i;
BranchSt branch;
int checkCount = 0;
// tracks leaves already scored, so multiple trees don't re-check a point
DynamicBitset checked(size_);
heap->clear();
/* Search once through each tree down to root. */
for (i = 0; i < trees_; ++i) {
searchLevel<with_removed>(result, vec, tree_roots_[i], 0, checkCount, maxCheck, epsError, heap, checked);
}
/* Keep searching other branches from heap until finished. */
while ( heap->popMin(branch) && (checkCount < maxCheck || !result.full() )) {
searchLevel<with_removed>(result, vec, branch.node, branch.mindist, checkCount, maxCheck, epsError, heap, checked);
}
}
#endif
/**
 * Search starting from a given node of the tree. Based on any mismatches at
 * higher levels, all exemplars below this level must have a distance of
 * at least "mindistsq". Descends toward the query, pushing each
 * not-taken branch onto the heap for later best-bin-first expansion.
 */
template<bool with_removed>
void searchLevel(ResultSet<DistanceType>& result_set, const ElementType* vec, NodePtr node, DistanceType mindist, int& checkCount, int maxCheck,
float epsError, Heap<BranchSt>* heap, DynamicBitset& checked) const
{
// prune: this subtree cannot improve on the current worst result
if (result_set.worstDist()<mindist) {
// printf("Ignoring branch, too far\n");
return;
}
/* If this is a leaf node, then do check and return. */
if ((node->child1 == NULL)&&(node->child2 == NULL)) {
int index = node->divfeat;
if (with_removed) {
if (removed_points_.test(index)) return;
}
/* Do not check same node more than once when searching multiple trees. */
if ( checked.test(index) || ((checkCount>=maxCheck)&& result_set.full()) ) return;
checked.set(index);
checkCount++;
DistanceType dist = distance_(node->point, vec, veclen_);
result_set.addPoint(dist,index);
return;
}
/* Which child branch should be taken first? */
ElementType val = vec[node->divfeat];
DistanceType diff = val - node->divval;
NodePtr bestChild = (diff < 0) ? node->child1 : node->child2;
NodePtr otherChild = (diff < 0) ? node->child2 : node->child1;
/* Create a branch record for the branch not taken. Add distance
of this feature boundary (we don't attempt to correct for any
use of this feature in a parent node, which is unlikely to
happen and would have only a small effect). Don't bother
adding more branches to heap after halfway point, as cost of
adding exceeds their value.
*/
DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat);
// if (2 * checkCount < maxCheck || !result.full()) {
if ((new_distsq*epsError < result_set.worstDist())|| !result_set.full()) {
heap->insert( BranchSt(otherChild, new_distsq) );
}
/* Call recursively to search next level down. */
searchLevel<with_removed>(result_set, vec, bestChild, mindist, checkCount, maxCheck, epsError, heap, checked);
}
/**
 * Performs an exact search in the tree starting from a node.
 * Both children are visited (nearer one first); the farther child is
 * skipped only when the eps-scaled lower bound already exceeds the
 * current worst distance.
 */
template<bool with_removed>
void searchLevelExact(ResultSet<DistanceType>& result_set, const ElementType* vec, const NodePtr node, DistanceType mindist, const float epsError) const
{
/* If this is a leaf node, then do check and return. */
if ((node->child1 == NULL)&&(node->child2 == NULL)) {
int index = node->divfeat;
if (with_removed) {
if (removed_points_.test(index)) return; // ignore removed points
}
DistanceType dist = distance_(node->point, vec, veclen_);
result_set.addPoint(dist,index);
return;
}
/* Which child branch should be taken first? */
ElementType val = vec[node->divfeat];
DistanceType diff = val - node->divval;
NodePtr bestChild = (diff < 0) ? node->child1 : node->child2;
NodePtr otherChild = (diff < 0) ? node->child2 : node->child1;
/* Create a branch record for the branch not taken. Add distance
of this feature boundary (we don't attempt to correct for any
use of this feature in a parent node, which is unlikely to
happen and would have only a small effect). Don't bother
adding more branches to heap after halfway point, as cost of
adding exceeds their value.
*/
DistanceType new_distsq = mindist + distance_.accum_dist(val, node->divval, node->divfeat);
/* Call recursively to search next level down. */
searchLevelExact<with_removed>(result_set, vec, bestChild, mindist, epsError);
if (mindist*epsError<=result_set.worstDist()) {
searchLevelExact<with_removed>(result_set, vec, otherChild, new_distsq, epsError);
}
}
/**
 * Inserts point `ind` into the subtree rooted at `node`. At a leaf, the
 * leaf is split into two children along the dimension of maximum span
 * between the stored point and the new point; otherwise the insertion
 * recurses into the side of the splitting plane the point falls on.
 */
void addPointToTree(NodePtr node, int ind)
{
ElementType* point = points_[ind];
if ((node->child1==NULL) && (node->child2==NULL)) {
ElementType* leaf_point = node->point;
ElementType max_span = 0;
size_t div_feat = 0;
// pick the dimension where the two points differ most
// NOTE(review): std::abs on point[i]-leaf_point[i] assumes a signed
// or floating ElementType; for unsigned types the subtraction would
// wrap -- confirm the instantiations used.
for (size_t i=0;i<veclen_;++i) {
ElementType span = std::abs(point[i]-leaf_point[i]);
if (span > max_span) {
max_span = span;
div_feat = i;
}
}
NodePtr left = new(pool_) Node();
left->child1 = left->child2 = NULL;
NodePtr right = new(pool_) Node();
right->child1 = right->child2 = NULL;
// the smaller coordinate goes to the left child
if (point[div_feat]<leaf_point[div_feat]) {
left->divfeat = ind;
left->point = point;
right->divfeat = node->divfeat;
right->point = node->point;
}
else {
left->divfeat = node->divfeat;
left->point = node->point;
right->divfeat = ind;
right->point = point;
}
// former leaf becomes an interior node split at the midpoint
node->divfeat = div_feat;
node->divval = (point[div_feat]+leaf_point[div_feat])/2;
node->child1 = left;
node->child2 = right;
}
else {
if (point[node->divfeat]<node->divval) {
addPointToTree(node->child1,ind);
}
else {
addPointToTree(node->child2,ind);
}
}
}
private:
/**
 * Swaps all state with another index (base-class members, tree count,
 * tree roots and the node pool). Used by the copy-and-swap assignment.
 */
void swap(KDTreeIndex& other)
{
BaseClass::swap(other);
std::swap(trees_, other.trees_);
std::swap(tree_roots_, other.tree_roots_);
std::swap(pool_, other.pool_);
}
private:
enum
{
/**
* To improve efficiency, only SAMPLE_MEAN random values are used to
* compute the mean and variance at each level when building a tree.
* A value of 100 seems to perform as well as using all values.
*/
SAMPLE_MEAN = 100,
/**
* Top random dimensions to consider
*
* When creating random trees, the dimension on which to subdivide is
* selected at random from among the top RAND_DIM dimensions with the
* highest variance. A value of 5 works well.
*/
RAND_DIM=5
};
/**
* Number of randomized trees that are used
*/
int trees_;
DistanceType* mean_;
DistanceType* var_;
/**
* Array of k-d trees used to find neighbours.
*/
std::vector<NodePtr> tree_roots_;
/**
* Pooled memory allocator.
*
* Using a pooled memory allocator is more efficient
* than allocating memory directly when there is a large
* number of small memory allocations.
*/
PooledAllocator pool_;
USING_BASECLASS_SYMBOLS
}; // class KDTreeIndex
}
#endif //FLANN_KDTREE_INDEX_H_
|
omp_parallel_for_firstprivate.c | <ompts:test>
<ompts:testdescription>Test which checks the omp parallel for firstprivate directive.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp parallel for firstprivate</ompts:directive>
<ompts:dependences>omp parallel for reduction,omp parallel for private</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include "omp_testsuite.h"
int <ompts:testcode:functionname>omp_parallel_for_firstprivate</ompts:testcode:functionname>(FILE * logFile)
{
int sum = 0;
int known_sum;
int i2 = 3;
int i;
#pragma omp parallel for reduction(+:sum) <ompts:check>firstprivate(i2)</ompts:check><ompts:crosscheck>private(i2)</ompts:crosscheck>
for (i = 1; i <= LOOPCOUNT; i++)
{
sum = sum + (i + i2);
} /*end of for*/
known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2 + i2 * LOOPCOUNT;
return (known_sum == sum);
} /* end of check_parallel_for_firstprivate */
</ompts:testcode>
</ompts:test>
|
GB_builder.c | //------------------------------------------------------------------------------
// GB_builder: build a matrix from tuples
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// CALLED BY: GB_build, GB_wait, GB_transpose, GB_concat_hyper
// This function is called by GB_build to build a matrix T for GrB_Matrix_build
// or GrB_Vector_build, by GB_wait to build a matrix T from the list of pending
// tuples, and by GB_transpose to transpose a matrix or vector. Duplicates can
// appear if called by GB_build or GB_wait, but not GB_transpose.
// The indices are provided either as (I_input,J_input) or (I_work,J_work), not
// both. The values are provided as S_input or S_work, not both. On return,
// the *work arrays are either transplanted into T, or freed, since they are
// temporary workspaces.
// The work is done in 5 major Steps, some of which can be skipped, depending
// on how the tuples are provided (*_work or *_input), and whether or not they
// are sorted, or have duplicates. If vdim <= 1, some work is skipped (for
// GrB_Vectors, and single-vector GrB_Matrices). Let e be the # of tuples on
// input. Let p be the # of threads used.
// STEP 1: copy user input. O(e/p) read/write per thread, or skipped.
// STEP 2: sort the tuples. Time: O((e log e)/p), read/write, or skipped if
// the tuples are already sorted.
// STEP 3: count vectors and duplicates. O(e/p) reads, per thread, if no
// duplicates, or skipped if already done. O(e/p) read/writes
// per thread if duplicates appear.
// STEP 4: construct T->h and T->p. O(e/p) reads per thread, or skipped if
// T is a vector.
// STEP 5: assemble the tuples. O(e/p) read/writes per thread, or O(1) if the
// values can be transplanted into T as-is.
// For GrB_Matrix_build: If the input (I_input, J_input, S_input) is already
// sorted with no duplicates, and no typecasting needs to be done, then Step 1
// still must be done (each thread does O(e/p) reads of (I_input,J_input) and
// writes to I_work), but Step 1 also does the work for Step 3. Step 2 and 3
// are skipped. Step 4 does O(e/p) reads per thread (J_input only). Then
// I_work is transplanted into T->i. Step 5 does O(e/p) read/writes per thread
// to copy Sx into T->x.
// For GrB_Vector_build: as GrB_Matrix_build, Step 1 does O(e/p) read/writes
// per thread. The input is always a vector, so vdim == 1 always holds. Step
// 2 is skipped if the indices are already sorted, and Step 3 does no work at
// all unless duplicates appear. Step 4 takes no time, for any vector. Step 5
// does O(e/p) reads/writes per thread.
// For GB_wait: the pending tuples are provided as I_work, J_work, and S_work,
// so Step 1 is skipped (no need to check for invalid indices). The input
// J_work may be null (vdim can be anything, since GB_wait is used for both
// vectors and matrices). The tuples might be in sorted order already, which
// is known precisely from A->Pending->sorted. Step 2 does
// O((e log e)/p) work to sort the tuples. Duplicates may appear, and
// out-of-order tuples are likely. Step 3 does O(e/p) read/writes. Step 4
// does O(e/p) reads per thread of (I_work,J_work), or just I_work. Step 5
// does O(e/p) read/writes per thread, or O(1) time if S_work can be
// transplanted into T->x.
// For GB_transpose: uses I_work, J_work, and either S_input (if no op applied
// to the values) or S_work (if an op was applied to the A->x values). This is
// only done for matrices, not vectors, so vdim > 1 will always hold. The
// indices are valid so Step 1 is skipped. The tuples are not sorted, so Step
// 2 takes O((e log e)/p) time to do the sort. There are no duplicates, so
// Step 3 only does O(e/p) reads of J_work to count the vectors in each slice.
// Step 4 only does O(e/p) reads of J_work to compute T->h and T->p. Step 5
// does O(e/p) read/writes per thread, but it uses the simpler case in
// GB_reduce_build_template since no duplicates can appear. It is unlikely
// able to transplant S_work into T->x since the input will almost always be
// unsorted.
// For GB_concat_hyper: uses I_work, J_work, and S_work. No duplicates
// appear. Tuples are not sorted on input. I_work is transplanted into C->i.
// J_work and S_work are freed on output. S_work is not transplanted into
// C->x.
// For iso inputs/outputs: T and Sx have the same iso property. If
// they are iso, then dup is always NULL. Duplicates may or may not appear
// if T and Sx are iso.
// (1) GrB_Matrix_build, GrB_Vector_build, and GB_wait do not pass in an iso
// Sx array, where Sx is S_input for GrB*build, and S_work for GB_wait.
// Sx and Tx are not iso. Duplicates may appear. dup is always present
// for GrB*build, but may be either NULL or non-NULL for GB_wait.
// (2) GxB_Matrix_build_Scalar and GxB_Vector_build_Scalar: always construct
// iso matrices. For those methods Sx and Tx are always iso, and no dup
// operator is passed in (dup is NULL here, which is the implied 2nd
// operator). Duplicates may appear.
// (3) GB_transpose and GB_concat_hyper can pass in Sx as iso or
// non-iso, and always passes in dup as NULL since there are no
// duplicates. Sx and Tx are either both iso, or both non-iso.
// This method always returns T as hypersparse, and T is iso if and only
// if Sx is iso.
#include "GB_build.h"
#include "GB_sort.h"
#include "GB_binop.h"
#ifndef GBCOMPACT
#include "GB_red__include.h"
#endif
#define GB_I_WORK(t) (((t) < 0) ? -1 : I_work [t])
#define GB_J_WORK(t) (((t) < 0) ? -1 : ((J_work == NULL) ? 0 : J_work [t]))
#define GB_K_WORK(t) (((t) < 0) ? -1 : ((K_work == NULL) ? t : K_work [t]))
#define GB_FREE_WORKSPACE \
{ \
GB_WERK_POP (Work, int64_t) ; \
GB_FREE (I_work_handle, *I_work_size_handle) ; \
GB_FREE (J_work_handle, *J_work_size_handle) ; \
GB_FREE (S_work_handle, *S_work_size_handle) ; \
GB_FREE_WORK (&K_work, K_work_size) ; \
}
//------------------------------------------------------------------------------
// GB_builder
//------------------------------------------------------------------------------
GrB_Info GB_builder // build a matrix from tuples
(
GrB_Matrix T, // matrix to build, static or dynamic header
const GrB_Type ttype, // type of output matrix T
const int64_t vlen, // length of each vector of T
const int64_t vdim, // number of vectors in T
const bool is_csc, // true if T is CSC, false if CSR
int64_t **I_work_handle, // for (i,k) or (j,i,k) tuples
size_t *I_work_size_handle,
int64_t **J_work_handle, // for (j,i,k) tuples
size_t *J_work_size_handle,
GB_void **S_work_handle, // array of values of tuples, size ijslen,
// or size 1 if S is iso
size_t *S_work_size_handle,
bool known_sorted, // true if tuples known to be sorted
bool known_no_duplicates, // true if tuples known to not have dupl
int64_t ijslen, // size of I_work and J_work arrays
const bool is_matrix, // true if T a GrB_Matrix, false if vector
const int64_t *restrict I_input,// original indices, size nvals
const int64_t *restrict J_input,// original indices, size nvals
const GB_void *restrict S_input,// array of values of tuples, size nvals,
// or size 1 if S_input or S_work are iso
const bool S_iso, // true if S_input or S_work are iso
const int64_t nvals, // number of tuples, and size of K_work
const GrB_BinaryOp dup, // binary function to assemble duplicates,
// if NULL use the SECOND operator to
// keep the most recent duplicate.
const GrB_Type stype, // the type of S_work or S_input
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (T != NULL) ; // T is a static or dynamic header on input
ASSERT (nvals >= 0) ;
ASSERT_TYPE_OK (ttype, "ttype for builder", GB0) ;
ASSERT_BINARYOP_OK_OR_NULL (dup, "dup for builder", GB0) ;
ASSERT (I_work_handle != NULL) ;
ASSERT (J_work_handle != NULL) ;
ASSERT (S_work_handle != NULL) ;
ASSERT (!GB_OP_IS_POSITIONAL (dup)) ;
ASSERT (I_work_size_handle != NULL) ;
ASSERT (J_work_size_handle != NULL) ;
ASSERT (S_work_size_handle != NULL) ;
//--------------------------------------------------------------------------
// get Sx
//--------------------------------------------------------------------------
GB_void *restrict S_work = (*S_work_handle) ;
const GB_void *restrict Sx = (S_work == NULL) ? S_input : S_work ;
ASSERT (GB_IMPLIES (nvals > 0, Sx != NULL)) ;
ASSERT (GB_IMPLIES (S_iso, ttype == stype)) ;
ASSERT (GB_IMPLIES (S_iso, dup == NULL)) ;
//==========================================================================
// symbolic phase of the build =============================================
//==========================================================================
// The symbolic phase sorts the tuples and finds any duplicates. The
// output matrix T is constructed (not including T->i and T->x), and T->h
// and T->p are computed. Then I_work is transplanted into T->i, or T->i is
// allocated. T->x is then allocated. It is not computed until the
// numeric phase.
// When this function returns, I_work is either freed or transplanted into
// T->i. J_work is freed, and the I_work and J_work pointers (in the
// caller) are set to NULL by setting their handles to NULL. Note that
// J_work may already be NULL on input, if T has one or zero vectors
// (J_work_handle is always non-NULL however).
GrB_Info info ;
int64_t *restrict I_work = (*I_work_handle) ;
int64_t *restrict J_work = (*J_work_handle) ;
int64_t *restrict K_work = NULL ; size_t K_work_size = 0 ;
ASSERT (*J_work_size_handle == GB_Global_memtable_size (J_work)) ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (nvals, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
GB_WERK_DECLARE (Work, int64_t) ;
GB_WERK_PUSH (Work, 5*(nthreads+1), int64_t) ;
if (Work == NULL)
{
// out of memory
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
memset (Work, 0, Work_nitems * sizeof (int64_t)) ;
int64_t *restrict tstart_slice = Work ; // nthreads+1
int64_t *restrict tnvec_slice = Work + (nthreads+1) ; // nthreads+1
int64_t *restrict tnz_slice = Work + 2*(nthreads+1) ; // nthreads+1
int64_t *restrict kbad = Work + 3*(nthreads+1) ; // nthreads
int64_t *restrict ilast_slice = Work + 4*(nthreads+1) ; // nthreads
//--------------------------------------------------------------------------
// partition the tuples for the threads
//--------------------------------------------------------------------------
// Thread tid handles tuples tstart_slice [tid] to tstart_slice [tid+1]-1.
// Each thread handles about the same number of tuples. This partition
// depends only on nvals.
GB_eslice (tstart_slice, nvals, nthreads) ;
// tstart_slice [tid]: first tuple in slice tid
// tnvec_slice [tid]: # of vectors that start in a slice. If a vector
// starts in one slice and ends in another, it is
// counted as being in the first slice.
// tnz_slice [tid]: # of entries in a slice after removing duplicates
// sentinel values for the final cumulative sum
tnvec_slice [nthreads] = 0 ;
tnz_slice [nthreads] = 0 ;
// this becomes true if the first pass computes tnvec_slice and tnz_slice,
// and if the (I_input,J_input) tuples were found to be already sorted with
// no duplicates present.
bool tnvec_and_tnz_slice_computed = false ;
//--------------------------------------------------------------------------
// STEP 1: copy user input and check if valid
//--------------------------------------------------------------------------
// If the indices are provided by (I_input,J_input), then import them into
// (I_work,J_work) and check if they are valid, and sorted. If the input
// happens to be already sorted, then duplicates are detected and the # of
// vectors in each slice is counted.
if (I_work == NULL)
{
//----------------------------------------------------------------------
// allocate I_work
//----------------------------------------------------------------------
// allocate workspace to load and sort the index tuples:
// vdim <= 1: I_work and K_work for (i,k) tuples, where i = I_input [k]
// vdim > 1: also J_work for (j,i,k) tuples where i = I_input [k] and
// j = J_input [k]. If the tuples are found to be already sorted on
// input, then J_work is not allocated, and J_input is used instead.
// The k value in the tuple gives the position in the original set of
// tuples: I_input [k] and Sx [k] when vdim <= 1, and also J_input [k]
// for matrices with vdim > 1.
// The workspace I_work and J_work are allocated here but freed (or
// transplanted) inside GB_builder. K_work is allocated, used, and
// freed in GB_builder.
ASSERT (J_work == NULL) ;
I_work = GB_MALLOC (nvals, int64_t, I_work_size_handle) ;
(*I_work_handle) = I_work ;
ijslen = nvals ;
if (I_work == NULL)
{
// out of memory
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
//----------------------------------------------------------------------
// create the tuples to sort, and check for any invalid indices
//----------------------------------------------------------------------
known_sorted = true ;
bool no_duplicates_found = true ;
if (nvals == 0)
{
// nothing to do
}
else if (is_matrix)
{
//------------------------------------------------------------------
// C is a matrix; check both I_input and J_input
//------------------------------------------------------------------
ASSERT (J_input != NULL) ;
ASSERT (I_work != NULL) ;
ASSERT (vdim >= 0) ;
ASSERT (I_input != NULL) ;
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static) \
reduction(&&:known_sorted) reduction(&&:no_duplicates_found)
for (tid = 0 ; tid < nthreads ; tid++)
{
kbad [tid] = -1 ;
int64_t my_tnvec = 0 ;
int64_t kstart = tstart_slice [tid] ;
int64_t kend = tstart_slice [tid+1] ;
int64_t ilast = (kstart == 0) ? -1 : I_input [kstart-1] ;
int64_t jlast = (kstart == 0) ? -1 : J_input [kstart-1] ;
for (int64_t k = kstart ; k < kend ; k++)
{
// get k-th index from user input: (i,j)
int64_t i = I_input [k] ;
int64_t j = J_input [k] ;
if (i < 0 || i >= vlen || j < 0 || j >= vdim)
{
// halt if out of bounds
kbad [tid] = k ;
break ;
}
// check if the tuples are already sorted
known_sorted = known_sorted &&
((jlast < j) || (jlast == j && ilast <= i)) ;
// check if this entry is a duplicate of the one before it
no_duplicates_found = no_duplicates_found &&
(!(jlast == j && ilast == i)) ;
// copy the tuple into I_work. J_work is done later.
I_work [k] = i ;
if (j > jlast)
{
// vector j starts in this slice (but this is
// valid only if J_input is sorted on input)
my_tnvec++ ;
}
// log the last index seen
ilast = i ; jlast = j ;
}
// these are valid only if I_input and J_input are sorted on
// input, with no duplicates present.
tnvec_slice [tid] = my_tnvec ;
tnz_slice [tid] = kend - kstart ;
}
// collect the report from each thread
for (int tid = 0 ; tid < nthreads ; tid++)
{
if (kbad [tid] >= 0)
{
// invalid index
int64_t i = I_input [kbad [tid]] ;
int64_t j = J_input [kbad [tid]] ;
int64_t row = is_csc ? i : j ;
int64_t col = is_csc ? j : i ;
int64_t nrows = is_csc ? vlen : vdim ;
int64_t ncols = is_csc ? vdim : vlen ;
GB_FREE_WORKSPACE ;
GB_ERROR (GrB_INDEX_OUT_OF_BOUNDS,
"index (" GBd "," GBd ") out of bounds,"
" must be < (" GBd ", " GBd ")",
row, col, nrows, ncols) ;
}
}
// if the tuples were found to be already in sorted order, and if
// no duplicates were found, then tnvec_slice and tnz_slice are now
        // valid.  Otherwise, they can only be computed after sorting.
tnvec_and_tnz_slice_computed = known_sorted && no_duplicates_found ;
//------------------------------------------------------------------
// allocate J_work, if needed
//------------------------------------------------------------------
if (vdim > 1 && !known_sorted)
{
// copy J_input into J_work, so the tuples can be sorted
J_work = GB_MALLOC (nvals, int64_t, J_work_size_handle) ;
(*J_work_handle) = J_work ;
if (J_work == NULL)
{
// out of memory
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
GB_memcpy (J_work, J_input, nvals * sizeof (int64_t), nthreads);
}
else
{
// J_work is a shallow copy of J_input. The pointer is not
// copied into (*J_work_handle), so it will not be freed.
// J_input is not modified, even though it is typecast to the
// int64_t *J_work, since J_work is not modified in this case.
J_work = (int64_t *) J_input ;
}
}
else
{
//------------------------------------------------------------------
// C is a typecasted GrB_Vector; check only I_input
//------------------------------------------------------------------
ASSERT (I_input != NULL) ;
ASSERT (J_input == NULL) ;
ASSERT (vdim == 1) ;
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static) \
reduction(&&:known_sorted) reduction(&&:no_duplicates_found)
for (tid = 0 ; tid < nthreads ; tid++)
{
kbad [tid] = -1 ;
int64_t kstart = tstart_slice [tid] ;
int64_t kend = tstart_slice [tid+1] ;
int64_t ilast = (kstart == 0) ? -1 : I_input [kstart-1] ;
for (int64_t k = kstart ; k < kend ; k++)
{
// get k-th index from user input: (i)
int64_t i = I_input [k] ;
if (i < 0 || i >= vlen)
{
// halt if out of bounds
kbad [tid] = k ;
break ;
}
// check if the tuples are already sorted
known_sorted = known_sorted && (ilast <= i) ;
// check if this entry is a duplicate of the one before it
no_duplicates_found = no_duplicates_found &&
(!(ilast == i)) ;
// copy the tuple into the work arrays to be sorted
I_work [k] = i ;
// log the last index seen
ilast = i ;
}
}
// collect the report from each thread
for (int tid = 0 ; tid < nthreads ; tid++)
{
if (kbad [tid] >= 0)
{
// invalid index
int64_t i = I_input [kbad [tid]] ;
GB_FREE_WORKSPACE ;
GB_ERROR (GrB_INDEX_OUT_OF_BOUNDS,
"index (" GBd ") out of bounds, must be < (" GBd ")",
i, vlen) ;
}
}
}
//----------------------------------------------------------------------
// determine if duplicates are possible
//----------------------------------------------------------------------
// The input is now known to be sorted, or not. If it is sorted, and
// if no duplicates were found, then it is known to have no duplicates.
// Otherwise, duplicates might appear, but a sort is required first to
// check for duplicates.
known_no_duplicates = known_sorted && no_duplicates_found ;
}
//--------------------------------------------------------------------------
// STEP 2: sort the tuples in ascending order
//--------------------------------------------------------------------------
// If the tuples are known to already be sorted, Step 2 is skipped. In
// that case, K_work is NULL (not allocated), which implicitly means that
// K_work [k] = k for all k = 0:nvals-1. K_work is always NULL if Sx and
// Tx are iso.
if (!known_sorted)
{
//----------------------------------------------------------------------
// allocate K_work workspace (not needed if T and Sx are iso)
//----------------------------------------------------------------------
if (!S_iso)
{
// create the k part of each tuple
K_work = GB_MALLOC_WORK (nvals, int64_t, &K_work_size) ;
if (K_work == NULL)
{
// out of memory
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
// The k part of each tuple (i,k) or (j,i,k) records the original
// position of the tuple in the input list. This allows an
// unstable sorting algorithm to be used. Since k is unique, it
// forces the result of the sort to be stable regardless of whether
// or not the sorting algorithm is stable. It also keeps track of
// where the numerical value of the tuple can be found; it is in
// Sx[k] for the tuple (i,k) or (j,i,k), regardless of where the
// tuple appears in the list after it is sorted.
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < nvals ; k++)
{
K_work [k] = k ;
}
}
//----------------------------------------------------------------------
// sort all the tuples
//----------------------------------------------------------------------
if (vdim > 1)
{
//------------------------------------------------------------------
// sort a set of (j,i,k) tuples
//------------------------------------------------------------------
if (S_iso)
{
// K_work is NULL; only sort (j,i)
info = GB_msort_2 (J_work, I_work, nvals, nthreads) ;
}
else
{
info = GB_msort_3 (J_work, I_work, K_work, nvals, nthreads) ;
}
#ifdef GB_DEBUG
if (info == GrB_SUCCESS)
{
int64_t ilast = -1 ;
int64_t jlast = -1 ;
for (int64_t k = 0 ; k < nvals ; k++)
{
int64_t i = I_work [k] ;
int64_t j = J_work [k] ;
ASSERT ((jlast < j) || (jlast == j && ilast <= i)) ;
ilast = i ;
jlast = j ;
}
}
#endif
}
else
{
//------------------------------------------------------------------
// sort a set of (i,k) tuples
//------------------------------------------------------------------
if (S_iso)
{
// K_work is NULL; only sort (i)
info = GB_msort_1 (I_work, nvals, nthreads) ;
}
else
{
info = GB_msort_2 (I_work, K_work, nvals, nthreads) ;
}
#ifdef GB_DEBUG
if (info == GrB_SUCCESS)
{
int64_t ilast = -1 ;
for (int64_t k = 0 ; k < nvals ; k++)
{
int64_t i = I_work [k] ;
ASSERT (ilast <= i) ;
ilast = i ;
}
}
#endif
}
if (info != GrB_SUCCESS)
{
// out of memory in GB_msort_*
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
}
//--------------------------------------------------------------------------
// STEP 3: count vectors and duplicates in each slice
//--------------------------------------------------------------------------
// Duplicates are located, counted and their indices negated. The # of
// vectors in each slice is counted. If the indices are known to not have
// duplicates, then only the vectors are counted. Counting the # of
// vectors is skipped if already done by Step 1.
if (known_no_duplicates)
{
//----------------------------------------------------------------------
// no duplicates: just count # vectors in each slice
//----------------------------------------------------------------------
// This is much faster, particularly if the # of vectors in each slice
// has already been computed.
#ifdef GB_DEBUG
{
// assert that there are no duplicates
int64_t ilast = -1, jlast = -1 ;
for (int64_t t = 0 ; t < nvals ; t++)
{
int64_t i = GB_I_WORK (t), j = GB_J_WORK (t) ;
bool is_duplicate = (i == ilast && j == jlast) ;
ASSERT (!is_duplicate) ;
ilast = i ; jlast = j ;
}
}
#endif
if (vdim <= 1)
{
// all tuples appear in at most one vector, and there are no
// duplicates, so there is no need to scan I_work or J_work.
for (int tid = 0 ; tid < nthreads ; tid++)
{
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
tnvec_slice [tid] = 0 ;
tnz_slice [tid] = tend - tstart ;
}
tnvec_slice [0] = (nvals == 0) ? 0 : 1 ;
}
else
{
// count the # of unique vector indices in J_work. No need to scan
// I_work since there are no duplicates to be found. Also no need
// to compute them if already found in Step 1.
if (!tnvec_and_tnz_slice_computed)
{
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnvec = 0 ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
int64_t jlast = GB_J_WORK (tstart-1) ;
for (int64_t t = tstart ; t < tend ; t++)
{
// get the t-th tuple
int64_t j = J_work [t] ;
if (j > jlast)
{
// vector j starts in this slice
my_tnvec++ ;
jlast = j ;
}
}
tnvec_slice [tid] = my_tnvec ;
tnz_slice [tid] = tend - tstart ;
}
}
}
}
else
{
//----------------------------------------------------------------------
// look for duplicates and count # vectors in each slice
//----------------------------------------------------------------------
for (int tid = 0 ; tid < nthreads ; tid++)
{
int64_t tstart = tstart_slice [tid] ;
ilast_slice [tid] = GB_I_WORK (tstart-1) ;
}
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnvec = 0 ;
int64_t my_ndupl = 0 ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
int64_t ilast = ilast_slice [tid] ;
int64_t jlast = GB_J_WORK (tstart-1) ;
for (int64_t t = tstart ; t < tend ; t++)
{
// get the t-th tuple
int64_t i = I_work [t] ;
int64_t j = GB_J_WORK (t) ;
// tuples are now sorted but there may be duplicates
ASSERT ((jlast < j) || (jlast == j && ilast <= i)) ;
// check if (j,i,k) is a duplicate
if (i == ilast && j == jlast)
{
// flag the tuple as a duplicate
I_work [t] = -1 ;
my_ndupl++ ;
// the sort places earlier duplicate tuples (with smaller
// k) after later ones (with larger k).
ASSERT (GB_K_WORK (t-1) < GB_K_WORK (t)) ;
}
else
{
// this is a new tuple
if (j > jlast)
{
// vector j starts in this slice
my_tnvec++ ;
jlast = j ;
}
ilast = i ;
}
}
tnvec_slice [tid] = my_tnvec ;
tnz_slice [tid] = (tend - tstart) - my_ndupl ;
}
}
//--------------------------------------------------------------------------
// find total # of vectors and duplicates in all tuples
//--------------------------------------------------------------------------
// Replace tnvec_slice with its cumulative sum, after which each slice tid
// will be responsible for the # vectors in T that range from tnvec_slice
// [tid] to tnvec_slice [tid+1]-1.
GB_cumsum (tnvec_slice, nthreads, NULL, 1, NULL) ;
int64_t tnvec = tnvec_slice [nthreads] ;
// Replace tnz_slice with its cumulative sum
GB_cumsum (tnz_slice, nthreads, NULL, 1, NULL) ;
// find the total # of final entries, after assembling duplicates
int64_t tnz = tnz_slice [nthreads] ;
int64_t ndupl = nvals - tnz ;
//--------------------------------------------------------------------------
// allocate T; always hypersparse
//--------------------------------------------------------------------------
// allocate T; allocate T->p and T->h but do not initialize them.
// T is always hypersparse. The header T always exists on input, as
// either a static or dynamic header.
bool static_header = T->static_header ;
info = GB_new (&T, static_header, // always hyper, static or dynamic header
ttype, vlen, vdim, GB_Ap_malloc, is_csc,
GxB_HYPERSPARSE, GB_ALWAYS_HYPER, tnvec, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_FREE_WORKSPACE ;
return (info) ;
}
ASSERT (T->p != NULL) ;
ASSERT (T->h != NULL) ;
ASSERT (T->b == NULL) ;
ASSERT (T->i == NULL) ;
ASSERT (T->x == NULL) ;
T->iso = S_iso ; // OK: T is iso if and only if Sx is iso
bool do_burble = (vlen > 1 || vdim > 1) && (nvals > 1) ;
if (do_burble)
{
if (S_iso)
{
GBURBLE ("(iso build) ") ;
}
else
{
GBURBLE ("(build) ") ;
}
}
//--------------------------------------------------------------------------
// STEP 4: construct the vector pointers and hyperlist for T
//--------------------------------------------------------------------------
// Step 4 scans the J_work indices and constructs T->h and T->p.
int64_t *restrict Th = T->h ;
int64_t *restrict Tp = T->p ;
if (vdim <= 1)
{
//----------------------------------------------------------------------
// special case for vectors
//----------------------------------------------------------------------
ASSERT (tnvec == 0 || tnvec == 1) ;
if (tnvec > 0)
{
Th [0] = 0 ;
Tp [0] = 0 ;
}
}
else if (ndupl == 0)
{
//----------------------------------------------------------------------
// no duplicates appear
//----------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnvec = tnvec_slice [tid] ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
int64_t jlast = GB_J_WORK (tstart-1) ;
for (int64_t t = tstart ; t < tend ; t++)
{
// get the t-th tuple
int64_t j = GB_J_WORK (t) ;
if (j > jlast)
{
// vector j starts in this slice
Th [my_tnvec] = j ;
Tp [my_tnvec] = t ;
my_tnvec++ ;
jlast = j ;
}
}
}
}
else
{
//----------------------------------------------------------------------
// it is known that at least one duplicate appears
//----------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (tid = 0 ; tid < nthreads ; tid++)
{
int64_t my_tnz = tnz_slice [tid] ;
int64_t my_tnvec = tnvec_slice [tid] ;
int64_t tstart = tstart_slice [tid] ;
int64_t tend = tstart_slice [tid+1] ;
int64_t jlast = GB_J_WORK (tstart-1) ;
for (int64_t t = tstart ; t < tend ; t++)
{
// get the t-th tuple
int64_t i = I_work [t] ;
int64_t j = GB_J_WORK (t) ;
if (i >= 0)
{
// this is a new tuple
if (j > jlast)
{
// vector j starts in this slice
Th [my_tnvec] = j ;
Tp [my_tnvec] = my_tnz ;
my_tnvec++ ;
jlast = j ;
}
my_tnz++ ;
}
}
}
}
// log the end of the last vector
T->nvec_nonempty = tnvec ;
T->nvec = tnvec ;
Tp [tnvec] = tnz ;
ASSERT (T->nvec == T->plen) ;
T->magic = GB_MAGIC ;
//--------------------------------------------------------------------------
// free J_work if it exists
//--------------------------------------------------------------------------
ASSERT (J_work_handle != NULL) ;
GB_FREE (J_work_handle, *J_work_size_handle) ;
J_work = NULL ;
//--------------------------------------------------------------------------
// allocate T->i
//--------------------------------------------------------------------------
if (ndupl == 0)
{
// shrink I_work from size ijslen to size tnz
if (tnz < ijslen)
{
// this cannot fail since the size is shrinking.
bool ok ;
GB_REALLOC (I_work, tnz, int64_t, I_work_size_handle, &ok, Context);
ASSERT (ok) ;
}
// transplant I_work into T->i
T->i = I_work ; T->i_size = (*I_work_size_handle) ;
I_work = NULL ;
(*I_work_handle) = NULL ;
(*I_work_size_handle) = 0 ;
}
else
{
// duplicates exist, so allocate a new T->i. I_work must be freed later
T->i = GB_MALLOC (tnz, int64_t, &(T->i_size)) ;
if (T->i == NULL)
{
// out of memory
GB_phbix_free (T) ;
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
}
int64_t *restrict Ti = T->i ;
//==========================================================================
// numerical phase of the build: assemble any duplicates
//==========================================================================
// The tuples have been sorted. Assemble any duplicates with a switch
// factory of built-in workers, or four generic workers. The vector
// pointers T->p and hyperlist T->h (if hypersparse) have already been
// computed.
// If there are no duplicates, T->i holds the row indices of the tuple.
// Otherwise, the row indices are still in I_work. K_work holds the
// positions of each tuple in the array Sx. The tuples are sorted so that
// duplicates are adjacent to each other and they appear in the order they
// appeared in the original tuples. This method assembles the duplicates
    // and computes T->i and T->x from I_work, K_work, and Sx.  If no
    // duplicates appear, T->i is already computed, and Sx just
// needs to be copied and permuted into T->x.
// The (i,k,Sx[k]) tuples are held in two integer arrays: (1) I_work or
// T->i, and (2) K_work, and an array Sx of numerical values. Sx has not
// been sorted, nor even accessed yet. It is identical to the original
// unsorted tuples. The (i,k,Sx[k]) tuple holds the row index i, the
// position k, and the value Sx [k]. This entry becomes T(i,j) = Sx [k] in
// the matrix T, and duplicates (if any) are assembled via the dup
// operator.
//--------------------------------------------------------------------------
// get opcodes and check types
//--------------------------------------------------------------------------
// With GB_build, there can be 1 to 2 different types.
// T->type is identical to the types of x,y,z for z=dup(x,y).
// dup is never NULL and all its three types are the same
// The type of Sx (stype) can different but must be compatible
// with T->type
// With GB_wait, there can be 1 to 5 different types:
// The pending tuples are in Sx, of type stype which must be
// compatible with dup->ytype and T->type
// z = dup (x,y): can be NULL or have 1 to 3 different types
// T->type: must be compatible with all above types.
// dup may be NULL, in which case it is assumed be the implicit SECOND
// operator, with all three types equal to T->type
GrB_Type xtype, ytype, ztype ;
GxB_binary_function fdup ;
#ifndef GBCOMPACT
GB_Opcode opcode ;
#endif
GB_Type_code tcode = ttype->code ;
const size_t tsize = ttype->size ;
bool op_2nd ;
ASSERT_TYPE_OK (ttype, "ttype for build_factory", GB0) ;
if (dup == NULL)
{
//----------------------------------------------------------------------
// dup is the implicit SECOND operator
//----------------------------------------------------------------------
// z = SECOND (x,y) where all three types are the same as ttype
// T(i,j) = (ttype) Sx(k) will be done for all tuples.
#ifndef GBCOMPACT
opcode = GB_SECOND_binop_code ;
#endif
xtype = ttype ;
ytype = ttype ;
ztype = ttype ;
fdup = NULL ;
op_2nd = true ;
ASSERT (GB_op_is_second (dup, ttype)) ;
}
else
{
//----------------------------------------------------------------------
// dup is an explicit operator
//----------------------------------------------------------------------
// T(i,j) = (ttype) Sx[k] will be done for the first tuple.
// for subsequent tuples: T(i,j) += Sx[k], via the dup operator and
// typecasting:
//
// y = (dup->ytype) Sx[k]
// x = (dup->xtype) T(i,j)
// z = (dup->ztype) dup (x,y)
// T(i,j) = (ttype) z
ASSERT_BINARYOP_OK (dup, "dup for build_factory", GB0) ;
ASSERT (!S_iso) ;
#ifndef GBCOMPACT
opcode = dup->opcode ;
#endif
xtype = dup->xtype ;
ytype = dup->ytype ;
ztype = dup->ztype ;
fdup = dup->binop_function ;
op_2nd = GB_op_is_second (dup, ttype) ;
}
//--------------------------------------------------------------------------
// get the sizes and codes of each type
//--------------------------------------------------------------------------
GB_Type_code zcode = ztype->code ;
GB_Type_code xcode = xtype->code ;
GB_Type_code ycode = ytype->code ;
ASSERT (GB_Type_compatible (ttype, stype)) ; // T(i,j) = (ttype) Sx
ASSERT (GB_Type_compatible (ytype, stype)) ; // y = (ytype) Sx
ASSERT (GB_Type_compatible (xtype, ttype)) ; // x = (xtype) T(i,j)
ASSERT (GB_Type_compatible (ttype, ztype)) ; // T(i,j) = (ttype) z
size_t zsize = ztype->size ;
size_t xsize = xtype->size ;
size_t ysize = ytype->size ;
// no typecasting if all 5 types are the same
bool nocasting = (ttype == stype) &&
(ttype == xtype) && (ttype == ytype) && (ttype == ztype) ;
ASSERT_TYPE_OK (ttype, "ttype for build_factory", GB0) ;
ASSERT_TYPE_OK (stype, "stype for build_factory", GB0) ;
ASSERT_TYPE_OK (xtype, "xtype for build_factory", GB0) ;
ASSERT_TYPE_OK (ytype, "ytype for build_factory", GB0) ;
ASSERT_TYPE_OK (ztype, "ztype for build_factory", GB0) ;
//--------------------------------------------------------------------------
// STEP 5: assemble the tuples
//--------------------------------------------------------------------------
bool copy_S_into_T = (nocasting && known_sorted && ndupl == 0) ;
if (copy_S_into_T && S_work != NULL)
{
//----------------------------------------------------------------------
// transplant S_work into T->x
//----------------------------------------------------------------------
// No typecasting is needed, the tuples were originally in sorted
// order, and no duplicates appear. All that is required is to copy Sx
// into Tx. Sx can be directly transplanted into T->x since Sx is
// provided as S_work. GB_builder must either transplant or free
// S_work. The transplant can be used by GB_wait, whenever the tuples
// are already sorted, with no duplicates, and no typecasting is
// needed, since S_work is always A->Pending->x. T and Sx may be iso
// or non-iso.
T->x = S_work ; T->x_size = (*S_work_size_handle) ;
S_work = NULL ;
(*S_work_handle) = NULL ;
(*S_work_size_handle) = 0 ;
int64_t tx_size_required = tnz * tsize ;
if (2 * tx_size_required < T->x_size)
{
// shrink the size of T->x
bool ok = true ;
GB_REALLOC (T->x, tx_size_required, GB_void, &(T->x_size), &ok,
Context) ;
}
}
else
{
//----------------------------------------------------------------------
// allocate T->x
//----------------------------------------------------------------------
T->x = GB_XALLOC (false, S_iso, tnz, tsize, &(T->x_size)) ; // x:OK
if (T->x == NULL)
{
// out of memory
GB_phbix_free (T) ;
GB_FREE_WORKSPACE ;
return (GrB_OUT_OF_MEMORY) ;
}
GB_void *restrict Tx = (GB_void *) T->x ;
ASSERT (GB_IMPLIES (nvals > 0, Sx != NULL)) ;
if (nvals == 0)
{
// nothing to do
}
else if (copy_S_into_T)
{
//------------------------------------------------------------------
// copy Sx into T->x
//------------------------------------------------------------------
// No typecasting is needed, the tuples were originally in sorted
// order, and no duplicates appear. All that is required is to
// copy Sx into Tx. Sx cannot be transplanted into T->x since
// S_work is NULL and S_input cannot be modified by GB_builder.
ASSERT (S_work == NULL) ;
ASSERT (Sx == S_input) ;
GB_memcpy (Tx, Sx, (S_iso ? 1 : nvals) * tsize, nthreads) ;
}
else if (nocasting)
{
//------------------------------------------------------------------
// assemble the values, Sx, into T, no typecasting needed
//------------------------------------------------------------------
// Sx (either S_work or S_input) must be permuted and copied into
// T->x, since the tuples had to be sorted, or duplicates appear.
// Any duplicates are now assembled.
// There are 44 common cases of this function for built-in types
// and 8 associative operators: MIN, MAX, PLUS, TIMES for 10 types
            // (all but boolean); and OR, AND, XOR, and EQ for boolean.
// In addition, the FIRST and SECOND operators are hard-coded, for
// another 22 workers, since SECOND is used by GB_wait and since
// FIRST is useful for keeping the first tuple seen. It is
// controlled by the GB_INCLUDE_SECOND_OPERATOR definition, so they
// do not appear in GB_reduce_to_* where the FIRST and SECOND
// operators are not needed.
// Early exit cannot be exploited, so the terminal is ignored.
bool done = false ;
if (S_iso)
{
//--------------------------------------------------------------
// T and Sx are iso; set iso value and delete duplicates
//--------------------------------------------------------------
memcpy (Tx, Sx, tsize) ;
#define GB_ISO_BUILD
#include "GB_reduce_build_template.c"
done = true ;
}
else
{
//--------------------------------------------------------------
// T and Sx are not iso; call in the workers
//--------------------------------------------------------------
#ifndef GBCOMPACT
//----------------------------------------------------------
// define the worker for the switch factory
//----------------------------------------------------------
#define GB_INCLUDE_SECOND_OPERATOR
#define GB_red(opname,aname) \
GB (_red_build_ ## opname ## aname)
#define GB_RED_WORKER(opname,aname,atype) \
{ \
info = GB_red (opname, aname) ((atype *) Tx, Ti, \
(atype *) Sx, nvals, ndupl, I_work, K_work, \
tstart_slice, tnz_slice, nthreads) ; \
done = (info != GrB_NO_VALUE) ; \
} \
break ;
//----------------------------------------------------------
// launch the switch factory
//----------------------------------------------------------
// controlled by opcode and typecode
GB_Type_code typecode = tcode ;
#include "GB_red_factory.c"
#endif
}
//------------------------------------------------------------------
// generic worker
//------------------------------------------------------------------
if (!done)
{
if (do_burble) GBURBLE ("(generic build) ") ;
//--------------------------------------------------------------
// no typecasting, but use the fdup function pointer and memcpy
//--------------------------------------------------------------
// Either the fdup operator or type of Sx and T are
// user-defined, or fdup is not an associative operator handled
// by the GB_red_factory, or some combination of these
// conditions. User-defined types cannot be typecasted, so
// this handles all user-defined types.
// Tx [p] = (ttype) Sx [k], but with no typecasting
#undef GB_CAST_ARRAY_TO_ARRAY
#define GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
memcpy (Tx +((p)*tsize), Sx +((k)*tsize), tsize) ;
if (op_2nd)
{
//----------------------------------------------------------
// dup is the SECOND operator, with no typecasting
//----------------------------------------------------------
// Tx [p] += (ttype) Sx [k], but 2nd op and no typecasting
#undef GB_ADD_CAST_ARRAY_TO_ARRAY
#define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k)
#include "GB_reduce_build_template.c"
}
else
{
//----------------------------------------------------------
// dup is another operator, with no typecasting needed
//----------------------------------------------------------
// Tx [p] += (ttype) Sx [k], but with no typecasting
#undef GB_ADD_CAST_ARRAY_TO_ARRAY
#define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
fdup (Tx +((p)*tsize), Tx +((p)*tsize), Sx+((k)*tsize));
#include "GB_reduce_build_template.c"
}
}
}
else
{
//------------------------------------------------------------------
// assemble the values Sx into T, typecasting as needed
//------------------------------------------------------------------
if (do_burble)
{
GBURBLE ("(generic build with typecast) ") ;
}
// If T and Sx are iso, no typecasting is ever done, so this method
// is not used in that case.
ASSERT (!S_iso) ;
// Sx (either S_work or S_input) must be permuted and copied into
// T->x, since the tuples had to be sorted, or duplicates appear.
// Any duplicates are now assembled. Not all of the 5 types are
// the same, but all of them are built-in since user-defined types
// cannot be typecasted.
const GB_Type_code scode = stype->code ;
const size_t ssize = stype->size ;
GB_cast_function cast_S_to_T = GB_cast_factory (tcode, scode) ;
GB_cast_function cast_S_to_Y = GB_cast_factory (ycode, scode) ;
GB_cast_function cast_T_to_X = GB_cast_factory (xcode, tcode) ;
GB_cast_function cast_Z_to_T = GB_cast_factory (tcode, zcode) ;
ASSERT (scode <= GB_FC64_code) ;
ASSERT (tcode <= GB_FC64_code) ;
ASSERT (xcode <= GB_FC64_code) ;
ASSERT (ycode <= GB_FC64_code) ;
ASSERT (zcode <= GB_FC64_code) ;
// Tx [p] = (ttype) Sx [k], with typecasting
#undef GB_CAST_ARRAY_TO_ARRAY
#define GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
cast_S_to_T (Tx +((p)*tsize), Sx +((k)*ssize), ssize) ;
if (op_2nd)
{
//--------------------------------------------------------------
// dup operator is the SECOND operator, with typecasting
//--------------------------------------------------------------
// Tx [p] += (ttype) Sx [k], but 2nd op, with typecasting
#undef GB_ADD_CAST_ARRAY_TO_ARRAY
#define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
GB_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k)
#include "GB_reduce_build_template.c"
}
else
{
//--------------------------------------------------------------
// dup is another operator, with typecasting required
//--------------------------------------------------------------
// Tx [p] += Sx [k], with typecasting
#undef GB_ADD_CAST_ARRAY_TO_ARRAY
#define GB_ADD_CAST_ARRAY_TO_ARRAY(Tx,p,Sx,k) \
{ \
/* ywork = (ytype) Sx [k] */ \
GB_void ywork [GB_VLA(ysize)] ; \
cast_S_to_Y (ywork, Sx +((k)*ssize), ssize) ; \
/* xwork = (xtype) Tx [p] */ \
GB_void xwork [GB_VLA(xsize)] ; \
cast_T_to_X (xwork, Tx +((p)*tsize), tsize) ; \
/* zwork = f (xwork, ywork) */ \
GB_void zwork [GB_VLA(zsize)] ; \
fdup (zwork, xwork, ywork) ; \
/* Tx [tnz-1] = (ttype) zwork */ \
cast_Z_to_T (Tx +((p)*tsize), zwork, zsize) ; \
}
#include "GB_reduce_build_template.c"
}
}
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORKSPACE ;
T->jumbled = false ;
ASSERT_MATRIX_OK (T, "T built", GB0) ;
ASSERT (GB_IS_HYPERSPARSE (T)) ;
return (GrB_SUCCESS) ;
}
|
test_task_forkjoin.c | //===-- test_task_forkjoin.c - Test task execution at join ----------*- C++ -*-===//
//
// Part of the LOMP project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
//
// This file has been modified from the file
// openmp/runtime/test/tasking/omp_task.c
// of the LLVM project (https://github.com/llvm/llvm-project)
// under the Apache License v2.0 with LLVM Exceptions.
//
//===----------------------------------------------------------------------===//
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <omp.h>
#include "tests.h"
#define DEBUG 0
#if (DEBUG)
#define dprintf(...) printf(__VA_ARGS__)
#else
#define dprintf(...) (void)0
#endif
/* Spawn NUM_TASKS tasks from a single thread and record, per task, the id of
 * the thread that executed it.  Returns nonzero (pass) when more than one
 * distinct thread executed the tasks, 0 (fail) when all tasks ran on the
 * same thread -- note the inverted convention relative to main(). */
int test_omp_task(void) {
  int tids[NUM_TASKS];
#pragma omp parallel shared(tids)
  {
#pragma omp single nowait
    {
      for (int i = 0; i < NUM_TASKS; i++) {
        printf("Create task %d (&tids: %p, &i: %p)\n", i, &tids, &i);
#pragma omp task firstprivate(i), shared(tids)
        {
          int me = omp_get_thread_num();
          dprintf("%d: &tids: %p, &i: %p, i: %d\n", me, &tids, &i, i);
          if (!(0 <= i && i < NUM_TASKS)) {
            printf("%d: i (%d) out of range (0 <= i < %d)\n", me, i, NUM_TASKS);
          }
          dprintf("%d: i: %d\n", me, i);
          sleep(SLEEPTIME);
          /* BUG FIX: the guard here was inverted -- tids[i] was written only
           * when i was OUT of range, so tids[] stayed uninitialized and the
           * final comparison below read indeterminate values (UB).  Record
           * the executing thread id for every in-range i. */
          if (0 <= i && i < NUM_TASKS) {
            tids[i] = me;
          }
          printf("Executed task %d in thread %d\n", i, me);
        } /* end of omp task */
      } /* end of for */
    } /* end of single */
  } /* end of parallel */
  /* Now we check that more than one thread executed the tasks. */
  for (int i = 1; i < NUM_TASKS; i++) {
    if (tids[0] != tids[i])
      return 1;
  }
  return 0;
}
/* Run test_omp_task() REPETITIONS times.  test_omp_task() returns nonzero on
 * pass (tasks ran on more than one thread) and 0 on fail, so !result counts
 * a failure.  Exit status reflects whether any repetition failed. */
int main(void) {
  int i;
  int num_failed = 0;
  if (omp_get_max_threads() < 2) {
    /* BUG FIX: the check requires at least 2 threads, but the message said
     * ">2 threads".  The message now matches the condition.  The test is
     * still attempted (it will legitimately fail with one thread). */
    printf("Not enough threads for this test! Need >= 2 threads!\n");
  }
  // omp_set_num_threads(8);
  for (i = 0; i < REPETITIONS; i++) {
    if (!test_omp_task()) {
      num_failed++;
    }
  }
  return num_failed != 0 ? EXIT_FAILURE : EXIT_SUCCESS;
}
|
zgbsv.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_gbsv
*
* Computes the solution to a system of linear equations A * X = B,
* using the LU factorization computed by plasma_zgbtrf.
*
*******************************************************************************
*
* @param[in] n
* The number of columns of the matrix A. n >= 0.
*
* @param[in] kl
* The number of subdiagonals within the band of A. kl >= 0.
*
* @param[in] ku
* The number of superdiagonals within the band of A. ku >= 0.
*
* @param[in] nrhs
* The number of right hand sides, i.e., the number of
* columns of the matrix B. nrhs >= 0.
*
* @param[in,out] AB
* Details of the LU factorization of the band matrix A, as
* computed by plasma_zgbtrf.
*
* @param[in] ldab
* The leading dimension of the array AB.
*
* @param[out] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
* @param[in,out] B
* On entry, the n-by-nrhs right hand side matrix B.
* On exit, if return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,n).
*
******************************************************************************/
int plasma_zgbsv(int n, int kl, int ku, int nrhs,
                 plasma_complex64_t *pAB, int ldab, int *ipiv,
                 plasma_complex64_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments (negative return value = position of bad arg).
    if (n < 0) {
        plasma_error("illegal value of n");
        return -1;
    }
    if (kl < 0) {
        plasma_error("illegal value of kl");
        return -2;
    }
    if (ku < 0) {
        plasma_error("illegal value of ku");
        return -3;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -4;
    }
    if (ldab < imax(1, 1+kl+ku)) {
        plasma_error("illegal value of ldab");
        return -6;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -8;
    }

    // quick return
    if (imin(n, nrhs) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_gbtrf(plasma, PlasmaComplexDouble, n, kl+ku+1);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize barrier.
    plasma_barrier_init(&plasma->barrier);

    // Create tile matrices.
    plasma_desc_t AB;
    plasma_desc_t B;
    int tku = (ku+kl+nb-1)/nb; // number of tiles in upper band (not including diagonal)
    int tkl = (kl+nb-1)/nb;    // number of tiles in lower band (not including diagonal)
    int lm = (tku+tkl+1)*nb;   // since we use zgetrf on panel, we pivot back within panel.
                               // this could fill the last tile of the panel,
                               // and we need extra NB space on the bottom
    int retval;
    retval = plasma_desc_general_band_create(PlasmaComplexDouble, PlasmaGeneral,
                                             nb, nb, lm, n, 0, 0, n, n, kl, ku, &AB);
    if (retval != PlasmaSuccess) {
        // BUG FIX: the message previously named plasma_desc_general_create(),
        // which is a different function than the one that failed here.
        plasma_error("plasma_desc_general_band_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&AB);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    // BUG FIX: the results of sequence/request initialization were ignored;
    // on failure the tile descriptors must be released before returning.
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&B);
        plasma_desc_destroy(&AB);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&B);
        plasma_desc_destroy(&AB);
        return retval;
    }

    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zpb2desc(pAB, ldab, AB, &sequence, &request);
        plasma_omp_zge2desc(pB, ldb, B, &sequence, &request);
    }

    #pragma omp parallel
    #pragma omp master
    {
        // Call the tile async function.
        plasma_omp_zgbsv(AB, ipiv, B, &sequence, &request);
    }

    #pragma omp parallel
    #pragma omp master
    {
        // Translate back to LAPACK layout.
        plasma_omp_zdesc2pb(AB, pAB, ldab, &sequence, &request);
        plasma_omp_zdesc2ge(B, pB, ldb, &sequence, &request);
    }

    // Free matrices in tile layout.
    plasma_desc_destroy(&B);
    plasma_desc_destroy(&AB);

    // Return status.
    int status = sequence.status;
    return status;
}
/***************************************************************************//**
*
* Computes the solution to a system of linear equations A * X = B,
* using the LU factorization computed by plasma_zgbtrf.
* Non-blocking tile version of plasma_zgbsv().
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in,out] AB
* Descriptor of matrix A.
*
* @param[out] ipiv
* The pivot indices; for 1 <= i <= min(m,n), row i of the
* matrix was interchanged with row ipiv(i).
*
* @param[in,out] B
* Descriptor of right-hand-sides B.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
******************************************************************************/
void plasma_omp_zgbsv(plasma_desc_t AB, int *ipiv, plasma_desc_t B,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        if (sequence != NULL && request != NULL)
            plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // BUG FIX: sequence and request must be validated before they are passed
    // to plasma_request_fail() in the descriptor checks below; the original
    // order could dereference a NULL sequence/request while reporting the
    // descriptor error.
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(AB) != PlasmaSuccess) {
        plasma_error("invalid AB");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (AB.n == 0 || B.n == 0)
        return;

    // Band LU factorization AB = P*L*U (fills ipiv).
    plasma_pzgbtrf(AB, ipiv, sequence, request);

    // Forward substitution with the unit lower band factor (applies pivots).
    plasma_pztbsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans,
                  PlasmaUnit,
                  1.0, AB,
                       B,
                  ipiv,
                  sequence, request);

    // Back substitution with the non-unit upper band factor.
    plasma_pztbsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans,
                  PlasmaNonUnit,
                  1.0, AB,
                       B,
                  ipiv,
                  sequence, request);
}
|
SpatialReflectionPadding.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialReflectionPadding.c"
#else
/* Reflection-pad one sample: for every output pixel (i, j) of each of the
 * nslices planes, compute the mirrored source coordinate (ip_y, ip_x) in the
 * input plane and copy that input value to the output.
 * Mapping per axis (shown for x): j < pad_l mirrors about the left edge
 * (ip_x = pad_l*2 - j), the middle band is the identity, and the right band
 * mirrors about the right edge.  The oStart/iStart offsets handle negative
 * padding (cropping): fmax(0, -pad) skips cropped input columns/rows.
 * Slices are processed in parallel; each thread writes a disjoint plane. */
static void THNN_(SpatialReflectionPadding_updateOutput_frame)(
  real *input_p, real *output_p,
  long nslices,
  long iwidth, long iheight,
  long owidth, long oheight,
  int pad_l, int pad_r,
  int pad_t, int pad_b)
{
  /* first valid input/output column and row under negative padding */
  int iStartX = fmax(0, -pad_l);
  int iStartY = fmax(0, -pad_t);
  int oStartX = fmax(0, pad_l);
  int oStartY = fmax(0, pad_t);

  long k, ip_x, ip_y;
#pragma omp parallel for private(k, ip_x, ip_y)
  for (k = 0; k < nslices; k++)
  {
    long i, j;
    for (i = 0; i < oheight; i++) {
      for (j = 0; j < owidth; j++) {
        /* reflected source column in padded coordinates */
        if (j < pad_l) {
          ip_x = pad_l * 2 - j;
        } else if (j >= pad_l && j < iwidth + pad_l) {
          ip_x = j;
        } else {
          ip_x = (iwidth + pad_l - 1) * 2 - j;
        }
        /* shift into input coordinates (handles cropping) */
        ip_x = ip_x - oStartX + iStartX;

        /* reflected source row, same scheme as above */
        if (i < pad_t) {
          ip_y = pad_t * 2 - i;
        } else if (i >= pad_t && i < iheight + pad_t) {
          ip_y = i;
        } else {
          ip_y = (iheight + pad_t - 1) * 2 - i;
        }
        ip_y = ip_y - oStartY + iStartY;

        real *dest_p = output_p + k*owidth*oheight + i * owidth + j;
        real *src_p = input_p + k*iwidth*iheight + ip_y * iwidth + ip_x;
        *dest_p = *src_p;
      }
    }
  }
}
/* Forward pass of 2D reflection padding.  Accepts a 3D (C,H,W) or 4D
 * (N,C,H,W) input, resizes output to the padded shape, and fills it via
 * SpatialReflectionPadding_updateOutput_frame (one call per batch sample,
 * batch samples processed in parallel). */
void THNN_(SpatialReflectionPadding_updateOutput)(THNNState *state,
                                                  THTensor *input,
                                                  THTensor *output,
                                                  int pad_l, int pad_r,
                                                  int pad_t, int pad_b)
{
  int dimw = 2;
  int dimh = 1;
  int dimslices = 0;
  long nbatch = 1;
  long nslices;
  long iheight;
  long iwidth;
  long oheight;
  long owidth;
  real *input_data;
  real *output_data;

  THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input,
                "3D or 4D (batch mode) tensor expected for input, but got: %s");

  if (input->nDimension == 4)
  {
    nbatch = input->size[0];
    dimw++;
    dimh++;
    dimslices++;
  }

  /* sizes */
  nslices = input->size[dimslices];
  iheight = input->size[dimh];
  iwidth = input->size[dimw];
  oheight = iheight + pad_t + pad_b;
  owidth  = iwidth + pad_l + pad_r;

  /* Reflection mirrors about the edge pixels, so each pad must be strictly
     smaller than the corresponding input dimension; otherwise the reflected
     source index falls outside the input plane (out-of-bounds read). */
  THArgCheck(pad_l < iwidth && pad_r < iwidth, 4,
             "Padding size should be less than the corresponding input "
             "dimension, but got: padding (%d, %d) at dimension %d of input",
             pad_l, pad_r, dimw);
  THArgCheck(pad_t < iheight && pad_b < iheight, 5,
             "Padding size should be less than the corresponding input "
             "dimension, but got: padding (%d, %d) at dimension %d of input",
             pad_t, pad_b, dimh);

  /* BUG FIX: both output dimensions must be positive.  The original test
     used || and accepted e.g. owidth == 0 as long as oheight >= 1. */
  THArgCheck(owidth >= 1 && oheight >= 1 , 2,
             "input (H: %d, W: %d)is too small."
             " Calculated output H: %d W: %d",
             iheight, iwidth, oheight, owidth);

  /* get contiguous input */
  input = THTensor_(newContiguous)(input);

  /* resize output */
  if (input->nDimension == 3)
  {
    THTensor_(resize3d)(output, nslices, oheight, owidth);
    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
    THNN_(SpatialReflectionPadding_updateOutput_frame)(input_data, output_data,
                                                       nslices,
                                                       iwidth, iheight,
                                                       owidth, oheight,
                                                       pad_l, pad_r,
                                                       pad_t, pad_b);
  }
  else
  {
    long p;
    THTensor_(resize4d)(output, nbatch, nslices, oheight, owidth);
    input_data = THTensor_(data)(input);
    output_data = THTensor_(data)(output);
#pragma omp parallel for private(p)
    for (p = 0; p < nbatch; p++)
    {
      THNN_(SpatialReflectionPadding_updateOutput_frame)(
        input_data+p*nslices*iwidth*iheight,
        output_data+p*nslices*owidth*oheight,
        nslices,
        iwidth, iheight,
        owidth, oheight,
        pad_l, pad_r,
        pad_t, pad_b);
    }
  }

  /* cleanup */
  THTensor_(free)(input);
}
/* Backward pass for one sample: for every output gradient pixel (i, j),
 * compute the same reflected source coordinate (ip_y, ip_x) as the forward
 * pass and ACCUMULATE the gradient there (+=), since several output pixels
 * may map to the same input pixel.  Parallel over slices; each thread
 * accumulates only into its own plane k, so the += is race-free. */
static void THNN_(SpatialReflectionPadding_updateGradInput_frame)(
  real *ginput_p, real *goutput_p,
  long nslices,
  long iwidth, long iheight,
  long owidth, long oheight,
  int pad_l, int pad_r,
  int pad_t, int pad_b)
{
  /* first valid input/output column and row under negative padding */
  int iStartX = fmax(0, -pad_l);
  int iStartY = fmax(0, -pad_t);
  int oStartX = fmax(0, pad_l);
  int oStartY = fmax(0, pad_t);

  long k, ip_x, ip_y;
#pragma omp parallel for private(k, ip_x, ip_y)
  for (k = 0; k < nslices; k++)
  {
    long i, j;
    for (i = 0; i < oheight; i++) {
      for (j = 0; j < owidth; j++) {
        /* reflected source column (mirrors of the forward mapping) */
        if (j < pad_l) {
          ip_x = pad_l * 2 - j;
        } else if (j >= pad_l && j < iwidth + pad_l) {
          ip_x = j;
        } else {
          ip_x = (iwidth + pad_l - 1) * 2 - j;
        }
        ip_x = ip_x - oStartX + iStartX;

        /* reflected source row */
        if (i < pad_t) {
          ip_y = pad_t * 2 - i;
        } else if (i >= pad_t && i < iheight + pad_t) {
          ip_y = i;
        } else {
          ip_y = (iheight + pad_t - 1) * 2 - i;
        }
        ip_y = ip_y - oStartY + iStartY;

        real *src_p = goutput_p + k*owidth*oheight + i * owidth + j;
        real *dest_p = ginput_p + k*iwidth*iheight + ip_y * iwidth + ip_x;
        *dest_p += *src_p;
      }
    }
  }
}
/* Backward pass of 2D reflection padding: zeroes gradInput (resized to match
 * input) and scatters/accumulates gradOutput through the reflection mapping,
 * one frame per batch sample (samples processed in parallel). */
void THNN_(SpatialReflectionPadding_updateGradInput)(THNNState *state,
                                                     THTensor *input,
                                                     THTensor *gradOutput,
                                                     THTensor *gradInput,
                                                     int pad_l, int pad_r,
                                                     int pad_t, int pad_b)
{
  int dimw = 2;
  int dimh = 1;
  int dimslices = 0;
  long nbatch = 1;
  long nslices;
  long iheight;
  long iwidth;
  long oheight;
  long owidth;

  /* BUG FIX (consistency): updateOutput validates the dimensionality of
     input before indexing input->size[...]; the backward pass did not,
     so a bad input could read out of bounds.  Mirror the same check. */
  THNN_ARGCHECK(input->nDimension == 3 || input->nDimension == 4, 2, input,
                "3D or 4D (batch mode) tensor expected for input, but got: %s");

  if (input->nDimension == 4)
  {
    nbatch = input->size[0];
    dimw++;
    dimh++;
    dimslices++;
  }

  /* sizes */
  nslices = input->size[dimslices];
  iheight = input->size[dimh];
  iwidth = input->size[dimw];
  oheight = iheight + pad_t + pad_b;
  owidth  = iwidth + pad_l + pad_r;

  THArgCheck(owidth == THTensor_(size)(gradOutput, dimw), 3,
             "gradOutput width unexpected. Expected: %d, Got: %d",
             owidth, THTensor_(size)(gradOutput, dimw));
  THArgCheck(oheight == THTensor_(size)(gradOutput, dimh), 3,
             "gradOutput height unexpected. Expected: %d, Got: %d",
             oheight, THTensor_(size)(gradOutput, dimh));

  /* get contiguous gradOutput */
  gradOutput = THTensor_(newContiguous)(gradOutput);

  /* resize and zero: gradients are accumulated with += in the frame */
  THTensor_(resizeAs)(gradInput, input);
  THTensor_(zero)(gradInput);

  /* backprop */
  if (input->nDimension == 3) {
    THNN_(SpatialReflectionPadding_updateGradInput_frame)(
      THTensor_(data)(gradInput),
      THTensor_(data)(gradOutput),
      nslices,
      iwidth, iheight,
      owidth, oheight,
      pad_l, pad_r,
      pad_t, pad_b);
  } else {
    long p;
#pragma omp parallel for private(p)
    for (p = 0; p < nbatch; p++) {
      THNN_(SpatialReflectionPadding_updateGradInput_frame)(
        THTensor_(data)(gradInput) + p * nslices * iheight * iwidth,
        THTensor_(data)(gradOutput) + p * nslices * oheight * owidth,
        nslices,
        iwidth, iheight,
        owidth, oheight,
        pad_l, pad_r,
        pad_t, pad_b);
    }
  }

  /* cleanup */
  THTensor_(free)(gradOutput);
}
#endif
|
hoImageRegDissimilarityHistogramBased.h | /** \file hoImageRegDissimilarityHistogramBased.h
\brief Define the class to compute image dissimilarity based on histogram
\author Hui Xue
*/
#ifndef hoImageRegDissimilarityHistogramBased_H_
#define hoImageRegDissimilarityHistogramBased_H_
#pragma once
#include <limits>
#include "hoMatrix.h"
#include "hoImageRegDissimilarity.h"
namespace Gadgetron {
/// Base class for histogram-based image dissimilarity measures (e.g. mutual
/// information).  evaluate() builds the 2D joint intensity histogram of the
/// target and warped images; derived classes turn the histogram into an
/// actual dissimilarity value (evaluateDeriv is pure virtual).
template<typename ImageType>
class hoImageRegDissimilarityHistogramBased : public hoImageRegDissimilarity<ImageType>
{
public:

    typedef hoImageRegDissimilarityHistogramBased<ImageType> Self;
    typedef hoImageRegDissimilarity<ImageType> BaseClass;

    enum { D = ImageType::NDIM };

    typedef typename BaseClass::InterpolatorType InterpolatorType;

    typedef typename BaseClass::ValueType ValueType;
    typedef ValueType T;
    typedef ValueType element_type;
    typedef ValueType value_type;

    typedef typename BaseClass::coord_type coord_type;

    // histogram bins are accumulated in double precision
    typedef double hist_value_type;

    hoImageRegDissimilarityHistogramBased(unsigned int num_bin_target=64, unsigned int num_bin_warpped=64, ValueType bg_value=ValueType(0));
    virtual ~hoImageRegDissimilarityHistogramBased();

    /// builds the joint histogram hist_ from target and warped w;
    /// returns the current dissimilarity_ value
    virtual ValueType evaluate(ImageType& w);
    virtual bool evaluateDeriv(ImageType& w) = 0;

    virtual void print(std::ostream& os) const;

    /// number of intensity bins
    unsigned int num_bin_target_;
    unsigned int num_bin_warpped_;

    /// whether to perform partial interpolation for histogram
    /// (distributes each sample over 4 neighboring bins with bilinear weights)
    bool pv_interpolation_;

    /// step size to ignore pixels when creating histogram
    /// (1 = use every pixel; larger values subsample for speed)
    size_t step_size_ignore_pixel_;

    using BaseClass::gt_timer1_;
    using BaseClass::gt_timer2_;
    using BaseClass::gt_timer3_;
    using BaseClass::performTiming_;
    using BaseClass::gt_exporter_;
    using BaseClass::debugFolder_;

protected:

    using BaseClass::target_;
    using BaseClass::warpped_;
    using BaseClass::deriv_;
    using BaseClass::bg_value_;
    using BaseClass::dissimilarity_;
    using BaseClass::target;
    using BaseClass::warped;
    using BaseClass::deriv;
    using BaseClass::image_dim_;

    /// store the 2D histogram (num_bin_target_ x num_bin_warpped_)
    hoMatrix<hist_value_type> hist_;

    /// min/max intensities of target and warped, filled by evaluate()
    ValueType min_target_;
    ValueType max_target_;

    ValueType min_warpped_;
    ValueType max_warpped_;

    // number of (non-background) samples accumulated into hist_
    size_t num_samples_in_hist_;
};
/// Construct with the number of histogram bins for each image and the
/// background value (pixels equal to bg_value in both images are skipped
/// when the histogram is built).  PV interpolation is off and every pixel
/// is used by default.
template<typename ImageType>
hoImageRegDissimilarityHistogramBased<ImageType>::
hoImageRegDissimilarityHistogramBased(unsigned int num_bin_target, unsigned int num_bin_warpped, ValueType bg_value)
    : BaseClass(bg_value), num_bin_target_(num_bin_target), num_bin_warpped_(num_bin_warpped), pv_interpolation_(false), step_size_ignore_pixel_(1)
{
}
template<typename ImageType>
hoImageRegDissimilarityHistogramBased<ImageType>::~hoImageRegDissimilarityHistogramBased()
{
    // nothing to release; all members manage their own storage
}
/// Build the 2D joint intensity histogram of the target and the warped image
/// w.  Pixels that equal bg_value_ in both images are skipped; every
/// step_size_ignore_pixel_-th pixel is sampled.  Returns the current
/// dissimilarity_ value (the actual measure is computed by derived classes
/// from the histogram built here).
template<typename ImageType>
typename hoImageRegDissimilarityHistogramBased<ImageType>::ValueType hoImageRegDissimilarityHistogramBased<ImageType>::evaluate(ImageType& w)
{
    try
    {
        // let the base class bind/prepare target_ and warpped_
        BaseClass::evaluate(w);

        // allocate and clear the histogram
        hist_.createMatrix(num_bin_target_, num_bin_warpped_);
        Gadgetron::clear(hist_);

        size_t N = target_->get_number_of_elements();
        long long n;

        // intensity range
        // BUG FIX: the running maximum was previously seeded with
        // std::numeric_limits<ValueType>::min(), which for floating-point
        // ValueType is the smallest POSITIVE normalized value, not the most
        // negative one; an all-negative image then produced a wrong maximum
        // (and min/max could end up inverted).  Seeding min/max from the
        // first sample is correct for every arithmetic ValueType.
        if ( N > 0 )
        {
            min_target_ = max_target_ = target(0);
            min_warpped_ = max_warpped_ = warped(0);
        }
        else
        {
            min_target_ = max_target_ = ValueType(0);
            min_warpped_ = max_warpped_ = ValueType(0);
        }

        for ( n=0; n<(long long)N; n++ )
        {
            ValueType vt = target(n);
            if ( vt < min_target_ ) min_target_ = vt;
            if ( vt > max_target_ ) max_target_ = vt;

            ValueType vw = warped(n);
            if ( vw < min_warpped_ ) min_warpped_ = vw;
            if ( vw > max_warpped_ ) max_warpped_ = vw;
        }

        // reciprocal intensity ranges; epsilon guards against division by
        // zero for constant images
        ValueType range_t = ValueType(1.0)/(max_target_ - min_target_ + std::numeric_limits<ValueType>::epsilon());
        ValueType range_w = ValueType(1.0)/(max_warpped_ - min_warpped_ + std::numeric_limits<ValueType>::epsilon());

        num_samples_in_hist_ = 0;

        if ( pv_interpolation_ )
        {
            // partial-volume interpolation: spread every sample over the four
            // neighboring bins with bilinear weights
            #pragma omp parallel for default(none) private(n) shared(N, range_t, range_w)
            for ( n=0; n<(long long)N; n+=(long long)step_size_ignore_pixel_ )
            {
                ValueType vt = target(n);
                ValueType vw = warped(n);

                // skip pixels that are background in both images
                if ( std::abs(vt-bg_value_)<FLT_EPSILON
                    && std::abs(vw-bg_value_)<FLT_EPSILON )
                {
                    continue;
                }

                // fractional bin coordinates
                ValueType xT = range_t*(vt-min_target_)*(num_bin_target_-1);
                ValueType xW = range_w*(vw-min_warpped_)*(num_bin_warpped_-1);

                size_t indT = static_cast<size_t>(xT);
                size_t indW = static_cast<size_t>(xW);

                ValueType sT, s1T, sW, s1W;
                sT = xT - indT; s1T = 1 - sT;
                sW = xW - indW; s1W = 1 - sW;

                #pragma omp critical
                {
                    hist_(indT, indW) += s1T*s1W;

                    if ( indT<num_bin_target_-1 && indW<num_bin_warpped_-1 )
                    {
                        hist_(indT, indW+1) += s1T*sW;
                        hist_(indT+1, indW) += sT*s1W;
                        hist_(indT+1, indW+1) += sT*sW;
                    }
                }

                #pragma omp atomic
                num_samples_in_hist_++;
            }
        }
        else
        {
            // nearest-neighbor binning
            #pragma omp parallel for default(none) private(n) shared(N, range_t, range_w)
            for ( n=0; n<(long long)N; n+=(long long)step_size_ignore_pixel_ )
            {
                ValueType vt = target(n);
                ValueType vw = warped(n);

                if ( std::abs(vt-bg_value_)<FLT_EPSILON
                    && std::abs(vw-bg_value_)<FLT_EPSILON )
                {
                    continue;
                }

                size_t indT = static_cast<size_t>( range_t*(vt-min_target_)*(num_bin_target_-1) + 0.5 );
                size_t indW = static_cast<size_t>( range_w*(vw-min_warpped_)*(num_bin_warpped_-1) + 0.5 );

                #pragma omp critical
                {
                    hist_(indT, indW)++;
                }

                #pragma omp atomic
                num_samples_in_hist_++;
            }
        }

        if ( !debugFolder_.empty() ) { gt_exporter_.export_array(hist_, debugFolder_+"hist2D"); }
    }
    catch(...)
    {
        GERROR_STREAM("Errors happened in hoImageRegDissimilarityHistogramBased<ImageType>::evaluate(ImageType& t, ImageType& w) ... ");
    }

    // NOTE(review): dissimilarity_ is presumably updated by the derived
    // classes from hist_; this method only returns its current value.
    return this->dissimilarity_;
}
/// Print a human-readable summary of the configuration to os.
template<typename ImageType>
void hoImageRegDissimilarityHistogramBased<ImageType>::print(std::ostream& os) const
{
    using namespace std;

    os << "--------------Gagdgetron image dissimilarity with histogram -------------" << endl;
    os << "Image dimension is : " << D << endl;

    std::string elemTypeName = std::string(typeid(ValueType).name());
    os << "Transformation data type is : " << elemTypeName << std::endl;
    os << "Number of intensity bins for target is : " << num_bin_target_ << endl;
    os << "Number of intensity bins for warped is : " << num_bin_warpped_ << endl;
    os << "PV interpolation for histogram is : " << pv_interpolation_ << endl;
    // BUG FIX: the last line ended with std::ends, which inserts a literal
    // '\0' character into the stream (std::ends is meant for ostrstream
    // termination, not text output); use endl like the other lines.
    os << "Step size to ignore pixels when creating histogram is : " << step_size_ignore_pixel_ << endl;
}
}
#endif // hoImageRegDissimilarityHistogramBased_H_
|
GB_binop__lxor_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lxor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__lxor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__lxor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__lxor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lxor_uint16)
// A*D function (colscale): GB (_AxD__lxor_uint16)
// D*A function (rowscale): GB (_DxB__lxor_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__lxor_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__lxor_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lxor_uint16)
// C=scalar+B GB (_bind1st__lxor_uint16)
// C=scalar+B' GB (_bind1st_tran__lxor_uint16)
// C=A+scalar GB (_bind2nd__lxor_uint16)
// C=A'+scalar GB (_bind2nd_tran__lxor_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = ((aij != 0) != (bij != 0))
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) != (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LXOR || GxB_NO_UINT16 || GxB_NO_LXOR_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator.  The loop body
// comes from GB_dense_ewise3_noaccum_template.c, specialized by the GB_*
// macros defined above (z = ((x != 0) != (y != 0)) on uint16_t).
GrB_Info GB (_Cdense_ewise3_noaccum__lxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads      // number of threads to use
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; the accumulation loop comes from
// GB_dense_subassign_23_template.c, specialized by the GB_* macros above.
GrB_Info GB (_Cdense_accumB__lxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    // slicing of B by entry, and the task/thread counts for it
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar; the loop comes from
// GB_dense_subassign_22_template.c, specialized by the GB_* macros above.
GrB_Info GB (_Cdense_accumb__lxor_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // points to the scalar b
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    // CLEANUP: the inner block previously ended with its own
    // "return (GrB_SUCCESS)", making this statement unreachable dead code;
    // a single return here matches the structure of GB(_Cdense_accumB).
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale of A by the diagonal matrix D; the loop comes from
// GB_AxB_colscale_template.c, specialized by the GB_* macros above.
GrB_Info GB (_AxD__lxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,      // pattern: values of A ignored
    const GrB_Matrix D, bool D_is_pattern,      // pattern: values of D ignored
    // slicing of A by entry, and the task/thread counts for it
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale of B by the diagonal matrix D; the loop comes from
// GB_AxB_rowscale_template.c, specialized by the GB_* macros above.
GrB_Info GB (_DxB__lxor_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,      // pattern: values of D ignored
    const GrB_Matrix B, bool B_is_pattern,      // pattern: values of B ignored
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, specialized for LXOR on
// uint16_t; the computation comes from GB_add_template.c.
GrB_Info GB (_AaddB__lxor_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,             // optional mask (may be NULL)
    const bool Mask_struct,         // if true, use the mask structurally
    const bool Mask_comp,           // if true, use the complemented mask
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,            // presumably: C->h is shared with M->h
    // mappings from C's vectors to those of M, A, and B
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B by entry; released by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__lxor_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the included template handles all mask/sparsity cases for this method
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lxor_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// GB_BINOP_FLIP is fixed when this file is generated (see Generator/*)
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__lxor_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// work is sliced over the entries of the sparse mask M
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lxor_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C is held as a bitmap; the template fills C->b and C->x
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// For each entry present in B, compute z = lxor (x, bij), where lxor is the
// logical xor of the nonzero-ness of its two uint16_t operands.
GrB_Info GB (_bind1st__lxor_uint16)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // typed views of the output, the bound scalar, and the values of B
    uint16_t *Cz = (uint16_t *) Cx_output ;
    uint16_t xval = (*((uint16_t *) x_input)) ;
    uint16_t *Bvals = (uint16_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < bnz ; k++)
    {
        // skip entries not present in the bitmap of B
        if (!GBB (Bb, k)) continue ;
        uint16_t bval = GBX (Bvals, k, false) ;
        Cz [k] = ((xval != 0) != (bval != 0)) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// For each entry present in A, compute z = lxor (aij, y), where lxor is the
// logical xor of the nonzero-ness of its two uint16_t operands.
GrB_Info GB (_bind2nd__lxor_uint16)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    // typed views of the output, the values of A, and the bound scalar
    uint16_t *Cz = (uint16_t *) Cx_output ;
    uint16_t *Avals = (uint16_t *) Ax_input ;
    uint16_t yval = (*((uint16_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip entries not present in the bitmap of A
        if (!GBB (Ab, k)) continue ;
        uint16_t aval = GBX (Avals, k, false) ;
        Cz [k] = ((aval != 0) != (yval != 0)) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) != (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__lxor_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// bound scalar x, applied as the first operand to every entry of A'
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (auto-generated pattern)
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) != (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__lxor_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// bound scalar y, applied as the second operand to every entry of A'
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
signalMachine.c | #include <getopt.h>
#include <string.h>
#include "signalMachineUtils.h"
#include "pairwiseAligner.h"
#define STEP 6 // spacing between degenerate nucleotides used for error correction
#define ESTIMATE_PARAMS 1
#define ASSIGNMENT_THRESHOLD 0.1
// Output modes for the aligned-pair writers (dispatched in outputAlignment).
typedef enum {
full = 0,
variantCaller = 1,
assignments = 2
} OutputFormat;
// Print a short usage pointer on stderr; full help lives with runSignalAlign.
void usage() {
    fputs("signalMachine binary, meant to be used through the signalAlign program.\n", stderr);
    fputs("See doc for runSignalAlign for help\n", stderr);
}
// Debug helper: dump the contig/strand/start/end fields of both sides of a
// guide pairwise alignment via st_uglyf.
void printPairwiseAlignmentSummary(struct PairwiseAlignment *pA) {
st_uglyf("contig 1: %s\n", pA->contig1);
st_uglyf("strand 1: %lld\n", pA->strand1);
st_uglyf("start 1: %lld\n", pA->start1);
st_uglyf("end 1: %lld\n", pA->end1);
st_uglyf("contig 2: %s\n", pA->contig2);
st_uglyf("strand 2: %lld\n", pA->strand2);
st_uglyf("start 2: %lld\n", pA->start2);
st_uglyf("end 2: %lld\n", pA->end2);
}
// Map a trimmed-sequence coordinate x_i back into full reference coordinates.
// NOTE(review): the reverse-orientation arithmetic assumes the offset
// convention of the callers in this file -- confirm against signalUtils.
inline int64_t adjustReferenceCoordinate(int64_t x_i, int64_t referenceSeqOffset,
                                         int64_t referenceLengthInKmers, int64_t referenceLength,
                                         Strand strand, bool forward) {
    bool sameOrientation = (strand == template && forward) || (strand == complement && !forward);
    if (!sameOrientation) {
        return referenceLengthInKmers - (x_i + (referenceLength - referenceSeqOffset));
    }
    return x_i + referenceSeqOffset;
}
// Return a newly allocated kmer in reference orientation: a plain copy when
// the strand/forward combination is same-orientation, otherwise the reverse
// complement.  Caller frees the result.
inline char *makeReferenceKmer(const char *k_i, Strand strand, bool forward) {
    bool sameOrientation = (strand == template && forward) || (strand == complement && !forward);
    return sameOrientation ? stString_copy(k_i)
                           : stString_reverseComplementString(k_i);
}
// Return a newly allocated, NUL-terminated copy of the kmer of length
// `kmerLength` starting at index `start` of `string`.  Caller frees it.
// Fix: the original allocated only kmerLength bytes and then wrote the
// terminator at k_i[kmerLength], a one-byte heap buffer overflow.
inline char *kmerFromString(const char *string, int64_t start, int64_t kmerLength) {
    // +1 for the trailing '\0'
    char *k_i = st_malloc((kmerLength + 1) * sizeof(char));
    memcpy(k_i, string + start, (size_t) kmerLength);
    k_i[kmerLength] = '\0';
    return k_i;
}
// Translate a degenerate-position index within the reference kmer into the
// corresponding index within the path kmer, mirroring it on the opposite
// orientation.
inline int64_t adjustQueryPosition(int64_t unadjustedQueryPosition, int64_t kmerLength, Strand strand, bool forward) {
    bool sameOrientation = (strand == template && forward) || (strand == complement && !forward);
    return sameOrientation ? unadjustedQueryPosition
                           : (kmerLength - 1) - unadjustedQueryPosition;
}
// Append one TSV row per aligned pair to posteriorProbsFile in the "full"
// output format: contig, reference position, reference kmer, read label,
// strand label, event index, event mean/noise/duration, target kmer, scaled
// model mean/noise, posterior probability, descaled event mean, model mean,
// and the path (variant-called) kmer.
void writePosteriorProbsFull(char *posteriorProbsFile, char *readLabel, StateMachine *sM,
NanoporeReadAdjustmentParameters npp, double *events, char *target, bool forward,
char *contig, int64_t eventSequenceOffset, int64_t referenceSequenceOffset,
stList *alignedPairs, Strand strand) {
// label for tsv output
char *strandLabel = strand == template ? "t" : "c";
// open the file for output
// NOTE(review): fopen result is not checked; a NULL would crash in fprintf
FILE *fH = fopen(posteriorProbsFile, "a");
// get some lengths outside the loop
int64_t refLength = (int64_t )strlen(target);
int64_t refLengthInKmers = refLength - sM->kmerLength;
for(int64_t i = 0; i < stList_length(alignedPairs); i++) {
// grab the aligned pair: (prob, ref coord, event coord, path kmer)
stIntTuple *aPair = stList_get(alignedPairs, i);
if (stIntTuple_length(aPair) != 4) {
st_errAbort("Aligned pair tuples should have length 4, this one has length %lld\n",
stIntTuple_length(aPair));
}
// nucleotide sequence coordinate
int64_t x_i = stIntTuple_get(aPair, 1);
// adjust back to reference coordinates
int64_t x_adj = adjustReferenceCoordinate(x_i, referenceSequenceOffset, refLengthInKmers, refLength,
strand, forward);
// event index, adjusted to entire event sequence coordinates (event sequence is trimmed during alignment)
int64_t y = stIntTuple_get(aPair, 2) + eventSequenceOffset;
// posterior probability
double p = ((double)stIntTuple_get(aPair, 0)) / PAIR_ALIGNMENT_PROB_1;
// path (variant-called) kmer
char *pathKmer = (char *)stIntTuple_get(aPair, 3);
double eventMean = sequence_getEventMean(events, y);
double eventNoise = sequence_getEventNoise(events, y);
double eventDuration = sequence_getEventDuration(events, y);
// make the kmer string at the target index,
char *k_i = kmerFromString(target, x_i, sM->kmerLength);
// the model is indexed by the path kmer, not the target kmer
int64_t targetKmerIndex = kmer_id(pathKmer, sM->alphabet, sM->alphabetSize, sM->kmerLength);
// get the expected event mean amplitude and noise
double E_mean = sM->EMISSION_MATCH_MATRIX[(targetKmerIndex * MODEL_PARAMS)];
double E_noise = sM->EMISSION_MATCH_MATRIX[(targetKmerIndex * MODEL_PARAMS + 2)];
double scaled_Emean = E_mean * npp.scale + npp.shift;
double scaled_Enoise = E_noise * npp.scale_sd;
double descaledEventMean = emissions_signal_descaleEventMean_JordanStyle(eventMean, E_mean,
npp.scale, npp.shift, npp.var);
// make reference kmer
char *refKmer = makeReferenceKmer(k_i, strand, forward);
// write to file
fprintf(fH, "%s\t%"PRId64"\t%s\t%s\t%s\t%"PRId64"\t%f\t%f\t%f\t%s\t%f\t%f\t%f\t%f\t%f\t%s\n",
contig, x_adj, refKmer, readLabel, strandLabel, y, eventMean, eventNoise, eventDuration, k_i,
scaled_Emean, scaled_Enoise, p, descaledEventMean, E_mean, pathKmer);
// cleanup
free(k_i);
free(refKmer);
}
fclose(fH);
}
// Append one TSV row per degenerate (variant-call) position per aligned pair
// to posteriorProbsFile: event index, reference position, called base,
// posterior probability, strand label, direction label, read label.  Pairs
// whose reference kmer contains no degenerate position are skipped.
void writePosteriorProbsVC(char *posteriorProbsFile, char *readLabel, StateMachine *sM, char *target, bool forward,
int64_t eventSequenceOffset, int64_t referenceSequenceOffset, stList *alignedPairs,
Strand strand) {
// label for tsv output
char *strandLabel = strand == template ? "t" : "c";
char *forwardLabel = forward ? "forward" : "backward";
// open the file for output
// NOTE(review): fopen result is not checked; a NULL would crash in fprintf
FILE *fH = fopen(posteriorProbsFile, "a");
// get some lengths outside the loop
int64_t refLength = (int64_t )strlen(target);
int64_t refLengthInKmers = refLength - sM->kmerLength;
for(int64_t i = 0; i < stList_length(alignedPairs); i++) {
// grab the aligned pair: (prob, ref coord, event coord, path kmer)
stIntTuple *aPair = stList_get(alignedPairs, i);
if (stIntTuple_length(aPair) != 4) {
st_errAbort("Aligned pair tuples should have length 4, this one has length %lld\n",
stIntTuple_length(aPair));
}
// trimmed nucleotide sequence coordinate
int64_t x_i = stIntTuple_get(aPair, 1);
// make the kmer string at the target index,
char *k_i = kmerFromString(target, x_i, sM->kmerLength);
char *refKmer = makeReferenceKmer(k_i, strand, forward);
stList *queryPositions = path_findDegeneratePositions(refKmer, sM->kmerLength);
// check if this aligned pair reports on a query position
if (stList_length(queryPositions) == 0) {
free(k_i);
free(refKmer);
stList_destruct(queryPositions);
continue;
}
// adjust back to reference coordinates
int64_t x_adj = adjustReferenceCoordinate(x_i, referenceSequenceOffset, refLengthInKmers, refLength,
strand, forward);
// event index, adjusted to entire event sequence coordinates (event sequence is trimmed during alignment)
int64_t y = stIntTuple_get(aPair, 2) + eventSequenceOffset;
// posterior probability
double p = ((double)stIntTuple_get(aPair, 0)) / PAIR_ALIGNMENT_PROB_1;
// path (variant-called) kmer
char *pathKmer = (char *)stIntTuple_get(aPair, 3);
// get the base that was called in this aligned pair
int64_t nQueryPositions = stList_length(queryPositions);
for (int64_t q = 0; q < nQueryPositions; q++) {
// position in the reference kmer eg. AGXGG -> 2
int64_t unadjustedQueryPosition = *(int64_t *)stList_get(queryPositions, q);
// position in the pathKmer
int64_t queryPosition = adjustQueryPosition(unadjustedQueryPosition, sM->kmerLength,
strand, forward);
// called base
char base = pathKmer[queryPosition];
// position in the reference we're reporting on
int64_t reportPosition = x_adj + unadjustedQueryPosition;
fprintf(fH, "%"PRId64"\t%"PRId64"\t%c\t%f\t%s\t%s\t%s\n", y, reportPosition, base, p,
strandLabel, forwardLabel, readLabel);
}
free(k_i);
free(refKmer);
stList_destruct(queryPositions);
}
fclose(fH);
}
// Append one TSV row per aligned pair to posteriorProbsFile in "assignments"
// format: path kmer, strand label, descaled event mean, posterior probability.
void writeAssignments(char *posteriorProbsFile, StateMachine *sM, double *events, int64_t eventSequenceOffset,
NanoporeReadAdjustmentParameters npp, stList *alignedPairs, Strand strand) {
// label for tsv output
char *strandLabel = strand == template ? "t" : "c";
// open the file for output
// NOTE(review): fopen result is not checked; a NULL would crash in fprintf
FILE *fH = fopen(posteriorProbsFile, "a");
for(int64_t i = 0; i < stList_length(alignedPairs); i++) {
// grab the aligned pair: (prob, ref coord, event coord, path kmer)
stIntTuple *aPair = stList_get(alignedPairs, i);
if (stIntTuple_length(aPair) != 4) {
st_errAbort("Aligned pair tuples should have length 4, this one has length %lld\n",
stIntTuple_length(aPair));
}
// event index, adjusted to entire event sequence coordinates (event sequence is trimmed during alignment)
int64_t y = stIntTuple_get(aPair, 2) + eventSequenceOffset;
// posterior probability
double p = ((double)stIntTuple_get(aPair, 0)) / PAIR_ALIGNMENT_PROB_1;
// path (variant-called) kmer
char *pathKmer = (char *)stIntTuple_get(aPair, 3);
// get the observed event mean
double eventMean = sequence_getEventMean(events, y);
// get the kmer index
int64_t targetKmerIndex = kmer_id(pathKmer, sM->alphabet, sM->alphabetSize, sM->kmerLength);
// get the expected mean from the model
double E_mean = sM->EMISSION_MATCH_MATRIX[(targetKmerIndex * MODEL_PARAMS)];
// descale the observed mean
double descaledEventMean = emissions_signal_descaleEventMean_JordanStyle(eventMean, E_mean,
npp.scale, npp.shift, npp.var);
fprintf(fH, "%s\t%s\t%lf\t%lf\n", pathKmer, strandLabel, descaledEventMean, p);
}
fclose(fH);
}
// Dispatch to the writer that matches the requested output format; warns on
// stderr if the format is not one of the OutputFormat values.
void outputAlignment(OutputFormat fmt,
                     char *posteriorProbsFile, char *readLabel, StateMachine *sM, NanoporeReadAdjustmentParameters npp,
                     double *events, char *target, bool forward, char *contig, int64_t eventSequenceOffset,
                     int64_t referenceSequenceOffset, stList *alignedPairs, Strand strand) {
    if (fmt == full) {
        writePosteriorProbsFull(posteriorProbsFile, readLabel, sM, npp, events, target, forward, contig,
                                eventSequenceOffset, referenceSequenceOffset, alignedPairs, strand);
    } else if (fmt == variantCaller) {
        writePosteriorProbsVC(posteriorProbsFile, readLabel, sM, target, forward, eventSequenceOffset,
                              referenceSequenceOffset, alignedPairs, strand);
    } else if (fmt == assignments) {
        writeAssignments(posteriorProbsFile, sM, events, eventSequenceOffset, npp, alignedPairs, strand);
    } else {
        fprintf(stderr, "signalAlign - No valid output format provided\n");
    }
}
// Construct a StateMachine of the requested type from a model file.
// Aborts via st_errAbort on an unsupported type or a missing model file.
// Returns a newly allocated StateMachine owned by the caller.
// Fix: the original fell through to an unreachable else branch and used
// `return 0;` for a pointer; both eliminated.
StateMachine *buildStateMachine(const char *modelFile, NanoporeReadAdjustmentParameters npp, StateMachineType type,
                                NanoporeHDP *nHdp) {
    if ((type != threeState) && (type != threeStateHdp)) {
        st_errAbort("signalAlign - incompatible stateMachine type request");
    }
    if (!stFile_exists(modelFile)) {
        st_errAbort("signalAlign - ERROR: couldn't find model file here: %s\n", modelFile);
    }
    if (type == threeState) {
        // descaled 3-state HMM; !ESTIMATE_PARAMS disables estimation here
        return getStateMachine3_descaled(modelFile, npp, !ESTIMATE_PARAMS);
    }
    // type == threeStateHdp: the only remaining possibility after the check above
    return getHdpStateMachine(nHdp, modelFile, npp);
}
// Load transition/emission expectations from an HMM file into the given
// stateMachine; aborts on an unsupported stateMachine type.
// Fix: corrected typo in the abort message ("unupported" -> "unsupported").
inline void loadHmmRoutine(const char *hmmFile, StateMachine *sM, StateMachineType type, Hmm *expectations) {
    if ((type != threeState) && (type != threeStateHdp)) {
        st_errAbort("LoadSignalHmm : unsupported stateMachineType");
    }
    hmmContinuous_loadSignalHmm(hmmFile, sM, type, expectations);
}
// Build a stateMachine from the model file.  The model file now carries both
// the transitions and the event model, so the former separate .hmm load
// (loadHmmRoutine) is no longer needed and this is a thin pass-through.
StateMachine *buildStateMachineAndLoadHmm(const char *modelFile, NanoporeReadAdjustmentParameters npp,
                                          StateMachineType type, NanoporeHDP *nHdp) {
    return buildStateMachine(modelFile, npp, type, nHdp);
}
// Update a NanoporeHDP with assignments from an expectations file, run Gibbs
// sampling, and serialize the resulting HDP to nHdpOutFile.
void updateHdpFromAssignments(const char *nHdpFile, const char *expectationsFile, const char *nHdpOutFile) {
NanoporeHDP *nHdp = deserialize_nhdp(nHdpFile);
// loading the expectations HMM presumably attaches its assignments to nHdp
// as a side effect, since the Hmm wrapper is destructed immediately --
// TODO confirm against hdpHmm_loadFromFile
Hmm *hdpHmm = hdpHmm_loadFromFile(expectationsFile, threeStateHdp, nHdp);
hmmContinuous_destruct(hdpHmm, hdpHmm->type);
fprintf(stderr, "signalAlign - Running Gibbs on HDP\n");
// 10000 burn-in, 100000 iterations, thinning of 100 -- TODO confirm the
// parameter order against execute_nhdp_gibbs_sampling
execute_nhdp_gibbs_sampling(nHdp, 10000, 100000, 100, FALSE);
finalize_nhdp_distributions(nHdp);
fprintf(stderr, "signalAlign - Serializing HDP to %s\n", nHdpOutFile);
serialize_nhdp(nHdp, nHdpOutFile);
destroy_nanopore_hdp(nHdp);
}
// Sum the posterior-probability field (tuple slot 0) over all aligned pairs.
static double totalScore(stList *alignedPairs) {
    double total = 0.0;
    int64_t nPairs = stList_length(alignedPairs);
    for (int64_t idx = 0; idx < nPairs; idx++) {
        stIntTuple *pair = stList_get(alignedPairs, idx);
        total += stIntTuple_get(pair, 0);
    }
    return total;
}
/*
 * Gives the average posterior match probability per base of the two sequences,
 * ignoring indels, as a percentage.
 * Fix: returns 0.0 for an empty alignment; the original divided by zero
 * (stList_length == 0) in that case.
 */
double scoreByPosteriorProbabilityIgnoringGaps(stList *alignedPairs) {
    int64_t nPairs = stList_length(alignedPairs);
    if (nPairs == 0) {
        return 0.0;
    }
    return 100.0 * totalScore(alignedPairs) / ((double) nPairs * PAIR_ALIGNMENT_PROB_1);
}
// Align the (trimmed) event sequence to the target nucleotide sequence using
// the guide anchors; returns a list of aligned-pair stIntTuples of the form
// (posterior prob, ref coordinate, event coordinate, path kmer).  Caller
// destructs the returned list.
// Fix: corrected typo in the abort message ("king" -> "kind").
stList *performSignalAlignment(StateMachine *sM, Sequence *eventSequence, int64_t *eventMap,
                               int64_t mapOffset, char *target, PairwiseAlignmentParameters *p,
                               stList *unmappedAnchors, DegenerateType degenerate) {
    if ((sM->type != threeState) && (sM->type != threeStateHdp)) {
        st_errAbort("signalAlign - You're trying to do the wrong kind of alignment");
    }
    int64_t lX = sequence_correctSeqLength(strlen(target), kmer, sM->kmerLength);
    // remap anchor pairs onto the trimmed event-sequence coordinates
    stList *filteredRemappedAnchors = signalUtils_getRemappedAnchorPairs(unmappedAnchors, eventMap, mapOffset);
    // make the reference kmer sequence
    Sequence *sX = sequence_constructReferenceKmerSequence(lX, target, sequence_getKmer,
                                                           sequence_sliceNucleotideSequence, degenerate, kmer);
    // do the anchored banded alignment
    stList *alignedPairs = getAlignedPairsUsingAnchors(sM, sX, eventSequence, filteredRemappedAnchors, p,
                                                       diagonalCalculationPosteriorMatchProbs, 1, 1);
    return alignedPairs;
}
// Construct an event Sequence covering the span of `events` that the 2D read
// alignment maps between queryStart and queryEnd (mapped through eventMap).
// The slice is taken by pointer arithmetic, so the Sequence presumably
// aliases `events` rather than copying -- confirm against
// sequence_constructEventSequence.
Sequence *makeEventSequenceFromPairwiseAlignment(double *events, int64_t queryStart, int64_t queryEnd,
int64_t *eventMap) {
// find the event mapped to the start and end of the 2D read alignment
int64_t startIdx = eventMap[queryStart];
int64_t endIdx = eventMap[queryEnd];
// move the event pointer to the first event
// (each event occupies NB_EVENT_PARAMS doubles)
size_t elementSize = sizeof(double);
void *elements = (char *)events + ((startIdx * NB_EVENT_PARAMS) * elementSize);
// make the eventSequence
Sequence *eventS = sequence_constructEventSequence(endIdx - startIdx, elements);
return eventS;
}
// Accumulate HMM training expectations for one strand by aligning the event
// sequence to trainingTarget with the guide anchors; results go into
// hmmExpectations.
void getSignalExpectations(StateMachine *sM, Hmm *hmmExpectations, Sequence *eventSequence,
int64_t *eventMap, int64_t mapOffset, char *trainingTarget, PairwiseAlignmentParameters *p,
stList *unmappedAnchors, DegenerateType degenerate) {
// correct sequence length
// NOTE(review): passes `event` here while performSignalAlignment passes
// `kmer` to sequence_correctSeqLength -- confirm this asymmetry is intended
int64_t lX = sequence_correctSeqLength(strlen(trainingTarget), event, sM->kmerLength);
// remap the anchors
stList *filteredRemappedAnchors = signalUtils_getRemappedAnchorPairs(unmappedAnchors, eventMap, mapOffset);
// choose the degenerate alphabet and its size based on the requested
// variant-calling mode
Sequence *target = sequence_constructKmerSequence(
lX, trainingTarget, sequence_getKmer, sequence_sliceNucleotideSequence,
(degenerate == canonicalVariants ? CANONICAL_NUCLEOTIDES :
(degenerate == cytosineMethylation2 ? TWO_CYTOSINES : THREE_CYTOSINES)),
(degenerate == canonicalVariants ? NB_CANONICAL_BASES :
(degenerate == cytosineMethylation2 ? (NB_CYTOSINE_OPTIONS - 1) : NB_CYTOSINE_OPTIONS)),
kmer);
getExpectationsUsingAnchors(sM, hmmExpectations, target, eventSequence, filteredRemappedAnchors, p,
diagonalCalculation_Expectations, 1, 1);
}
int main(int argc, char *argv[]) {
StateMachineType sMtype = threeState;
int64_t j = 0;
int64_t diagExpansion = 50;
double threshold = 0.01;
int64_t constraintTrim = 14;
int64_t degenerate;
int64_t outFmt;
char *templateModelFile = NULL;
char *complementModelFile = NULL;
char *readLabel = NULL;
char *npReadFile = NULL;
char *forwardReference = NULL;
char *backwardReference = NULL;
char *errorCorrectPath = NULL;
char *posteriorProbsFile = NULL;
char *templateExpectationsFile = NULL;
char *complementExpectationsFile = NULL;
char *templateHdp = NULL;
char *complementHdp = NULL;
int key;
while (1) {
static struct option long_options[] = {
{"help", no_argument, 0, 'h'},
{"sm3Hdp", no_argument, 0, 'd'},
{"sparse_output", no_argument, 0, 's'},
{"degenerate", required_argument, 0, 'o'},
{"templateModel", required_argument, 0, 'T'},
{"complementModel", required_argument, 0, 'C'},
{"readLabel", required_argument, 0, 'L'},
{"npRead", required_argument, 0, 'q'},
{"forward_reference", required_argument, 0, 'f'},
{"backward_reference", required_argument, 0, 'b'},
{"error_correct_path", required_argument, 0, 'p'},
{"posteriors", required_argument, 0, 'u'},
{"templateHdp", required_argument, 0, 'v'},
{"complementHdp", required_argument, 0, 'w'},
{"templateExpectations", required_argument, 0, 't'},
{"complementExpectations", required_argument, 0, 'c'},
{"diagonalExpansion", required_argument, 0, 'x'},
{"threshold", required_argument, 0, 'D'},
{"constraintTrim", required_argument, 0, 'm'},
{0, 0, 0, 0} };
int option_index = 0;
key = getopt_long(argc, argv, "h:d:s:o:p:a:T:C:L:q:f:b:p:u:v:w:t:c:x:D:m:",
long_options, &option_index);
if (key == -1) {
//usage();
break;
}
switch (key) {
case 'h':
usage();
return 1;
case 's':
j = sscanf(optarg, "%" PRIi64 "", &outFmt);
assert (j == 1);
//sparseOutput = TRUE;
break;
case 'o':
j = sscanf(optarg, "%" PRIi64 "", °enerate);
assert (j == 1);
break;
case 'd':
sMtype = threeStateHdp;
break;
case 'T':
templateModelFile = stString_copy(optarg);
break;
case 'C':
complementModelFile = stString_copy(optarg);
break;
case 'L':
readLabel = stString_copy(optarg);
break;
case 'q':
npReadFile = stString_copy(optarg);
break;
case 'f':
forwardReference = stString_copy(optarg);
break;
case 'b':
backwardReference= stString_copy(optarg);
break;
case 'p':
errorCorrectPath = stString_copy(optarg);
break;
case 'u':
posteriorProbsFile = stString_copy(optarg);
break;
case 't':
templateExpectationsFile = stString_copy(optarg);
break;
case 'c':
complementExpectationsFile = stString_copy(optarg);
break;
case 'v':
templateHdp = stString_copy(optarg);
break;
case 'w':
complementHdp = stString_copy(optarg);
break;
case 'x':
j = sscanf(optarg, "%" PRIi64 "", &diagExpansion);
assert (j == 1);
assert (diagExpansion >= 0);
diagExpansion = (int64_t)diagExpansion;
break;
case 'D':
j = sscanf(optarg, "%lf", &threshold);
assert (j == 1);
assert (threshold >= 0);
break;
case 'm':
j = sscanf(optarg, "%" PRIi64 "", &constraintTrim);
assert (j == 1);
assert (constraintTrim >= 0);
constraintTrim = (int64_t)constraintTrim;
break;
default:
usage();
return 1;
}
}
(void) j; // silence unused variable warning.
// check for models
if ((templateModelFile == NULL) || (complementModelFile == NULL)) {
st_errAbort("Missing model files, exiting\n");
return 1;
}
// Anchors //
// get pairwise alignment from stdin, in exonerate CIGAR format
FILE *fileHandleIn = stdin;
// parse input CIGAR to get anchors
struct PairwiseAlignment *pA;
pA = cigarRead(fileHandleIn);
// Alignment Parameters //
// make the pairwise alignment parameters
PairwiseAlignmentParameters *p = pairwiseAlignmentBandingParameters_construct();
p->threshold = threshold;
p->constraintDiagonalTrim = constraintTrim;
p->diagonalExpansion = diagExpansion;
// HDP routines //
// load HDPs
NanoporeHDP *nHdpT, *nHdpC;
// check
if ((templateHdp != NULL) || (complementHdp != NULL)) {
if ((templateHdp == NULL) || (complementHdp == NULL)) {
st_errAbort("Need to have template and complement HDPs");
}
if (sMtype != threeStateHdp) {
fprintf(stderr, "[signalAlign] - Warning: this kind of stateMachine does not use the HDPs you gave\n");
}
fprintf(stderr, "[signalAlign] - using NanoporeHDPs\n");
}
#pragma omp parallel sections
{
{
nHdpT = (templateHdp == NULL) ? NULL : deserialize_nhdp(templateHdp);
}
#pragma omp section
{
nHdpC = (complementHdp == NULL) ? NULL : deserialize_nhdp(complementHdp);
}
}
ReferenceSequence *R;
if (errorCorrectPath == NULL) { // not doing error correction
if ((forwardReference == NULL) || (backwardReference == NULL)) {
st_errAbort("[signalAlign] - ERROR: did not get reference files %s %s\n",
forwardReference, backwardReference);
}
R = signalUtils_ReferenceSequenceConstructFull(forwardReference, backwardReference, pA);
} else {
R = signalUtils_ReferenceSequenceConstructEmpty(pA);
}
// Nanopore Read //
// load nanopore read
NanoporeRead *npRead = nanopore_loadNanoporeReadFromFile(npReadFile);
// constrain the event sequence to the positions given by the guide alignment
Sequence *tEventSequence = makeEventSequenceFromPairwiseAlignment(npRead->templateEvents,
pA->start2, pA->end2,
npRead->templateEventMap);
Sequence *cEventSequence = makeEventSequenceFromPairwiseAlignment(npRead->complementEvents,
pA->start2, pA->end2,
npRead->complementEventMap);
// the aligned pairs start at (0,0) so we need to correct them based on the guide alignment later.
// record the pre-zeroed alignment start and end coordinates here
// for the events:
int64_t tCoordinateShift = npRead->templateEventMap[pA->start2];
int64_t cCoordinateShift = npRead->complementEventMap[pA->start2];
// and for the reference:
int64_t rCoordinateShift_t = pA->start1;
int64_t rCoordinateShift_c = pA->end1;
bool forward = pA->strand1; // keep track of whether this is a forward mapped read or not
stList *anchorPairs = signalUtils_guideAlignmentToRebasedAnchorPairs(pA, p); // pA gets modified here, no turning back
if (errorCorrectPath != NULL) {
st_errAbort("signalAlign - Error correction not implemented, yet\n");
st_uglyf("Starting error correcting routine\n");
//stList *aP = stList_construct3(0, &free);
//char *fR, *bR, *perIterationOutputFile;
//stList *templateAlignedPairs, *complementAlignedPairs;
// #pragma omp parallel for
for (int64_t i = 0; i < STEP; i++) {
StateMachine *sMt = buildStateMachine(templateModelFile, npRead->templateParams, sMtype, nHdpT);
StateMachine *sMc = buildStateMachine(complementModelFile, npRead->complementParams, sMtype, nHdpC);
if (posteriorProbsFile == NULL) {
st_errAbort("SignalAlign - didn't find output file path\n");
}
// load the first forward and backward reference sequences
char *fR = stString_print("%sforward_sub%i.txt", errorCorrectPath, i);
char *bR = stString_print("%sbackward_sub%i.txt", errorCorrectPath, i);
if (!stFile_exists(fR) || !stFile_exists(bR)) {
st_errAbort("Error finding error correct references %s %s\n", fR, bR);
}
// set referenceSequence to this iteration's sequence
signalUtils_ReferenceSequenceSet(R, fR, bR);
fprintf(stderr, "signalAlign - starting template alignment round %"PRId64"\n", i);
// get aligned pairs
stList *templateAlignedPairs = performSignalAlignment(sMt, tEventSequence, npRead->templateEventMap,
pA->start2, R->getTemplateTargetSequence(R),
p, anchorPairs, degenerate);
double templatePosteriorScore = scoreByPosteriorProbabilityIgnoringGaps(templateAlignedPairs);
fprintf(stdout, "%s :: Iteration %"PRId64", # alignedPairs (template): %"PRId64", score: %f\n",
readLabel, i, stList_length(templateAlignedPairs), templatePosteriorScore);
stList_sort(templateAlignedPairs, sortByXPlusYCoordinate2); //Ensure the coordinates are increasing
char *perIterationOutputFile = stString_print("%s.%i", posteriorProbsFile, i);
// write to file
writePosteriorProbsFull(perIterationOutputFile, readLabel,
sMt,
npRead->templateParams, npRead->templateEvents,
R->getTemplateTargetSequence(R),
forward, pA->contig1,
tCoordinateShift, rCoordinateShift_t,
templateAlignedPairs, template);
fprintf(stderr, "signalAlign - starting complement alignment round %"PRId64"\n", i);
stList *complementAlignedPairs = performSignalAlignment(sMc, cEventSequence,
npRead->complementEventMap, pA->start2,
R->getComplementTargetSequence(R),
p, anchorPairs, degenerate);
double complementPosteriorScore = scoreByPosteriorProbabilityIgnoringGaps(complementAlignedPairs);
// sort
stList_sort(complementAlignedPairs, sortByXPlusYCoordinate2); //Ensure the coordinates are increasing
fprintf(stdout, "%s :: Iteration %"PRId64", # alignedPairs (complement): %"PRId64", score: %f\n",
readLabel, i, stList_length(complementAlignedPairs), complementPosteriorScore);
writePosteriorProbsFull(perIterationOutputFile, readLabel,
sMc,
npRead->complementParams, npRead->complementEvents,
R->getComplementTargetSequence(R),
forward, pA->contig1,
cCoordinateShift, rCoordinateShift_c,
complementAlignedPairs, complement);
stList_destruct(templateAlignedPairs);
stList_destruct(complementAlignedPairs);
stateMachine_destruct(sMt);
stateMachine_destruct(sMc);
free(perIterationOutputFile);
free(fR);
free(bR);
}
// #pragma omp critical
{
signalUtils_ReferenceSequenceDestruct(R);
sequence_destruct(tEventSequence);
sequence_destruct(cEventSequence);
destructPairwiseAlignment(pA);
}
return 0;
} else if ((templateExpectationsFile != NULL) && (complementExpectationsFile != NULL)) {
st_uglyf("Starting expectations routine\n");
// Expectation Routine //
StateMachine *sMt = buildStateMachine(templateModelFile, npRead->templateParams, sMtype, nHdpT);
// temporary way to 'turn off' estimates if I want to
if (ESTIMATE_PARAMS) { //todo remove threshold, not used
signalUtils_estimateNanoporeParams(sMt, npRead, &npRead->templateParams, ASSIGNMENT_THRESHOLD,
signalUtils_templateOneDAssignmentsFromRead,
nanopore_adjustTemplateEventsForDrift);
}
// make empty HMM to collect expectations
Hmm *templateExpectations = hmmContinuous_getExpectationsHmm(sMt, p->threshold, 0.001, 0.001);
// get expectations for template
fprintf(stderr, "signalAlign - getting expectations for template\n");
getSignalExpectations(sMt, templateExpectations, tEventSequence, npRead->templateEventMap,
pA->start2,
R->getTemplateTargetSequence(R),
p, anchorPairs, degenerate);
if (sMtype == threeStateHdp) {
fprintf(stderr, "signalAlign - got %" PRId64 "template HDP assignments\n",
hmmContinuous_howManyAssignments(templateExpectations));
}
// write to file
fprintf(stderr, "signalAlign - writing expectations to file: %s\n", templateExpectationsFile);
hmmContinuous_writeToFile(templateExpectationsFile, templateExpectations, sMtype);
// get expectations for the complement
fprintf(stderr, "signalAlign - getting expectations for complement\n");
StateMachine *sMc = buildStateMachine(complementModelFile, npRead->complementParams, sMtype, nHdpC);
if (ESTIMATE_PARAMS) {
signalUtils_estimateNanoporeParams(sMc, npRead, &npRead->complementParams, ASSIGNMENT_THRESHOLD,
signalUtils_complementOneDAssignmentsFromRead,
nanopore_adjustComplementEventsForDrift);
}
Hmm *complementExpectations = hmmContinuous_getExpectationsHmm(sMc, p->threshold, 0.001, 0.001);
getSignalExpectations(sMc, complementExpectations, cEventSequence, npRead->complementEventMap,
pA->start2,
R->getComplementTargetSequence(R),
p, anchorPairs, degenerate);
if (sMtype == threeStateHdp) {
fprintf(stderr, "signalAlign - got %"PRId64"complement HDP assignments\n",
hmmContinuous_howManyAssignments(complementExpectations));
}
// write to file
fprintf(stderr, "signalAlign - writing expectations to file: %s\n", complementExpectationsFile);
hmmContinuous_writeToFile(complementExpectationsFile, complementExpectations, sMtype);
stateMachine_destruct(sMt);
stateMachine_destruct(sMc);
signalUtils_ReferenceSequenceDestruct(R);
hmmContinuous_destruct(templateExpectations, sMtype);
hmmContinuous_destruct(complementExpectations, sMtype);
nanopore_nanoporeReadDestruct(npRead);
sequence_destruct(tEventSequence);
sequence_destruct(cEventSequence);
pairwiseAlignmentBandingParameters_destruct(p);
destructPairwiseAlignment(pA);
stList_destruct(anchorPairs);
return 0;
} else {
// Alignment Procedure //
// Template alignment
fprintf(stderr, "signalAlign - starting template alignment\n");
// make template stateMachine
StateMachine *sMt = buildStateMachine(templateModelFile, npRead->templateParams, sMtype, nHdpT);
// re-estimate the nanoporeAdjustment parameters
if (ESTIMATE_PARAMS) {
signalUtils_estimateNanoporeParams(sMt, npRead, &npRead->templateParams, ASSIGNMENT_THRESHOLD,
signalUtils_templateOneDAssignmentsFromRead,
nanopore_adjustTemplateEventsForDrift);
//signalUtils_estimateNanoporeParamsFromTable("../models/testModelR9_acgt_template.model",
// npRead, &npRead->templateParams, ASSIGNMENT_THRESHOLD,
// signalUtils_templateOneDAssignmentsFromRead,
// nanopore_adjustTemplateEventsForDrift);
}
if (sMtype == threeStateHdp) {
stateMachine3_setModelToHdpExpectedValues(sMt, nHdpT);
}
stList *templateAlignedPairs = performSignalAlignment(sMt, tEventSequence, npRead->templateEventMap,
pA->start2, R->getTemplateTargetSequence(R),
p, anchorPairs,
degenerate);
double templatePosteriorScore = scoreByPosteriorProbabilityIgnoringGaps(templateAlignedPairs);
// sort
stList_sort(templateAlignedPairs, sortByXPlusYCoordinate2); //Ensure the coordinates are increasing
// write to file
if (posteriorProbsFile != NULL) {
outputAlignment(outFmt, posteriorProbsFile, readLabel, sMt, npRead->templateParams, npRead->templateEvents,
R->getTemplateTargetSequence(R), forward, pA->contig1, tCoordinateShift, rCoordinateShift_t,
templateAlignedPairs, template);
}
// Complement alignment
fprintf(stderr, "signalAlign - starting complement alignment\n");
StateMachine *sMc = buildStateMachine(complementModelFile, npRead->complementParams, sMtype, nHdpC);
if (ESTIMATE_PARAMS) {
signalUtils_estimateNanoporeParams(sMc, npRead, &npRead->complementParams, ASSIGNMENT_THRESHOLD,
signalUtils_complementOneDAssignmentsFromRead,
nanopore_adjustComplementEventsForDrift);
//signalUtils_estimateNanoporeParamsFromTable("../models/testModelR9_acgt_complement.model",
// npRead, &npRead->complementParams, ASSIGNMENT_THRESHOLD,
// signalUtils_complementOneDAssignmentsFromRead,
// nanopore_adjustComplementEventsForDrift);
}
if (sMtype == threeStateHdp) {
stateMachine3_setModelToHdpExpectedValues(sMc, nHdpC);
}
stList *complementAlignedPairs = performSignalAlignment(sMc, cEventSequence,
npRead->complementEventMap, pA->start2,
R->getComplementTargetSequence(R),
p, anchorPairs, degenerate);
double complementPosteriorScore = scoreByPosteriorProbabilityIgnoringGaps(complementAlignedPairs);
// sort
stList_sort(complementAlignedPairs, sortByXPlusYCoordinate2); //Ensure the coordinates are increasing
// write to file
if (posteriorProbsFile != NULL) {
outputAlignment(outFmt, posteriorProbsFile, readLabel, sMc, npRead->complementParams,
npRead->complementEvents, R->getComplementTargetSequence(R), forward, pA->contig1,
cCoordinateShift, rCoordinateShift_c, complementAlignedPairs, complement);
}
fprintf(stdout, "%s %"PRId64"\t%"PRId64"(%f)\t", readLabel, stList_length(anchorPairs),
stList_length(templateAlignedPairs), templatePosteriorScore);
fprintf(stdout, "%"PRId64"(%f)\n", stList_length(complementAlignedPairs), complementPosteriorScore);
// final alignment clean up
destructPairwiseAlignment(pA);
nanopore_nanoporeReadDestruct(npRead);
signalUtils_ReferenceSequenceDestruct(R);
stateMachine_destruct(sMt);
sequence_destruct(tEventSequence);
stList_destruct(templateAlignedPairs);
stateMachine_destruct(sMc);
sequence_destruct(cEventSequence);
stList_destruct(complementAlignedPairs);
fprintf(stderr, "signalAlign - SUCCESS: finished alignment of query %s, exiting\n", readLabel);
}
return 0;
}
|
8.race2.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define M 200
#define N 200
/* Matrix-vector product A = B*C, one row of B per parallel iteration.
   This is a race-detection fixture: the CHECK line below requires the tool
   to report a data race, so the race must NOT be fixed. */
int main() {
  double A[M], B[M][N], C[N], sum = 0.0;
  /* 'sum' is declared outside the parallel loop and is therefore shared by
     default; every thread reads and writes it concurrently, which is the
     intended data race.  Enabling the commented-out private(sum) clause
     would remove the race.
     NOTE(review): B and C are read uninitialized — harmless for a
     race-detection fixture, since only the access pattern matters. */
#pragma omp parallel for // private(sum)
  for (int i = 0; i < M; i++) {
    sum = 0.0; /* unsynchronized write to shared 'sum' */
    for (int j = 0; j < N; j++) {
      sum += B[i][j] * C[j]; /* unsynchronized read-modify-write */
    }
    A[i] = sum; /* unsynchronized read */
  }
}
// CHECK: Data Race detected
// END
|
transform.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M %
% T R R A A NN N SS F O O R R MM MM %
% T RRRR AAAAA N N N SSS FFF O O RRRR M M M %
% T R R A A N NN SS F O O R R M M %
% T R R A A N N SSSSS F OOO R R M M %
% %
% %
% MagickCore Image Transform Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/memory_.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resource_.h"
#include "MagickCore/resize.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o O r i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoOrientImage() adjusts an image so that its orientation is suitable for
% viewing (i.e. top-left orientation).
%
% The format of the AutoOrientImage method is:
%
% Image *AutoOrientImage(const Image *image,
% const OrientationType orientation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o orientation: Current image orientation.
%
% o exception: Return any errors or warnings in this structure.
%
*/
MagickExport Image *AutoOrientImage(const Image *image,
  const OrientationType orientation,ExceptionInfo *exception)
{
  Image
    *orient_image;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Map the current orientation onto the flip/flop/rotate/transpose that
    brings the image to top-left orientation; any unrecognized orientation
    (including Undefined and TopLeft) yields an unmodified clone.
  */
  if (orientation == TopRightOrientation)
    orient_image=FlopImage(image,exception);
  else
    if (orientation == BottomRightOrientation)
      orient_image=RotateImage(image,180.0,exception);
    else
      if (orientation == BottomLeftOrientation)
        orient_image=FlipImage(image,exception);
      else
        if (orientation == LeftTopOrientation)
          orient_image=TransposeImage(image,exception);
        else
          if (orientation == RightTopOrientation)
            orient_image=RotateImage(image,90.0,exception);
          else
            if (orientation == RightBottomOrientation)
              orient_image=TransverseImage(image,exception);
            else
              if (orientation == LeftBottomOrientation)
                orient_image=RotateImage(image,270.0,exception);
              else
                orient_image=CloneImage(image,0,0,MagickTrue,exception);
  /*
    The result (when one was produced) is now upright by construction.
  */
  if (orient_image != (Image *) NULL)
    orient_image->orientation=TopLeftOrientation;
  return(orient_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChopImage() removes a region of an image and collapses the image to occupy
% the removed portion.
%
% The format of the ChopImage method is:
%
% Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o chop_info: Define the region of the image to chop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
  ExceptionInfo *exception)
{
#define ChopImageTag "Chop/Image"

  CacheView
    *chop_view,
    *image_view;

  Image
    *chop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    extent;

  ssize_t
    y;

  /*
    Check chop geometry: reject rectangles that lie entirely outside the
    image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(chop_info != (RectangleInfo *) NULL);
  if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
      ((chop_info->y+(ssize_t) chop_info->height) < 0) ||
      (chop_info->x > (ssize_t) image->columns) ||
      (chop_info->y > (ssize_t) image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Clip the chop rectangle to the image bounds; a negative origin shrinks
    the rectangle and snaps it to 0.
  */
  extent=(*chop_info);
  if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
    extent.width=(size_t) ((ssize_t) image->columns-extent.x);
  if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
    extent.height=(size_t) ((ssize_t) image->rows-extent.y);
  if (extent.x < 0)
    {
      extent.width-=(size_t) (-extent.x);
      extent.x=0;
    }
  if (extent.y < 0)
    {
      extent.height-=(size_t) (-extent.y);
      extent.y=0;
    }
  /*
    The result image is the source minus the chopped rows and columns.
  */
  chop_image=CloneImage(image,image->columns-extent.width,image->rows-
    extent.height,MagickTrue,exception);
  if (chop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Extract chop image: first copy the rows above the chop rectangle,
    dropping the chopped columns from each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* NOTE(review): unlike the second loop below, this pragma omits
     shared(progress); 'progress' is shared by OpenMP default rules anyway,
     but the two clauses should probably match — confirm. */
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,chop_image,extent.y,1)
#endif
  for (y=0; y < (ssize_t) extent.y; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Copy only columns outside the chop rectangle; 'q' advances only for
        kept pixels, 'p' advances for every source pixel.
      */
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (chop_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(chop_image,channel,p[i],q);
          }
          q+=GetPixelChannels(chop_image);
        }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Extract chop image: now copy the rows below the chop rectangle, reading
    from past the rectangle and writing immediately after the rows copied
    above.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,chop_image,image->rows-(extent.y+extent.height),1)
#endif
  for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Same column filter as the first loop. */
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (chop_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(chop_image,channel,p[i],q);
          }
          q+=GetPixelChannels(chop_image);
        }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  chop_view=DestroyCacheView(chop_view);
  image_view=DestroyCacheView(image_view);
  chop_image->type=image->type;
  if (status == MagickFalse)
    chop_image=DestroyImage(chop_image);
  return(chop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C M Y K I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a
% single image.
%
% The format of the ConsolidateCMYKImages method is:
%
% Image *ConsolidateCMYKImages(const Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
  ExceptionInfo *exception)
{
  CacheView
    *cmyk_view,
    *image_view;

  Image
    *cmyk_image,
    *cmyk_images;

  register ssize_t
    j;

  ssize_t
    y;

  /*
    Consolidate separate C, M, Y, and K planes into a single image: each
    group of four consecutive images in the list supplies one plane each,
    in C, M, Y, K order.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cmyk_images=NewImageList();
  for (j=0; j < (ssize_t) GetImageListLength(images); j+=4)
  {
    register ssize_t
      i;

    assert(images != (Image *) NULL);
    /* The first plane of the group sets the result's dimensions. */
    cmyk_image=CloneImage(images,0,0,MagickTrue,
      exception);
    if (cmyk_image == (Image *) NULL)
      break;
    if (SetImageStorageClass(cmyk_image,DirectClass,exception) == MagickFalse)
      break;
    (void) SetImageColorspace(cmyk_image,CMYKColorspace,exception);
    for (i=0; i < 4; i++)
    {
      image_view=AcquireVirtualCacheView(images,exception);
      cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
      for (y=0; y < (ssize_t) images->rows; y++)
      {
        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
        q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          break;
        for (x=0; x < (ssize_t) images->columns; x++)
        {
          Quantum
            pixel;

          /* The plane value is the inverted intensity of the source pixel. */
          pixel=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
          switch (i)
          {
            case 0: SetPixelCyan(cmyk_image,pixel,q); break;
            case 1: SetPixelMagenta(cmyk_image,pixel,q); break;
            case 2: SetPixelYellow(cmyk_image,pixel,q); break;
            case 3: SetPixelBlack(cmyk_image,pixel,q); break;
            default: break;
          }
          p+=GetPixelChannels(images);
          q+=GetPixelChannels(cmyk_image);
        }
        if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
          break;
      }
      cmyk_view=DestroyCacheView(cmyk_view);
      image_view=DestroyCacheView(image_view);
      /*
        Advance to the next plane image; a short final group (fewer than
        four planes) ends the inner loop early.
      */
      images=GetNextImageInList(images);
      if (images == (Image *) NULL)
        break;
    }
    /* NOTE(review): a partially-filled cmyk_image (early break above) is
       still appended here — confirm this is intentional. */
    AppendImageToList(&cmyk_images,cmyk_image);
  }
  return(cmyk_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImage() extracts a region of the image starting at the offset defined
% by geometry. Region must be fully defined, and no special handling of
% geometry flags is performed.
%
% The format of the CropImage method is:
%
% Image *CropImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
  ExceptionInfo *exception)
{
#define CropImageTag "Crop/Image"

  CacheView
    *crop_view,
    *image_view;

  Image
    *crop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  RectangleInfo
    bounding_box,
    page;

  ssize_t
    y;

  /*
    Check crop geometry against the virtual canvas (image page); an unset
    page falls back to the image dimensions.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  bounding_box=image->page;
  if ((bounding_box.width == 0) || (bounding_box.height == 0))
    {
      bounding_box.width=image->columns;
      bounding_box.height=image->rows;
    }
  page=(*geometry);
  if (page.width == 0)
    page.width=bounding_box.width;
  if (page.height == 0)
    page.height=bounding_box.height;
  if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
      ((bounding_box.y-page.y) >= (ssize_t) page.height) ||
      ((page.x-bounding_box.x) > (ssize_t) image->columns) ||
      ((page.y-bounding_box.y) > (ssize_t) image->rows))
    {
      /*
        Crop is not within virtual canvas, return 1 pixel transparent image.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.alpha_trait=BlendPixelTrait;
      crop_image->background_color.alpha=(MagickRealType) TransparentAlpha;
      (void) SetImageBackgroundColor(crop_image,exception);
      crop_image->page=bounding_box;
      /* (-1,-1) marks the placeholder pixel as off-canvas. */
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      if (crop_image->dispose == BackgroundDispose)
        crop_image->dispose=NoneDispose;
      return(crop_image);
    }
  /*
    Translate the crop rectangle from virtual-canvas coordinates into
    pixel-data coordinates, clipping to the canvas origin.
  */
  if ((page.x < 0) && (bounding_box.x >= 0))
    {
      page.width+=page.x-bounding_box.x;
      page.x=0;
    }
  else
    {
      page.width-=bounding_box.x-page.x;
      page.x-=bounding_box.x;
      if (page.x < 0)
        page.x=0;
    }
  if ((page.y < 0) && (bounding_box.y >= 0))
    {
      page.height+=page.y-bounding_box.y;
      page.y=0;
    }
  else
    {
      page.height-=bounding_box.y-page.y;
      page.y-=bounding_box.y;
      if (page.y < 0)
        page.y=0;
    }
  /* Clip to the image data, then to the caller's requested size. */
  if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
    page.width=image->columns-page.x;
  if ((geometry->width != 0) && (page.width > geometry->width))
    page.width=geometry->width;
  if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
    page.height=image->rows-page.y;
  if ((geometry->height != 0) && (page.height > geometry->height))
    page.height=geometry->height;
  bounding_box.x+=page.x;
  bounding_box.y+=page.y;
  if ((page.width == 0) || (page.height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Initialize crop image attributes: preserve the virtual canvas unless the
    crop extends past it.
  */
  crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
  if (crop_image == (Image *) NULL)
    return((Image *) NULL);
  crop_image->page.width=image->page.width;
  crop_image->page.height=image->page.height;
  offset.x=(ssize_t) (bounding_box.x+bounding_box.width);
  offset.y=(ssize_t) (bounding_box.y+bounding_box.height);
  if ((offset.x > (ssize_t) image->page.width) ||
      (offset.y > (ssize_t) image->page.height))
    {
      crop_image->page.width=bounding_box.width;
      crop_image->page.height=bounding_box.height;
    }
  crop_image->page.x=bounding_box.x;
  crop_image->page.y=bounding_box.y;
  /*
    Crop image: copy the region row by row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,crop_image,crop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) crop_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
      1,exception);
    q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) crop_image->columns; x++)
    {
      register ssize_t
        i;

      /* Copy every channel both images define. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait crop_traits=GetPixelChannelTraits(crop_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (crop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(crop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(crop_image);
    }
    if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CropImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  crop_view=DestroyCacheView(crop_view);
  image_view=DestroyCacheView(image_view);
  crop_image->type=image->type;
  if (status == MagickFalse)
    crop_image=DestroyImage(crop_image);
  return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o T i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImageToTiles() crops a single image, into a possible list of tiles.
% This may include a single sub-region of the image. This basically applies
% all the normal geometry flags for Crop.
%
% Image *CropImageToTiles(const Image *image,
% const char *crop_geometry, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double ConstrainPixelOffset(double x)
{
  /*
    Clamp x to a range safely representable as a pixel offset:
    +/- (SSIZE_MAX-512), leaving slack for subsequent rounding/arithmetic.
  */
  const double
    limit = (double) (SSIZE_MAX-512);

  if (x < -limit)
    return(-limit);
  if (x > limit)
    return(limit);
  return(x);
}
static inline ssize_t PixelRoundOffset(double x)
{
  /*
    Round x to the nearest integer pixel offset; exact halves round toward
    +infinity.  The value is clamped via ConstrainPixelOffset() before the
    final conversion to ssize_t.
  */
  double
    clamped;

  clamped=ConstrainPixelOffset(x);
  if ((x-floor(x)) < (ceil(x)-x))
    return((ssize_t) floor(clamped));
  return((ssize_t) ceil(clamped));
}
MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry,ExceptionInfo *exception)
{
  Image
    *next,
    *crop_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  crop_image=NewImageList();
  next=NewImageList();
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      PointInfo
        delta,
        offset;

      RectangleInfo
        crop;

      size_t
        height,
        width;

      /*
        Crop into NxM tiles (@ flag): geometry.width x geometry.height gives
        the tile grid, not the tile size.
      */
      width=image->columns;
      height=image->rows;
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      /* With AspectValue (!) the offsets grow the area; otherwise shrink. */
      if ((flags & AspectValue) == 0)
        {
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      /* Fractional tile size; never below one pixel. */
      delta.x=(double) width/geometry.width;
      delta.y=(double) height/geometry.height;
      if (delta.x < 1.0)
        delta.x=1.0;
      if (delta.y < 1.0)
        delta.y=1.0;
      for (offset.y=0; offset.y < (double) height; )
      {
        if ((flags & AspectValue) == 0)
          {
            crop.y=PixelRoundOffset((double) (offset.y-
              (geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) PixelRoundOffset((double) (offset.y+
              (geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=PixelRoundOffset((double) (offset.y-
              (geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) PixelRoundOffset((double)
              (offset.y+(geometry.y < -1 ? geometry.y : 0)));
          }
        /* Convert bottom edge to a height, then to page coordinates. */
        crop.height-=crop.y;
        crop.y+=image->page.y;
        for (offset.x=0; offset.x < (double) width; )
        {
          if ((flags & AspectValue) == 0)
            {
              crop.x=PixelRoundOffset((double) (offset.x-
                (geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x;  /* increment now to find height */
              crop.width=(size_t) PixelRoundOffset((double) (offset.x+
                (geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=PixelRoundOffset((double) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x;  /* increment now to find height */
              crop.width=(size_t) PixelRoundOffset((double) (offset.x+
                (geometry.x < 0 ? geometry.x : 0)));
            }
          crop.width-=crop.x;
          crop.x+=image->page.x;
          next=CropImage(image,&crop,exception);
          if (next != (Image *) NULL)
            AppendImageToList(&crop_image,next);
        }
      }
      /* Per-tile warnings (e.g. empty edge tiles) are not fatal here. */
      ClearMagickException(exception);
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          /* ! flag: keep the requested canvas, offset by the crop origin. */
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
          crop_image->page.x-=geometry.x;
          crop_image->page.y-=geometry.y;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) || (image->rows > geometry.height))
    {
      RectangleInfo
        page;

      size_t
        height,
        width;

      ssize_t
        x,
        y;

      /*
        Crop into tiles of fixed size WxH, covering the whole page.
      */
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (page.height == 0)
        page.height=image->rows;
      width=geometry.width;
      if (width == 0)
        width=page.width;
      height=geometry.height;
      if (height == 0)
        height=page.height;
      next=NewImageList();
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      return(crop_image);
    }
  /* Requested tile is at least as large as the image: return a copy. */
  return(CloneImage(image,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x c e r p t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExcerptImage() returns a excerpt of the image as defined by the geometry.
%
% The format of the ExcerptImage method is:
%
% Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image: exactly the geometry's width x height; unlike
    CropImage(), the geometry is taken as-is with no virtual-canvas
    adjustment.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (excerpt_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Excerpt each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
  for (y=0; y < (ssize_t) excerpt_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
      geometry->width,1,exception);
    /* NOTE(review): siblings (ChopImage, CropImage) use
       QueueCacheViewAuthenticPixels here; Get* additionally reads the
       existing destination pixels — confirm whether that read is needed. */
    q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) excerpt_image->columns; x++)
    {
      register ssize_t
        i;

      /* Copy every channel both images define. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait excerpt_traits=GetPixelChannelTraits(excerpt_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (excerpt_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(excerpt_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(excerpt_image);
    }
    if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ExcerptImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  excerpt_view=DestroyCacheView(excerpt_view);
  image_view=DestroyCacheView(image_view);
  excerpt_image->type=image->type;
  if (status == MagickFalse)
    excerpt_image=DestroyImage(excerpt_image);
  return(excerpt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtentImage() extends the image as defined by the geometry, gravity, and
% image background color. Set the (x,y) offset of the geometry to move the
% original image relative to the extended image.
%
% The format of the ExtentImage method is:
%
% Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
  Image
    *extent_image;
  MagickBooleanType
    composited;
  const StringInfo
    *clip_profile;
  /*
    Create the extended canvas and fill it with the background color.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (extent_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageBackgroundColor(extent_image,exception) == MagickFalse)
    {
      extent_image=DestroyImage(extent_image);
      return((Image *) NULL);
    }
  /*
    Composite the source onto the canvas; the geometry offset is negated so a
    positive (x,y) moves the original image right/down on the new canvas.
  */
  composited=CompositeImage(extent_image,image,image->compose,MagickTrue,
    -geometry->x,-geometry->y,exception);
  if (composited == MagickFalse)
    return(extent_image);
  /*
    On success, keep any 8BIM clip path consistent with the new canvas.
  */
  clip_profile=GetImageProfile(extent_image,"8bim");
  if (clip_profile != (StringInfo *) NULL)
    Update8BIMClipPath(clip_profile,image->columns,image->rows,geometry);
  return(extent_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlipImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis.
%
% The format of the FlipImage method is:
%
% Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag "Flip/Image"
  CacheView
    *flip_view,
    *image_view;
  Image
    *flip_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  RectangleInfo
    page;
  ssize_t
    y;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flip_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image: copy each source row y into destination row (rows-y-1),
    reflecting the image about its central x-axis.  Rows are independent, so
    the loop is parallelized when OpenMP is available.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flip_image,flip_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    /* A prior scanline failure aborts remaining iterations cheaply. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) flip_image->columns; x++)
    {
      register ssize_t
        i;
      /* Copy only channels defined in both source and destination. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait flip_traits=GetPixelChannelTraits(flip_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (flip_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flip_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(flip_image);
    }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FlipImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /* Mirror the page (canvas) offset so the virtual canvas flips too. */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlopImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis.
%
% The format of the FlopImage method is:
%
% Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag "Flop/Image"
  CacheView
    *flop_view,
    *image_view;
  Image
    *flop_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  RectangleInfo
    page;
  ssize_t
    y;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flop_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row: within a row, source column x is written to destination
    column (columns-1-x), reflecting the image about its central y-axis.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flop_image,flop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    /* A prior scanline failure aborts remaining iterations cheaply. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      q starts one pixel past the row's end and walks backward while p walks
      forward, which performs the horizontal mirror.
    */
    q+=GetPixelChannels(flop_image)*flop_image->columns;
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      register ssize_t
        i;
      q-=GetPixelChannels(flop_image);
      /* Copy only channels defined in both source and destination. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait flop_traits=GetPixelChannelTraits(flop_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (flop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FlopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /* Mirror the page (canvas) offset so the virtual canvas flops too. */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RollImage() offsets an image as defined by x_offset and y_offset.
%
% The format of the RollImage method is:
%
% Image *RollImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset: the number of columns to roll in the horizontal direction.
%
% o y_offset: the number of rows to roll in the vertical direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CopyImageRegion() copies a columns-by-rows block of pixels from `source'
  at (sx,sy) into `destination' at (dx,dy), one scanline at a time
  (parallelized with OpenMP when available).  Returns MagickTrue on success,
  MagickFalse if any scanline could not be read or written.  Used by
  RollImage() to move the four wrapped quadrants.
*/
static MagickBooleanType CopyImageRegion(Image *destination,const Image *source, const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy,
  const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;
  MagickBooleanType
    status;
  ssize_t
    y;
  /* Nothing to copy when the region is empty. */
  if (columns == 0)
    return(MagickTrue);
  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source,exception);
  destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,destination,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;
    register const Quantum
      *magick_restrict p;
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;
    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;
      /* Copy only channels defined in both source and destination. */
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait source_traits=GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((source_traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(destination,channel,p[i],q);
      }
      p+=GetPixelChannels(source);
      q+=GetPixelChannels(destination);
    }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag "Roll/Image"
  Image
    *roll_image;
  MagickStatusType
    status;
  RectangleInfo
    shift;
  /*
    Clone the image and normalize the roll offsets into [0,columns) and
    [0,rows) so each pixel simply wraps around the canvas.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  roll_image=CloneImage(image,0,0,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  shift.x=x_offset;
  shift.y=y_offset;
  while (shift.y < 0)
    shift.y+=(ssize_t) image->rows;
  while (shift.y >= (ssize_t) image->rows)
    shift.y-=(ssize_t) image->rows;
  while (shift.x < 0)
    shift.x+=(ssize_t) image->columns;
  while (shift.x >= (ssize_t) image->columns)
    shift.x-=(ssize_t) image->columns;
  /*
    Roll image: copy the four wrapped quadrants into their new positions,
    reporting progress after each quadrant.
  */
  status=CopyImageRegion(roll_image,image,(size_t) shift.x,(size_t) shift.y,
    (ssize_t) image->columns-shift.x,(ssize_t) image->rows-shift.y,0,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  status&=CopyImageRegion(roll_image,image,image->columns-shift.x,
    (size_t) shift.y,0,(ssize_t) image->rows-shift.y,shift.x,0,exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  status&=CopyImageRegion(roll_image,image,(size_t) shift.x,image->rows-
    shift.y,(ssize_t) image->columns-shift.x,0,0,shift.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  status&=CopyImageRegion(roll_image,image,image->columns-shift.x,image->rows-
    shift.y,0,0,shift.x,shift.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShaveImage() shaves pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ShaveImage method is:
%
% Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o shave_image: Method ShaveImage returns a pointer to the shaved
% image. A null image is returned if there is a memory shortage or
% if the image width or height is zero.
%
% o image: the image.
%
% o shave_info: Specifies a pointer to a RectangleInfo which defines the
% region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
  const RectangleInfo *shave_info,ExceptionInfo *exception)
{
  Image
    *shave_image;
  RectangleInfo
    crop_area;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Refuse to shave more than the image holds on either axis.
  */
  if (((2*shave_info->width) >= image->columns) ||
      ((2*shave_info->height) >= image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Crop the interior region that remains after removing the borders.
  */
  SetGeometry(image,&crop_area);
  crop_area.width-=2*shave_info->width;
  crop_area.height-=2*shave_info->height;
  crop_area.x=(ssize_t) shave_info->width+image->page.x;
  crop_area.y=(ssize_t) shave_info->height+image->page.y;
  shave_image=CropImage(image,&crop_area,exception);
  if (shave_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Shrink the page canvas to match the shaved dimensions.
  */
  shave_image->page.width-=2*shave_info->width;
  shave_image->page.height-=2*shave_info->height;
  shave_image->page.x-=(ssize_t) shave_info->width;
  shave_image->page.y-=(ssize_t) shave_info->height;
  return(shave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImage() splices a solid color into the image as defined by the
% geometry.
%
% The format of the SpliceImage method is:
%
% Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to splice with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpliceImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define SpliceImageTag "Splice/Image"
  CacheView
    *image_view,
    *splice_view;
  Image
    *splice_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  RectangleInfo
    splice_geometry;
  ssize_t
    columns,
    y;
  /*
    Allocate splice image: the result is larger than the source by the
    requested band width and height.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  splice_geometry=(*geometry);
  splice_image=CloneImage(image,image->columns+splice_geometry.width,
    image->rows+splice_geometry.height,MagickTrue,exception);
  if (splice_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse)
    {
      splice_image=DestroyImage(splice_image);
      return((Image *) NULL);
    }
  if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) &&
      (IsGrayColorspace(splice_image->colorspace) != MagickFalse))
    (void) SetImageColorspace(splice_image,sRGBColorspace,exception);
  if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) &&
      (splice_image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlpha(splice_image,OpaqueAlpha,exception);
  (void) SetImageBackgroundColor(splice_image,exception);
  /*
    Respect image geometry: shift where the splice band falls according to
    the image's gravity.  Horizontal centering/alignment adjusts x by the
    band width; vertical centering/alignment adjusts y by the band height.
  */
  switch (image->gravity)
  {
    default:
    case UndefinedGravity:
    case NorthWestGravity:
      break;
    case NorthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      break;
    }
    case NorthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      break;
    }
    case WestGravity:
    {
      /*
        Fix: West gravity centers vertically, so the y offset must use the
        band height; the previous code used width here (copy-paste error) —
        every other gravity adjusting y uses height.
      */
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case CenterGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case EastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case SouthWestGravity:
    {
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
  }
  /*
    Splice image: rows above the band copy source pixels left of the band,
    skip the band's columns (already background-filled), then copy the
    remaining source pixels.
  */
  status=MagickTrue;
  progress=0;
  columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  splice_view=AcquireAuthenticCacheView(splice_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,splice_image,splice_geometry.y,1)
#endif
  for (y=0; y < (ssize_t) splice_geometry.y; y++)
  {
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    /* Skip over the spliced band; the background color already fills it. */
    for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
      q+=GetPixelChannels(splice_image);
    for ( ; x < (ssize_t) splice_image->columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SpliceImageTag,progress,
          splice_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Rows below the band: read from the source row shifted up by the band
    height; the band rows themselves keep the background color.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,splice_image,splice_image->rows,2)
#endif
  for (y=(ssize_t) (splice_geometry.y+splice_geometry.height);
       y < (ssize_t) splice_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;
    register ssize_t
      x;
    register Quantum
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    /* Defensive bounds check on the computed starting row. */
    if ((y < 0) || (y >= (ssize_t)splice_image->rows))
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height,
      splice_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    /* Skip over the spliced band; the background color already fills it. */
    for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
      q+=GetPixelChannels(splice_image);
    for ( ; x < (ssize_t) splice_image->columns; x++)
    {
      register ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SpliceImageTag,progress,
          splice_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  splice_view=DestroyCacheView(splice_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    splice_image=DestroyImage(splice_image);
  return(splice_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImage() is a convenience method that behaves like ResizeImage() or
% CropImage() but accepts scaling and/or cropping information as a region
% geometry specification. If the operation fails, the original image handle
% is left as is.
%
% This should only be used for single images.
%
% This function destroys what it assumes to be a single image list.
% If the input image is part of a larger list, all other images in that list
% will be simply 'lost', not destroyed.
%
% Also if the crop generates a list of images only the first image is resized.
% And finally if the crop succeeds and the resize failed, you will get a
% cropped image, as well as a 'false' or 'failed' report.
%
% This function should probably be deprecated in favor of direct calls
% to CropImageToTiles() or ResizeImage(), as appropriate.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
% const char *image_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image. The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType TransformImage(Image **image,
  const char *crop_geometry,const char *image_geometry,ExceptionInfo *exception)
{
  Image
    *resize_image,
    *transform_image;
  RectangleInfo
    geometry;
  assert(image != (Image **) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  transform_image=(*image);
  if (crop_geometry != (const char *) NULL)
    {
      Image
        *crop_image;
      /*
        Crop image to a user specified size; on crop failure, fall back to a
        clone of the original.  Either way *image takes ownership of the
        result (the original image handle may be destroyed here).
      */
      crop_image=CropImageToTiles(*image,crop_geometry,exception);
      if (crop_image == (Image *) NULL)
        transform_image=CloneImage(*image,0,0,MagickTrue,exception);
      else
        {
          transform_image=DestroyImage(transform_image);
          transform_image=GetFirstImageInList(crop_image);
        }
      *image=transform_image;
      /*
        Fix: CloneImage() can fail (e.g. resource exhaustion); without this
        guard, ParseRegionGeometry() below would dereference a NULL image.
      */
      if (transform_image == (Image *) NULL)
        return(MagickFalse);
    }
  if (image_geometry == (const char *) NULL)
    return(MagickTrue);
  /*
    Scale image to a user specified size; skip the resize when the image is
    already at the requested dimensions.
  */
  (void) ParseRegionGeometry(transform_image,image_geometry,&geometry,
    exception);
  if ((transform_image->columns == geometry.width) &&
      (transform_image->rows == geometry.height))
    return(MagickTrue);
  resize_image=ResizeImage(transform_image,geometry.width,geometry.height,
    transform_image->filter,exception);
  if (resize_image == (Image *) NULL)
    return(MagickFalse);
  transform_image=DestroyImage(transform_image);
  transform_image=resize_image;
  *image=transform_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p o s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransposeImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis while rotating them by 90 degrees.
%
% The format of the TransposeImage method is:
%
% Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag "Transpose/Image"
CacheView
*image_view,
*transpose_view;
Image
*transpose_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transpose_image == (Image *) NULL)
return((Image *) NULL);
/*
Transpose image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,transpose_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
0,1,transpose_image->rows,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait transpose_traits=GetPixelChannelTraits(transpose_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(transpose_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(transpose_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(transpose_image);
}
if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransposeImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transpose_view=DestroyCacheView(transpose_view);
image_view=DestroyCacheView(image_view);
transpose_image->type=image->type;
page=transpose_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
transpose_image->page=page;
if (status == MagickFalse)
transpose_image=DestroyImage(transpose_image);
return(transpose_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s v e r s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransverseImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis while rotating them by 270 degrees.
%
% The format of the TransverseImage method is:
%
% Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag "Transverse/Image"
CacheView
*image_view,
*transverse_view;
Image
*transverse_image;
MagickBooleanType
status;
MagickOffsetType
progress;
RectangleInfo
page;
ssize_t
y;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
The result is rotated, so clone with swapped dimensions (rows x columns).
*/
transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
exception);
if (transverse_image == (Image *) NULL)
return((Image *) NULL);
/*
Transverse image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,transverse_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
/* A failed row elsewhere aborts remaining iterations cheaply. */
if (status == MagickFalse)
continue;
/* Source row y is written as destination column (rows-y-1). */
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-1),
0,1,transverse_image->rows,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/*
Start q one past the end of the destination column and walk it
backwards: x runs forward through the source row while pixels land in
reverse order, producing the mirror component of the transverse.
*/
q+=GetPixelChannels(transverse_image)*image->columns;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
q-=GetPixelChannels(transverse_image);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait transverse_traits=GetPixelChannelTraits(transverse_image,
channel);
/* Copy only channels defined in both source and destination. */
if ((traits == UndefinedPixelTrait) ||
(transverse_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(transverse_image,channel,p[i],q);
}
p+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransverseImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
transverse_view=DestroyCacheView(transverse_view);
image_view=DestroyCacheView(image_view);
transverse_image->type=image->type;
/*
The page geometry rotates with the pixels; the offsets are additionally
reflected so the canvas placement matches the mirrored orientation.
*/
page=transverse_image->page;
Swap(page.width,page.height);
Swap(page.x,page.y);
if (page.width != 0)
page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
if (page.height != 0)
page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
transverse_image->page=page;
/* On any row failure, destroy the partial result and return NULL. */
if (status == MagickFalse)
transverse_image=DestroyImage(transverse_image);
return(transverse_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r i m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TrimImage() trims pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the TrimImage method is:
%
% Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  Image
    *blank_image;

  RectangleInfo
    geometry;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Crop away the border as determined by the image bounding box.
  */
  geometry=GetImageBoundingBox(image,exception);
  if ((geometry.width != 0) && (geometry.height != 0))
    {
      geometry.x+=image->page.x;
      geometry.y+=image->page.y;
      return(CropImage(image,&geometry,exception));
    }
  /*
    Degenerate bounding box: the image is uniform.  Return a 1x1
    transparent stand-in with a (-1,-1) page offset.
  */
  blank_image=CloneImage(image,1,1,MagickTrue,exception);
  if (blank_image == (Image *) NULL)
    return((Image *) NULL);
  blank_image->background_color.alpha_trait=BlendPixelTrait;
  blank_image->background_color.alpha=(MagickRealType) TransparentAlpha;
  (void) SetImageBackgroundColor(blank_image,exception);
  blank_image->page=image->page;
  blank_image->page.x=(-1);
  blank_image->page.y=(-1);
  return(blank_image);
}
|
convolution_5x5.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// conv5x5s1_neon: 5x5 convolution, stride 1, single-precision float.
// For each output channel p, seeds the whole plane with bias[p] (or 0)
// and accumulates the 5x5 correlation over every input channel q.
// Kernel layout: 25 contiguous weights per (p,q) pair.
// NOTE(review): the row-advance arithmetic (r0 += 4 + w after a row pair,
// r0 += 4 after a single row) implies w == outw + 4, i.e. the caller has
// already padded/cropped bottom_blob accordingly -- confirm at call site.
// The NEON path processes two output rows x four columns per asm loop
// iteration; leftovers fall through to the scalar/intrinsic tail.
static void conv5x5s1_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q = 0; q < inch; q++)
{
// outptr/outptr2: two consecutive output rows written per iteration.
float* outptr = out;
float* outptr2 = outptr + outw;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p * inch * 25 + q * 25;
// r0..r5: six consecutive input rows (five per output row, overlapping
// by four rows between the two output rows handled together).
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* r3 = img0 + w * 3;
const float* r4 = img0 + w * 4;
const float* r5 = img0 + w * 5;
// k0..k4: the five kernel rows (5 floats each).
const float* k0 = kernel0;
const float* k1 = kernel0 + 5;
const float* k2 = kernel0 + 10;
const float* k3 = kernel0 + 15;
const float* k4 = kernel0 + 20;
#if __ARM_NEON
// Overlapping 4-wide views of the 25 weights; _k24242424 broadcasts
// the last weight.  Indexed as %18..%24 (aarch64) / %14..%20 below.
float32x4_t _k0123 = vld1q_f32(kernel0);
float32x4_t _k4567 = vld1q_f32(kernel0 + 4);
float32x4_t _k891011 = vld1q_f32(kernel0 + 8);
float32x4_t _k12131415 = vld1q_f32(kernel0 + 12);
float32x4_t _k16171819 = vld1q_f32(kernel0 + 16);
float32x4_t _k20212223 = vld1q_f32(kernel0 + 20);
float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
#endif // __ARM_NEON
int i = 0;
// Main loop: two output rows at a time.
for (; i + 1 < outh; i += 2)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
// v11 = rx1 / rx3
// v12 = rx2
// v13 v14 = intermediate sum register
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v7.4s}, [%1] \n" // v7 = out
"0: \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v8.4s}, [%2] \n" // v8 = out2
// r1
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v9.4s, v10.4s}, [%4] \n" // v9 v10 = r10 r14
"add %4, %4, #16 \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n" //r11
"fmul v13.4s, v9.4s, %19.s[1] \n"
"fmla v8.4s, v9.4s, %18.s[0] \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n" //r12
"fmla v7.4s, v11.4s, %19.s[2] \n"
"fmul v14.4s, v11.4s, %18.s[1] \n"
"ext v11.16b, v9.16b, v10.16b, #12 \n" //r13
"fmla v13.4s, v12.4s, %19.s[3] \n"
"fmla v8.4s, v12.4s, %18.s[2] \n"
"fmla v7.4s, v11.4s, %20.s[0] \n"
"fmla v14.4s, v11.4s, %18.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"fmla v13.4s, v10.4s, %20.s[1] \n"
"fmla v8.4s, v10.4s, %19.s[0] \n"
// r2
"ld1 {v9.4s, v10.4s}, [%5] \n" // v9 v10 = r20 r24
"add %5, %5, #16 \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n" //r21
"fmla v7.4s, v9.4s, %20.s[2] \n"
"fmla v14.4s, v9.4s, %19.s[1] \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n" //r22
"fmla v13.4s, v11.4s, %20.s[3] \n"
"fmla v8.4s, v11.4s, %19.s[2] \n"
"ext v11.16b, v9.16b, v10.16b, #12 \n" //r23
"fmla v7.4s, v12.4s, %21.s[0] \n"
"fmla v14.4s, v12.4s, %19.s[3] \n"
"fmla v13.4s, v11.4s, %21.s[1] \n"
"fmla v8.4s, v11.4s, %20.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"fmla v7.4s, v10.4s, %21.s[2] \n"
"fmla v14.4s, v10.4s, %20.s[1] \n"
// r3
"ld1 {v9.4s, v10.4s}, [%6] \n" // v9 v10 = r30 r34
"add %6, %6, #16 \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n" //r31
"fmla v13.4s, v9.4s, %21.s[3] \n"
"fmla v8.4s, v9.4s, %20.s[2] \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n" //r32
"fmla v7.4s, v11.4s, %22.s[0] \n"
"fmla v14.4s, v11.4s, %20.s[3] \n"
"ext v11.16b, v9.16b, v10.16b, #12 \n" //r33
"fmla v13.4s, v12.4s, %22.s[1] \n"
"fmla v8.4s, v12.4s, %21.s[0] \n"
"fmla v7.4s, v11.4s, %22.s[2] \n"
"fmla v14.4s, v11.4s, %21.s[1] \n"
"prfm pldl1keep, [%7, #256] \n"
"fmla v13.4s, v10.4s, %22.s[3] \n"
"fmla v8.4s, v10.4s, %21.s[2] \n"
// r4
"ld1 {v9.4s, v10.4s}, [%7] \n" // v9 v10 = r40 r44
"add %7, %7, #16 \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n" //r41
"fmla v7.4s, v9.4s, %23.s[0] \n"
"fmla v14.4s, v9.4s, %21.s[3] \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n" //r41
"fmla v13.4s, v11.4s, %23.s[1] \n"
"fmla v8.4s, v11.4s, %22.s[0] \n"
"ext v11.16b, v9.16b, v10.16b, #12 \n" //r41
"fmla v7.4s, v12.4s, %23.s[2] \n"
"fmla v14.4s, v12.4s, %22.s[1] \n"
"fmla v13.4s, v11.4s, %23.s[3] \n"
"fmla v8.4s, v11.4s, %22.s[2] \n"
"prfm pldl1keep, [%3, #256] \n"
"fmla v7.4s, v10.4s, %24.s[0] \n"
"fmla v14.4s, v10.4s, %22.s[3] \n"
// r0 and r5
"ld1 {v9.4s, v10.4s}, [%3] \n" // v9 v10 = r00 r04
"add %3, %3, #16 \n"
"ext v11.16b, v9.16b, v10.16b, #4 \n" //r01
"fmla v13.4s, v11.4s, %18.s[1] \n"
"ext v12.16b, v9.16b, v10.16b, #8 \n" //r02
"fmla v7.4s, v12.4s, %18.s[2] \n"
"ext v11.16b, v9.16b, v10.16b, #12 \n" //r03
"prfm pldl1keep, [%8, #256] \n"
"fmla v13.4s, v11.4s, %18.s[3] \n"
// r5
"ld1 {v11.4s, v12.4s}, [%8] \n" // v11 v12 = r50 r54
"add %8, %8, #16 \n"
"fmla v8.4s, v11.4s, %23.s[0] \n"
"fmla v14.4s, v12.4s, %24.s[0] \n"
"fmla v7.4s, v9.4s, %18.s[0] \n"
"fmla v13.4s, v10.4s, %19.s[0] \n"
"ext v9.16b, v11.16b, v12.16b, #4 \n" //r51
"ext v10.16b, v11.16b, v12.16b, #8 \n" //r52
"fmla v14.4s, v9.4s, %23.s[1] \n"
"ext v9.16b, v11.16b, v12.16b, #12 \n" //r53
"fmla v8.4s, v10.4s, %23.s[2] \n"
"fmla v14.4s, v9.4s, %23.s[3] \n"
"fadd v7.4s, v7.4s, v13.4s \n"
"st1 {v7.4s}, [%1], #16 \n"
"fadd v8.4s, v8.4s, v14.4s \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v7.4s}, [%1] \n" // v7 = out
"st1 {v8.4s}, [%2], #16 \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3), // %6
"=r"(r4), // %7
"=r"(r5) // %8
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"7"(r4),
"8"(r5),
"w"(_k0123), // %18
"w"(_k4567), // %19
"w"(_k891011), // %20
"w"(_k12131415), // %21
"w"(_k16171819), // %22
"w"(_k20212223), // %23
"w"(_k24242424) // %24
: "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
}
#else
if (nn > 0)
{
asm volatile(
// "veor q13, q13 \n"
// "veor q14, q14 \n"
"pld [%1, #128] \n"
"vld1.f32 {d14-d15}, [%1] \n" // q7 = out
"0: \n"
// q11 = rx1 / rx3
// q12 = rx2
// q13 q14 = intermediate sum register
"pld [%2, #128] \n"
"vld1.f32 {d16-d17}, [%2] \n" // q8 = out2
"pld [%4, #256] \n"
// r1
"vld1.f32 {d18-d21}, [%4] \n" // q9 q10 = r10 r14
"add %4, #16 \n"
"vext.32 q11, q9, q10, #1 \n" // r11
"vmul.f32 q13, q9, %e19[1] \n"
"vmla.f32 q8, q9, %e18[0] \n"
"vext.32 q12, q9, q10, #2 \n" // r12
"vmla.f32 q7, q11, %f19[0] \n"
"vmul.f32 q14, q11, %e18[1] \n"
"vext.32 q11, q9, q10, #3 \n" // r13
"vmla.f32 q13, q12, %f19[1] \n"
"vmla.f32 q8, q12, %f18[0] \n"
"vmla.f32 q7, q11, %e20[0] \n"
"vmla.f32 q14, q11, %f18[1] \n"
"pld [%5, #256] \n"
"vmla.f32 q13, q10, %e20[1] \n"
"vmla.f32 q8, q10, %e19[0] \n"
// r2
"vld1.f32 {d18-d21}, [%5] \n" // q9 q10 = r20 r24
"add %5, #16 \n"
"vext.32 q11, q9, q10, #1 \n" // r21
"vmla.f32 q7, q9, %f20[0] \n"
"vmla.f32 q14, q9, %e19[1] \n"
"vext.32 q12, q9, q10, #2 \n" // r22
"vmla.f32 q13, q11, %f20[1] \n"
"vmla.f32 q8, q11, %f19[0] \n"
"vext.32 q11, q9, q10, #3 \n" // r23
"vmla.f32 q7, q12, %e21[0] \n"
"vmla.f32 q14, q12, %f19[1] \n"
"vmla.f32 q13, q11, %e21[1] \n"
"vmla.f32 q8, q11, %e20[0] \n"
"pld [%6, #256] \n"
"vmla.f32 q7, q10, %f21[0] \n"
"vmla.f32 q14, q10, %e20[1] \n"
// r3
"vld1.f32 {d18-d21}, [%6] \n" // q9 q10 = r30 r34
"add %6, #16 \n"
"vext.32 q11, q9, q10, #1 \n" // r31
"vmla.f32 q13, q9, %f21[1] \n"
"vmla.f32 q8, q9, %f20[0] \n"
"vext.32 q12, q9, q10, #2 \n" // r32
"vmla.f32 q7, q11, %e22[0] \n"
"vmla.f32 q14, q11, %f20[1] \n"
"vext.32 q11, q9, q10, #3 \n" // r33
"vmla.f32 q13, q12, %e22[1] \n"
"vmla.f32 q8, q12, %e21[0] \n"
"vmla.f32 q7, q11, %f22[0] \n"
"vmla.f32 q14, q11, %e21[1] \n"
"pld [%7, #256] \n"
"vmla.f32 q13, q10, %f22[1] \n"
"vmla.f32 q8, q10, %f21[0] \n"
// r4
"vld1.f32 {d18-d21}, [%7] \n" // q9 q10 = r40 r44
"add %7, #16 \n"
"vext.32 q11, q9, q10, #1 \n" // r41
"vmla.f32 q7, q9, %e23[0] \n"
"vmla.f32 q14, q9, %f21[1] \n"
"vext.32 q12, q9, q10, #2 \n" // r42
"vmla.f32 q13, q11, %e23[1] \n"
"vmla.f32 q8, q11, %e22[0] \n"
"vext.32 q11, q9, q10, #3 \n" // r43
"vmla.f32 q7, q12, %f23[0] \n"
"vmla.f32 q14, q12, %e22[1] \n"
"vmla.f32 q13, q11, %f23[1] \n"
"vmla.f32 q8, q11, %f22[0] \n"
"pld [%3, #256] \n"
"vmla.f32 q7, q10, %e24[0] \n"
"vmla.f32 q14, q10, %f22[1] \n"
// r0 and r5
"vld1.f32 {d18-d21}, [%3] \n" // q9 q10 = r00 r04
"add %3, #16 \n"
"vext.32 q11, q9, q10, #1 \n" // r01
"vmla.f32 q13, q11, %e18[1] \n"
"vext.32 q12, q9, q10, #2 \n" // r02
"vmla.f32 q7, q12, %f18[0] \n"
"vext.32 q11, q9, q10, #3 \n" // r03
"pld [%8, #256] \n"
"vmla.f32 q13, q11, %f18[1] \n"
// r5
"vld1.f32 {d22-d25}, [%8] \n" // q11 q12 = r50 r54
"add %8, #16 \n"
"vmla.f32 q8, q11, %e23[0] \n"
"vmla.f32 q14, q12, %e24[0] \n"
"vmla.f32 q7, q9, %e18[0] \n"
"vmla.f32 q13, q10, %e19[0] \n"
"vext.32 q9, q11, q12, #1 \n" // r51
"vext.32 q10, q11, q12, #2 \n" // r52
"vmla.f32 q14, q9, %e23[1] \n"
"vext.32 q9, q11, q12, #3 \n" // r53
"vmla.f32 q8, q10, %f23[0] \n"
"vmla.f32 q14, q9, %f23[1] \n"
"vadd.f32 q7, q7, q13 \n"
// "veor q13, q13 \n"
"vst1.f32 {d14-d15}, [%1]! \n"
"vadd.f32 q8, q8, q14 \n"
"pld [%1, #128] \n"
"vld1.f32 {d14-d15}, [%1] \n" // q7 = out
// "veor q14, q14 \n"
"vst1.f32 {d16-d17}, [%2]! \n"
"subs %0, #1 \n"
"bne 0b \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(outptr2), // %2
"=r"(r0), // %3
"=r"(r1), // %4
"=r"(r2), // %5
"=r"(r3), // %6
"=r"(r4), // %7
"=r"(r5) // %8
: "0"(nn),
"1"(outptr),
"2"(outptr2),
"3"(r0),
"4"(r1),
"5"(r2),
"6"(r3),
"7"(r4),
"8"(r5),
"w"(_k0123), // %18
"w"(_k4567), // %19
"w"(_k891011), // %20
"w"(_k12131415), // %21
"w"(_k16171819), // %22
"w"(_k20212223), // %23
"w"(_k24242424) // %24
: "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
// Scalar/intrinsic tail for the leftover columns of this row pair.
for (; remain > 0; remain--)
{
float sum = 0;
float sum2 = 0;
#if __ARM_NEON
// Columns 0..3 of each kernel row via 4-wide multiply-accumulate;
// column 4 is gathered into _k_t4/_r_t4 lane vectors below.
float32x4_t _r1 = vld1q_f32(r1);
float32x4_t _k1 = vld1q_f32(k1);
float32x4_t _sum = vmulq_f32(_r1, _k1);
float32x4_t _sum2 = vmulq_f32(_r1, _k0123);
float32x4_t _r2 = vld1q_f32(r2);
float32x4_t _k2 = vld1q_f32(k2);
_sum = vmlaq_f32(_sum, _r2, _k2);
_sum2 = vmlaq_f32(_sum2, _r2, _k1);
float32x4_t _r3 = vld1q_f32(r3);
float32x4_t _k3 = vld1q_f32(k3);
_sum = vmlaq_f32(_sum, _r3, _k3);
_sum2 = vmlaq_f32(_sum2, _r3, _k2);
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
_sum2 = vmlaq_f32(_sum2, _r4, _k3);
float32x4_t _r0 = vld1q_f32(r0);
_sum = vmlaq_f32(_sum, _r0, _k0123);
float32x4_t _r5 = vld1q_f32(r5);
_sum2 = vmlaq_f32(_sum2, _r5, _k20212223);
float32x4_t _k_t4 = {};
_k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
_k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
_k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
_k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);
float32x4_t _r_t4 = {};
_r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
_r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
_r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
_r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
_sum = vmlaq_f32(_sum, _r_t4, _k_t4);
sum = r4[4] * k4[4];
// Rotate the column-4 vector so it holds r1..r4 for the second row.
_r_t4 = vextq_f32(_r_t4, _r_t4, 1);
_r_t4 = vsetq_lane_f32(r4[4], _r_t4, 3);
_sum2 = vmlaq_f32(_sum2, _r_t4, _k_t4);
sum2 = r5[4] * k4[4];
// Horizontal reduction of both accumulators in one vpadd.
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
float32x2_t _ss2 = vadd_f32(vget_low_f32(_sum2), vget_high_f32(_sum2));
float32x2_t _ss_ss2 = vpadd_f32(_ss, _ss2);
sum += vget_lane_f32(_ss_ss2, 0);
sum2 += vget_lane_f32(_ss_ss2, 1);
#else
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
sum2 += r1[0] * k0[0];
sum2 += r1[1] * k0[1];
sum2 += r1[2] * k0[2];
sum2 += r1[3] * k0[3];
sum2 += r1[4] * k0[4];
sum2 += r2[0] * k1[0];
sum2 += r2[1] * k1[1];
sum2 += r2[2] * k1[2];
sum2 += r2[3] * k1[3];
sum2 += r2[4] * k1[4];
sum2 += r3[0] * k2[0];
sum2 += r3[1] * k2[1];
sum2 += r3[2] * k2[2];
sum2 += r3[3] * k2[3];
sum2 += r3[4] * k2[4];
sum2 += r4[0] * k3[0];
sum2 += r4[1] * k3[1];
sum2 += r4[2] * k3[2];
sum2 += r4[3] * k3[3];
sum2 += r4[4] * k3[4];
sum2 += r5[0] * k4[0];
sum2 += r5[1] * k4[1];
sum2 += r5[2] * k4[2];
sum2 += r5[3] * k4[3];
sum2 += r5[4] * k4[4];
#endif // __ARM_NEON
*outptr += sum;
*outptr2 += sum2;
r0++;
r1++;
r2++;
r3++;
r4++;
r5++;
outptr++;
outptr2++;
}
// Advance input rows by (w - outw) + w = 4 + w: skip this row's 4-column
// border and the second row consumed by the pair.
r0 += 4 + w;
r1 += 4 + w;
r2 += 4 + w;
r3 += 4 + w;
r4 += 4 + w;
r5 += 4 + w;
// outptr already passed row i; skip row i+1 written through outptr2.
outptr += outw;
outptr2 += outw;
}
// Leftover single output row (when outh is odd).
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%1, #128] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v8.4s, v9.4s}, [%2] \n" // _r00 = vld1q_f32(r0+j);
"add %2, %2, #16 \n"
"0: \n"
"ld1 {v7.4s}, [%1] \n" // _sum = vld1q_f32(outptr+j);
"ext v10.16b, v8.16b, v9.16b, #4 \n" //_r01
"ext v11.16b, v8.16b, v9.16b, #8 \n" //_r02
"ext v12.16b, v8.16b, v9.16b, #12 \n" //_r03
"fmla v7.4s, v8.4s, %14.s[0] \n"
"fmul v13.4s, v10.4s, %14.s[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"fmul v14.4s, v11.4s, %14.s[2] \n"
"fmul v15.4s, v12.4s, %14.s[3] \n"
"fmla v7.4s, v9.4s, %15.s[0] \n"
"ld1 {v8.4s, v9.4s}, [%3] \n"
"add %3, %3, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n" //_r11
"ext v11.16b, v8.16b, v9.16b, #8 \n" //_r12
"ext v12.16b, v8.16b, v9.16b, #12 \n" //_r13
"fmla v7.4s, v8.4s, %15.s[1] \n"
"fmla v13.4s, v10.4s, %15.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"fmla v14.4s, v11.4s, %15.s[3] \n"
"fmla v15.4s, v12.4s, %16.s[0] \n"
"fmla v7.4s, v9.4s, %16.s[1] \n"
"ld1 {v8.4s, v9.4s}, [%4] \n"
"add %4, %4, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n" //_r21
"ext v11.16b, v8.16b, v9.16b, #8 \n" //_r22
"ext v12.16b, v8.16b, v9.16b, #12 \n" //_r23
"fmla v7.4s, v8.4s, %16.s[2] \n"
"fmla v13.4s, v10.4s, %16.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"fmla v14.4s, v11.4s, %17.s[0] \n"
"fmla v15.4s, v12.4s, %17.s[1] \n"
"fmla v7.4s, v9.4s, %17.s[2] \n"
"ld1 {v8.4s, v9.4s}, [%5] \n"
"add %5, %5, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n" //_r31
"ext v11.16b, v8.16b, v9.16b, #8 \n" //_r32
"ext v12.16b, v8.16b, v9.16b, #12 \n" //_r33
"fmla v7.4s, v8.4s, %17.s[3] \n"
"fmla v13.4s, v10.4s, %18.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"fmla v14.4s, v11.4s, %18.s[1] \n"
"fmla v15.4s, v12.4s, %18.s[2] \n"
"fmla v7.4s, v9.4s, %18.s[3] \n"
"ld1 {v8.4s, v9.4s}, [%6] \n"
"add %6, %6, #16 \n"
"ext v10.16b, v8.16b, v9.16b, #4 \n" //_r41
"ext v11.16b, v8.16b, v9.16b, #8 \n" //_r42
"ext v12.16b, v8.16b, v9.16b, #12 \n" //_r43
"fmla v7.4s, v8.4s, %19.s[0] \n"
"fmla v13.4s, v10.4s, %19.s[1] \n"
"fmla v14.4s, v11.4s, %19.s[2] \n"
"fmla v15.4s, v12.4s, %19.s[3] \n"
"fmla v7.4s, v9.4s, %20.s[0] \n"
"fadd v14.4s, v14.4s, v15.4s \n"
"fadd v7.4s, v7.4s, v13.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"fadd v7.4s, v7.4s, v14.4s \n"
"ld1 {v8.4s, v9.4s}, [%2] \n"
"add %2, %2, #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
"prfm pldl1keep, [%1, #128] \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
"sub %2, %2, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424) // %20
: "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
}
#else
if (nn > 0)
{
asm volatile(
// "veor q15, q15 \n"// _sum3 = 0;
"pld [%1, #128] \n"
"pld [%2, #256] \n"
"vld1.f32 {d16-d19}, [%2] \n" // _r00 = vld1q_f32(r0+j);
"add %2, #16 \n"
"0: \n"
"vld1.f32 {d14-d15}, [%1] \n" // _sum = vld1q_f32(outptr+j);
// "veor q13, q13 \n"// _sum2 = 0;
// "veor q14, q14 \n"// _sum3 = 0;
"vext.32 q10, q8, q9, #1 \n" // _r01
"vext.32 q11, q8, q9, #2 \n" // _r02
"vext.32 q12, q8, q9, #3 \n" // _r03
"vmla.f32 q7, q8, %e14[0] \n"
"vmul.f32 q13, q10, %e14[1] \n"
"pld [%3, #256] \n"
"vmul.f32 q14, q11, %f14[0] \n"
"vmul.f32 q15, q12, %f14[1] \n"
"vmla.f32 q7, q9, %e15[0] \n"
"vld1.f32 {d16-d19}, [%3] \n"
"add %3, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vext.32 q12, q8, q9, #3 \n"
"vmla.f32 q7, q8, %e15[1] \n"
"vmla.f32 q13, q10, %f15[0] \n"
"pld [%4, #256] \n"
"vmla.f32 q14, q11, %f15[1] \n"
"vmla.f32 q15, q12, %e16[0] \n"
"vmla.f32 q7, q9, %e16[1] \n"
"vld1.f32 {d16-d19}, [%4] \n"
"add %4, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vext.32 q12, q8, q9, #3 \n"
"vmla.f32 q7, q8, %f16[0] \n"
"vmla.f32 q13, q10, %f16[1] \n"
"pld [%5, #256] \n"
"vmla.f32 q14, q11, %e17[0] \n"
"vmla.f32 q15, q12, %e17[1] \n"
"vmla.f32 q7, q9, %f17[0] \n"
"vld1.f32 {d16-d19}, [%5] \n"
"add %5, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vext.32 q12, q8, q9, #3 \n"
"vmla.f32 q7, q8, %f17[1] \n"
"vmla.f32 q13, q10, %e18[0] \n"
"pld [%6, #256] \n"
"vmla.f32 q14, q11, %e18[1] \n"
"vmla.f32 q15, q12, %f18[0] \n"
"vmla.f32 q7, q9, %f18[1] \n"
"vld1.f32 {d16-d19}, [%6] \n"
"add %6, #16 \n"
"vext.32 q10, q8, q9, #1 \n"
"vext.32 q11, q8, q9, #2 \n"
"vext.32 q12, q8, q9, #3 \n"
"vmla.f32 q7, q8, %e19[0] \n"
"vmla.f32 q13, q10, %e19[1] \n"
"vmla.f32 q14, q11, %f19[0] \n"
"vmla.f32 q15, q12, %f19[1] \n"
"vmla.f32 q7, q9, %e20[0] \n"
"vadd.f32 q14, q14, q15 \n"
"vadd.f32 q7, q7, q13 \n"
// "veor q15, q15 \n"// _sum3 = 0;
"pld [%2, #256] \n"
"vadd.f32 q7, q7, q14 \n"
"vld1.f32 {d16-d19}, [%2] \n" // _r00 = vld1q_f32(r0+j);
"add %2, #16 \n"
"vst1.f32 {d14-d15}, [%1]! \n"
"pld [%1, #128] \n"
"subs %0, #1 \n"
"bne 0b \n"
"sub %2, #16 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424) // %20
: "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
// Scalar/intrinsic tail for the leftover columns of the single row.
for (; remain > 0; remain--)
{
float sum = 0;
#if __ARM_NEON
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _sum = vmulq_f32(_r0, _k0123);
float32x4_t _r1 = vld1q_f32(r1);
_sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));
float32x4_t _r2 = vld1q_f32(r2);
_sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));
float32x4_t _r3 = vld1q_f32(r3);
_sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
// Gather kernel/input column 4 (rows 0..3) into lane vectors;
// the final r4[4]*k4[4] term is handled in scalar.
float32x4_t _k_t4 = {};
_k_t4 = vsetq_lane_f32(k0[4], _k_t4, 0);
_k_t4 = vsetq_lane_f32(k1[4], _k_t4, 1);
_k_t4 = vsetq_lane_f32(k2[4], _k_t4, 2);
_k_t4 = vsetq_lane_f32(k3[4], _k_t4, 3);
float32x4_t _r_t4 = {};
_r_t4 = vsetq_lane_f32(r0[4], _r_t4, 0);
_r_t4 = vsetq_lane_f32(r1[4], _r_t4, 1);
_r_t4 = vsetq_lane_f32(r2[4], _r_t4, 2);
_r_t4 = vsetq_lane_f32(r3[4], _r_t4, 3);
_sum = vmlaq_f32(_sum, _r_t4, _k_t4);
sum = r4[4] * k4[4];
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
sum += vget_lane_f32(_ss, 0);
#else
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
#endif
*outptr += sum;
r0++;
r1++;
r2++;
r3++;
r4++;
outptr++;
}
// Skip the 4-column border to reach the start of the next input row.
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
}
}
}
}
// conv5x5s2_neon: 5x5 convolution, stride 2, single-precision float.
// Same weight layout and bias seeding as conv5x5s1_neon, but consumes
// input columns two at a time; the NEON path uses vld2/ld2 de-interleaving
// loads to split even/odd columns.  One output row per iteration.
static void conv5x5s2_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// After a row, r* has advanced 2*outw columns; tailstep moves it the rest
// of this row plus one whole row (stride 2 skips every other input row).
const int tailstep = w - 2 * outw + w;
const float* kernel = _kernel;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
out.fill(bias0);
for (int q = 0; q < inch; q++)
{
float* outptr = out;
const float* img0 = bottom_blob.channel(q);
const float* kernel0 = kernel + p * inch * 25 + q * 25;
// r0..r4: the five input rows contributing to the current output row.
const float* r0 = img0;
const float* r1 = img0 + w;
const float* r2 = img0 + w * 2;
const float* r3 = img0 + w * 3;
const float* r4 = img0 + w * 4;
// k0..k4: the five kernel rows (5 floats each).
const float* k0 = kernel0;
const float* k1 = kernel0 + 5;
const float* k2 = kernel0 + 10;
const float* k3 = kernel0 + 15;
const float* k4 = kernel0 + 20;
#if __ARM_NEON
// Overlapping 4-wide views of the 25 weights, indexed %14..%20 in asm.
float32x4_t _k0123 = vld1q_f32(kernel0);
float32x4_t _k4567 = vld1q_f32(kernel0 + 4);
float32x4_t _k891011 = vld1q_f32(kernel0 + 8);
float32x4_t _k12131415 = vld1q_f32(kernel0 + 12);
float32x4_t _k16171819 = vld1q_f32(kernel0 + 16);
float32x4_t _k20212223 = vld1q_f32(kernel0 + 20);
float32x4_t _k24242424 = vdupq_n_f32(kernel0[24]);
#endif // __ARM_NEON
for (int i = 0; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 2;
int remain = outw - (nn << 2);
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
#if __aarch64__
if (nn > 0)
{
asm volatile(
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v8.4s, v9.4s}, [%2], #32 \n" // v8 = 0 2 4 6 q9 = 1 3 5 7
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v10.4s, v11.4s}, [%2] \n" // v10 = 8 10 12 14 v11 = 9 11 13 15
"prfm pldl1keep, [%1, #128] \n"
"0: \n"
"ld1 {v7.4s}, [%1] \n" // v7 = outptr
"ext v12.16b, v8.16b, v10.16b, #4 \n" // v12 = 2 4 6 8
"ext v11.16b, v9.16b, v11.16b, #4 \n" // v11 = 3 5 7 9
"ext v10.16b, v8.16b, v10.16b, #8 \n" // v10 = 4 6 8 10
"fmla v7.4s, v8.4s, %14.s[0] \n"
"fmul v13.4s, v9.4s, %14.s[1] \n"
"prfm pldl1keep, [%3, #256] \n"
"fmul v14.4s, v12.4s, %14.s[2] \n"
"fmul v15.4s, v11.4s, %14.s[3] \n"
"fmla v7.4s, v10.4s, %15.s[0] \n"
"ld2 {v8.4s, v9.4s}, [%3], #32 \n"
"prfm pldl1keep, [%3, #256] \n"
"ld2 {v10.4s, v11.4s}, [%3] \n"
"ext v12.16b, v8.16b, v10.16b, #4 \n"
"ext v11.16b, v9.16b, v11.16b, #4 \n"
"ext v10.16b, v8.16b, v10.16b, #8 \n"
"fmla v7.4s, v8.4s, %15.s[1] \n"
"fmla v13.4s, v9.4s, %15.s[2] \n"
"prfm pldl1keep, [%4, #256] \n"
"fmla v14.4s, v12.4s, %15.s[3] \n"
"fmla v15.4s, v11.4s, %16.s[0] \n"
"fmla v7.4s, v10.4s, %16.s[1] \n"
"ld2 {v8.4s, v9.4s}, [%4], #32 \n"
"prfm pldl1keep, [%4, #256] \n"
"ld2 {v10.4s, v11.4s}, [%4] \n"
"ext v12.16b, v8.16b, v10.16b, #4 \n"
"ext v11.16b, v9.16b, v11.16b, #4 \n"
"ext v10.16b, v8.16b, v10.16b, #8 \n"
"fmla v7.4s, v8.4s, %16.s[2] \n"
"fmla v13.4s, v9.4s, %16.s[3] \n"
"prfm pldl1keep, [%5, #256] \n"
"fmla v14.4s, v12.4s, %17.s[0] \n"
"fmla v15.4s, v11.4s, %17.s[1] \n"
"fmla v7.4s, v10.4s, %17.s[2] \n"
"ld2 {v8.4s, v9.4s}, [%5], #32 \n"
"prfm pldl1keep, [%5, #256] \n"
"ld2 {v10.4s, v11.4s}, [%5] \n"
"ext v12.16b, v8.16b, v10.16b, #4 \n"
"ext v11.16b, v9.16b, v11.16b, #4 \n"
"ext v10.16b, v8.16b, v10.16b, #8 \n"
"fmla v7.4s, v8.4s, %17.s[3] \n"
"fmla v13.4s, v9.4s, %18.s[0] \n"
"prfm pldl1keep, [%6, #256] \n"
"fmla v14.4s, v12.4s, %18.s[1] \n"
"fmla v15.4s, v11.4s, %18.s[2] \n"
"fmla v7.4s, v10.4s, %18.s[3] \n"
"ld2 {v8.4s, v9.4s}, [%6], #32 \n"
"prfm pldl1keep, [%6, #256] \n"
"ld2 {v10.4s, v11.4s}, [%6] \n"
"ext v12.16b, v8.16b, v10.16b, #4 \n"
"ext v11.16b, v9.16b, v11.16b, #4 \n"
"ext v10.16b, v8.16b, v10.16b, #8 \n"
"fmla v7.4s, v8.4s, %19.s[0] \n"
"fmla v13.4s, v9.4s, %19.s[1] \n"
"fmla v14.4s, v12.4s, %19.s[2] \n"
"fmla v15.4s, v11.4s, %19.s[3] \n"
"fmla v7.4s, v10.4s, %20.s[0] \n"
"prfm pldl1keep, [%2, #256] \n"
"ld2 {v8.4s, v9.4s}, [%2], #32 \n"
"fadd v14.4s, v14.4s, v15.4s \n"
"fadd v7.4s, v7.4s, v13.4s \n"
"prfm pldl1keep, [%2, #256] \n"
"fadd v7.4s, v7.4s, v14.4s \n"
"ld2 {v10.4s, v11.4s}, [%2] \n"
"st1 {v7.4s}, [%1], #16 \n"
"prfm pldl1keep, [%1, #128] \n"
"subs %w0, %w0, #1 \n"
"bne 0b \n"
"sub %2, %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424) // %20
: "cc", "memory", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
}
#else
if (nn > 0)
{
asm volatile(
// "veor q15, q15 \n"// _sump3 = 0;
// "veor q13, q13 \n"// _sump2 = 0;
// "veor q14, q14 \n"// _sump3 = 0;
"pld [%2, #256] \n"
"vld2.f32 {d16-d19}, [%2]! \n" // q8 = 0 2 4 6 q9 = 1 3 5 7
"pld [%2, #256] \n"
"vld2.f32 {d20-d23}, [%2] \n" // q10 = 8 10 12 14 q11 = 9 11 13 15
"pld [%1, #128] \n"
"0: \n"
"vld1.f32 {d14-d15}, [%1] \n" // q7 = outptr
"vext.32 q12, q8, q10, #1 \n" // q12 = 2 4 6 8
"vext.32 q11, q9, q11, #1 \n" // q11 = 3 5 7 9
"vext.32 q10, q8, q10, #2 \n" // q10 = 4 6 8 10
"vmla.f32 q7, q8, %e14[0] \n"
"vmul.f32 q13, q9, %e14[1] \n"
"pld [%3, #256] \n"
"vmul.f32 q14, q12, %f14[0] \n"
"vmul.f32 q15, q11, %f14[1] \n"
"vmla.f32 q7, q10, %e15[0] \n"
"vld2.f32 {d16-d19}, [%3]! \n"
"pld [%3, #256] \n"
"vld2.f32 {d20-d23}, [%3] \n"
"vext.32 q12, q8, q10, #1 \n"
"vext.32 q11, q9, q11, #1 \n"
"vext.32 q10, q8, q10, #2 \n"
"vmla.f32 q7, q8, %e15[1] \n"
"vmla.f32 q13, q9, %f15[0] \n"
"pld [%4, #256] \n"
"vmla.f32 q14, q12, %f15[1] \n"
"vmla.f32 q15, q11, %e16[0] \n"
"vmla.f32 q7, q10, %e16[1] \n"
"vld2.f32 {d16-d19}, [%4]! \n"
"pld [%4, #256] \n"
"vld2.f32 {d20-d23}, [%4] \n"
"vext.32 q12, q8, q10, #1 \n"
"vext.32 q11, q9, q11, #1 \n"
"vext.32 q10, q8, q10, #2 \n"
"vmla.f32 q7, q8, %f16[0] \n"
"vmla.f32 q13, q9, %f16[1] \n"
"pld [%5, #256] \n"
"vmla.f32 q14, q12, %e17[0] \n"
"vmla.f32 q15, q11, %e17[1] \n"
"vmla.f32 q7, q10, %f17[0] \n"
"vld2.f32 {d16-d19}, [%5]! \n"
"pld [%5, #256] \n"
"vld2.f32 {d20-d23}, [%5] \n"
"vext.32 q12, q8, q10, #1 \n"
"vext.32 q11, q9, q11, #1 \n"
"vext.32 q10, q8, q10, #2 \n"
"vmla.f32 q7, q8, %f17[1] \n"
"vmla.f32 q13, q9, %e18[0] \n"
"pld [%6, #256] \n"
"vmla.f32 q14, q12, %e18[1] \n"
"vmla.f32 q15, q11, %f18[0] \n"
"vmla.f32 q7, q10, %f18[1] \n"
"vld2.f32 {d16-d19}, [%6]! \n"
"pld [%6, #256] \n"
"vld2.f32 {d20-d23}, [%6] \n"
"vext.32 q12, q8, q10, #1 \n"
"vext.32 q11, q9, q11, #1 \n"
"vext.32 q10, q8, q10, #2 \n"
"vmla.f32 q7, q8, %e19[0] \n"
"vmla.f32 q13, q9, %e19[1] \n"
"vmla.f32 q14, q12, %f19[0] \n"
"vmla.f32 q15, q11, %f19[1] \n"
"vmla.f32 q7, q10, %e20[0] \n"
"pld [%2, #256] \n"
"vld2.f32 {d16-d19}, [%2]! \n" // q8 = 0 2 4 6 q9 = 1 3 5 7
"vadd.f32 q14, q14, q15 \n"
"vadd.f32 q7, q7, q13 \n"
// "veor q15, q15 \n"// _sump3 = 0;
// "veor q13, q13 \n"// _sump2 = 0;
"pld [%2, #256] \n"
"vadd.f32 q7, q7, q14 \n"
"vld2.f32 {d20-d23}, [%2] \n" // q10 = 8 10 12 14 q11 = 9 11 13 15
// "veor q14, q14 \n"// _sump3 = 0;
"vst1.f32 {d14-d15}, [%1]! \n"
"pld [%1, #128] \n"
"subs %0, #1 \n"
"bne 0b \n"
"sub %2, #32 \n"
: "=r"(nn), // %0
"=r"(outptr), // %1
"=r"(r0), // %2
"=r"(r1), // %3
"=r"(r2), // %4
"=r"(r3), // %5
"=r"(r4) // %6
: "0"(nn),
"1"(outptr),
"2"(r0),
"3"(r1),
"4"(r2),
"5"(r3),
"6"(r4),
"w"(_k0123), // %14
"w"(_k4567), // %15
"w"(_k891011), // %16
"w"(_k12131415), // %17
"w"(_k16171819), // %18
"w"(_k20212223), // %19
"w"(_k24242424) // %20
: "cc", "memory", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
}
#endif // __aarch64__
#endif // __ARM_NEON
// Scalar/intrinsic tail for the leftover columns; note r* advance by 2.
for (; remain > 0; remain--)
{
float sum = 0;
#if __ARM_NEON
// Columns 0..3 of each kernel row vectorized; column 4 terms are
// accumulated in scalar before the horizontal reduction.
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _sum = vmulq_f32(_r0, _k0123);
float32x4_t _r1 = vld1q_f32(r1);
_sum = vmlaq_f32(_sum, _r1, vld1q_f32(k1));
float32x4_t _r2 = vld1q_f32(r2);
_sum = vmlaq_f32(_sum, _r2, vld1q_f32(k2));
float32x4_t _r3 = vld1q_f32(r3);
_sum = vmlaq_f32(_sum, _r3, vld1q_f32(k3));
float32x4_t _r4 = vld1q_f32(r4);
_sum = vmlaq_f32(_sum, _r4, _k20212223);
sum += r0[4] * k0[4];
sum += r1[4] * k1[4];
sum += r2[4] * k2[4];
sum += r3[4] * k3[4];
sum += r4[4] * k4[4];
float32x2_t _ss = vadd_f32(vget_low_f32(_sum), vget_high_f32(_sum));
_ss = vpadd_f32(_ss, _ss);
sum += vget_lane_f32(_ss, 0);
#else
sum += r0[0] * k0[0];
sum += r0[1] * k0[1];
sum += r0[2] * k0[2];
sum += r0[3] * k0[3];
sum += r0[4] * k0[4];
sum += r1[0] * k1[0];
sum += r1[1] * k1[1];
sum += r1[2] * k1[2];
sum += r1[3] * k1[3];
sum += r1[4] * k1[4];
sum += r2[0] * k2[0];
sum += r2[1] * k2[1];
sum += r2[2] * k2[2];
sum += r2[3] * k2[3];
sum += r2[4] * k2[4];
sum += r3[0] * k3[0];
sum += r3[1] * k3[1];
sum += r3[2] * k3[2];
sum += r3[3] * k3[3];
sum += r3[4] * k3[4];
sum += r4[0] * k4[0];
sum += r4[1] * k4[1];
sum += r4[2] * k4[2];
sum += r4[3] * k4[3];
sum += r4[4] * k4[4];
#endif
*outptr += sum;
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
r4 += 2;
outptr++;
}
// Jump to the start of the next stride-2 input row pair.
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
}
|
GB_unop__frexpe_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__frexpe_fp64_fp64)
// op(A') function: GB (_unop_tran__frexpe_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = GB_frexpe (aij)
// type of the A matrix entries
#define GB_ATYPE \
double
// type of the C matrix entries
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// access the pth entry of C
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_frexpe (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = GB_frexpe (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FREXPE || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the FREXPE unary operator entrywise: Cx [p] = GB_frexpe (Ax [p]).
// Returns GrB_NO_VALUE when the operator/type is disabled at compile time,
// GrB_SUCCESS otherwise.
GrB_Info GB (_unop_apply__frexpe_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b,
        // so only entries present in the bitmap are computed
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double z = Ax [p] ;
            Cx [p] = GB_frexpe (z) ;
        }
    }
    else
    {
        // sparse/hyper/full case: every slot of Ax holds a live entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double z = Ax [p] ;
            Cx [p] = GB_frexpe (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__frexpe_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The transpose kernel body is textually included here; it expands in terms
// of the GB_ATYPE/GB_CTYPE/GB_CAST_OP macros defined above for this operator.
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
SnoopFilterMapper.c | // John D. McCalpin, mccalpin@tacc.utexas.edu
static char const rcsid[] = "$Id: SnoopFilterMapper.c,v 1.11 2018/05/17 22:24:58 mccalpin Exp mccalpin $";
// include files
#include <stdio.h> // printf, etc
#include <stdint.h> // standard integer types, e.g., uint32_t
#include <signal.h> // for signal handler
#include <stdlib.h> // exit() and EXIT_FAILURE
#include <string.h> // strerror() function converts errno to a text string for printing
#include <fcntl.h> // for open()
#include <errno.h> // errno support
#include <assert.h> // assert() function
#include <unistd.h> // sysconf() function, sleep() function
#include <sys/mman.h> // support for mmap() function
#include <linux/mman.h> // required for 1GiB page support in mmap()
#include <math.h> // for pow() function used in RAPL computations
#include <time.h>
#include <sys/time.h> // for gettimeofday
# define ARRAYSIZE 2147483648L
// MYHUGEPAGE_1GB overrides default of 2MiB for hugepages
#if defined MYHUGEPAGE_1GB
#define MYPAGESIZE 1073741824UL
#define NUMPAGES 2L
#define PAGES_MAPPED 2L // this is still specifying how many 2MiB pages to map
#else
#define MYPAGESIZE 2097152L
#define NUMPAGES 1024L
#define PAGES_MAPPED 14L
#endif
#define SPECIAL_VALUE (-1)
// interfaces for va2pa_lib.c
void print_pagemap_entry(unsigned long long pagemap_entry);
unsigned long long get_pagemap_entry( void * va );
int dumpall; // when set to 1, will cause dump of lots of stuff for debugging
int report;
int nwraps; // track number of performance counter wraps
double *array; // array pointer to mmap on 1GiB pages
double *page_pointers[NUMPAGES]; // one pointer for each page allocated
uint64_t pageframenumber[NUMPAGES]; // one PFN entry for each page allocated
// constant value defines
# define NUM_SOCKETS 2 //
# define NUM_IMC_CHANNELS 6 // includes channels on all IMCs in a socket
# define NUM_IMC_COUNTERS 5 // 0-3 are the 4 programmable counters, 4 is the fixed-function DCLK counter
# define NUM_CHA_BOXES 28
# define NUM_CHA_USED 28
# define NUM_CHA_COUNTERS 4
long imc_counts[NUM_SOCKETS][NUM_IMC_CHANNELS][NUM_IMC_COUNTERS][2]; // including the fixed-function (DCLK) counter as the final entry
long imc_pkg_sums[NUM_SOCKETS][NUM_IMC_COUNTERS]; // sum across channels for each chip
char imc_event_name[NUM_SOCKETS][NUM_IMC_CHANNELS][NUM_IMC_COUNTERS][32]; // reserve 32 characters for the IMC event names for each socket, channel, counter
uint32_t imc_perfevtsel[NUM_IMC_COUNTERS]; // expected control settings for the counters
uint32_t imc_vid_did[3]; // PCIe configuration space vendor and device IDs for the IMC blocks
long cha_counts[NUM_SOCKETS][NUM_CHA_BOXES][NUM_CHA_COUNTERS][2]; // 2 sockets, 28 tiles per socket, 4 counters per tile, 2 times (before and after)
uint32_t cha_perfevtsel[NUM_CHA_COUNTERS];
long cha_pkg_sums[NUM_SOCKETS][NUM_CHA_COUNTERS];
#define MAXCORES 112
#define CORES_USED 24
// New feature -- core counters.
// upgrade to include counters for all cores
long core_counters[MAXCORES][4][2]; // 24 cores & 24 threads on one socket, 4 counters, before and after
long fixed_counters[MAXCORES][4][2]; // 24 cores with 4 fixed-function core counters (Instr, CoreCyc, RefCyc, TSC)
long core_pkg_sums[NUM_SOCKETS][4]; // four core counters
long fixed_pkg_sums[NUM_SOCKETS][4]; // four fixed-function counters per core (Instr, CoreCyc, RefCyc, TSC)
int8_t cha_by_page[PAGES_MAPPED][32768]; // L3 numbers for each of the 32,768 cache lines in each of the first PAGES_MAPPED 2MiB pages
uint64_t paddr_by_page[PAGES_MAPPED]; // physical addresses of the base of each of the first PAGES_MAPPED 2MiB pages used
long lines_by_cha[NUM_CHA_USED]; // bulk count of lines assigned to each CHA
#ifdef DEBUG
FILE *log_file; // log file for debugging -- should not be needed in production
#endif
unsigned int *mmconfig_ptr; // must be pointer to 32-bit int so compiler will generate 32-bit loads and stores
struct timeval tp; // seconds and microseconds from gettimeofday
struct timezone tzp; // required, but not used here.
double ssum(double *a, long vl);
// mysecond: current wall-clock time in seconds (microsecond resolution),
// suitable for elapsed-time measurement by subtracting two calls.
// Returns 0.0 if gettimeofday() fails (it reports the failure to stderr).
double mysecond()
{
    struct timeval tp = {0, 0};   // zero-init so a failed call cannot yield garbage
    struct timezone tzp;

    // gettimeofday() essentially never fails with valid pointers, but the
    // original code silently discarded its return value in an unused local
    if (gettimeofday(&tp, &tzp) != 0) {
        perror("mysecond: gettimeofday failed");
    }
    return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
# ifndef MIN
# define MIN(x,y) ((x)<(y)?(x):(y))
# endif
# ifndef MAX
# define MAX(x,y) ((x)>(y)?(x):(y))
# endif
#include "low_overhead_timers.c"
#include "SKX_IMC_BusDeviceFunctionOffset.h"
#include "MSR_defs.h"
// ===========================================================================================================================================================================
// Convert PCI(bus:device.function,offset) to uint32_t array index
// Convert PCI(bus:device.function,offset) to uint32_t array index.
//
// The mmap'd PCI configuration space is viewed as an array of 32-bit words:
// byte address = (Bus<<20) | (Device<<15) | (Function<<12) | Offset,
// and the returned index is that byte address divided by sizeof(uint32_t).
uint32_t PCI_cfg_index(unsigned int Bus, unsigned int Device, unsigned int Function, unsigned int Offset)
{
    uint32_t byteaddress;
    uint32_t index;
    // Upper-bound checks only: the parameters are unsigned, so the old
    // "x >= 0" asserts were tautologies and have been dropped.  Bus was
    // previously unchecked even though it occupies bits 27:20.
    assert (Bus < (1<<8));        // 8-bit bus number
    assert (Device < (1<<5));     // 5-bit device number
    assert (Function < (1<<3));   // 3-bit function number
    assert (Offset < (1<<12));    // 4 KiB of config space per function
    byteaddress = (Bus<<20) | (Device<<15) | (Function<<12) | Offset;
    index = byteaddress / 4;      // word index into the uint32_t view
    return ( index );
}
// ===========================================================================================================================================================================
int main(int argc, char *argv[])
{
// local declarations
// int cpuid_return[4];
int i;
int retries;
int zeros;
int rc;
int core_pmc_width, fixed_pmc_width; // these will be looked up using CPUID to use in overflow/wraparound correction
int uncore_pmc_width=48; // all the uncore stuff is model-dependent, but most are 48 bits
ssize_t rc64;
char description[100];
size_t len;
long arraylen;
long l2_contained_size, inner_repetitions;
unsigned long pagemapentry;
unsigned long paddr, basephysaddr;
unsigned long pagenum, basepagenum;
uint32_t bus, device, function, offset, ctl_offset, ctr_offset, value, index;
uint32_t socket, imc, channel, counter, controller;
long count,delta;
long j,k,page_number,page_base_index,line_number;
long jstart[CORES_USED], jend[CORES_USED], mycore, vl[CORES_USED];
uint32_t low_0, high_0, low_1, high_1;
char filename[100];
int pkg, tile;
int nr_cpus;
uint64_t msr_val, msr_num;
int mem_fd;
int msr_fd[2]; // one for each socket
int proc_in_pkg[2]; // one Logical Processor number for each socket
uid_t my_uid;
gid_t my_gid;
double sum,expected;
double t0, t1;
double avg_cycles;
unsigned long tsc_start, tsc_end;
float TSC_GHz;
double sf_evict_rate;
double bandwidth;
unsigned long mmconfig_base=0x80000000; // DOUBLE-CHECK THIS ON NEW SYSTEMS!!!!! grep MMCONFIG /proc/iomem | awk -F- '{print $1}'
unsigned long mmconfig_size=0x10000000;
double private_sum,partial_sums[CORES_USED];
long iters,iteration_counts[CORES_USED];
long BaseOffset;
TSC_GHz = get_TSC_frequency()/1.0e9;
core_pmc_width = get_core_counter_width();
fixed_pmc_width = get_fixed_counter_width();
BaseOffset = 0;
#ifdef RANDOMOFFSETS
if (argc != 2) {
printf("Must Provide a Random Offset cache line offset value (an integer between 0 and 2^24-375000 (16,402,216))\n");
exit(1);
} else {
BaseOffset = atol(argv[1]);
printf("Random Cache Line Offset is %ld\n",BaseOffset);
BaseOffset = BaseOffset*8;
printf("Starting index for summation is %ld\n",BaseOffset);
}
#endif
retries = 0;
zeros = 0;
report = 1;
dumpall = 0;
nwraps = 0;
l2_contained_size = 125000 * CORES_USED; // about 95% of the L2 space in the cores used
for (i=0; i<CORES_USED; i++) {
iters = 0;
jstart[i] = BaseOffset + i*l2_contained_size/CORES_USED;
jend[i] = jstart[i] + l2_contained_size/CORES_USED;
vl[i] = jend[i]-jstart[i];
printf("thread %d jstart %ld jend %ld vl %ld\n",i,jstart[i],jend[i],vl[i]);
partial_sums[i] = 0.0;
iteration_counts[i] = 0;
for (counter=0; counter<4; counter++) {
core_counters[i][counter][0] = SPECIAL_VALUE;
core_counters[i][counter][1] = SPECIAL_VALUE;
fixed_counters[i][counter][0] = SPECIAL_VALUE;
fixed_counters[i][counter][1] = SPECIAL_VALUE;
}
}
// initialize the array that will hold the L3 numbers for each cache line for each of the first PAGES_MAPPED 2MiB pages
for (i=0; i<PAGES_MAPPED; i++) {
for (line_number=0; line_number<32768; line_number++) {
cha_by_page[i][line_number] = -1; // special value -- if set properly, all values should be in the range of 0..23
}
}
// allocate working array on a huge pages -- either 1GiB or 2MiB
len = NUMPAGES * MYPAGESIZE;
#if defined MYHUGEPAGE_1GB
array = (double*) mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 );
#elif defined MYHUGEPAGE_THP
//array = (double*) mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0 );
rc = posix_memalign((void **)&array, (size_t) 2097152, (size_t) len);
if (rc != 0) {
printf("ERROR: posix_memalign call failed with error code %d\n",rc);
exit(3);
}
#else
array = (double*) mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 );
#endif
if (array == (void *)(-1)) {
perror("ERROR: mmap of array a failed! ");
exit(1);
}
// initialize working array
arraylen = NUMPAGES * MYPAGESIZE/sizeof(double);
#pragma omp parallel for
for (j=0; j<arraylen; j++) {
array[j] = 1.0;
}
// initialize page_pointers to point to the beginning of each page in the array
// then get and print physical addresses for each
#ifdef VERBOSE
printf(" Page ArrayIndex VirtAddr PagemapEntry PFN PhysAddr\n");
#endif
for (j=0; j<NUMPAGES; j++) {
k = j*MYPAGESIZE/sizeof(double);
page_pointers[j] = &array[k];
pagemapentry = get_pagemap_entry(&array[k]);
pageframenumber[j] = (pagemapentry & (unsigned long) 0x007FFFFFFFFFFFFF);
#ifdef VERBOSE
printf(" %.5ld %.10ld %#18lx %#18lx %#18lx %#18lx\n",j,k,&array[k],pagemapentry,pageframenumber[j],(pageframenumber[j]<<12));
#endif
}
printf("PAGE_ADDRESSES ");
for (j=0; j<PAGES_MAPPED; j++) {
basephysaddr = pageframenumber[j] << 12;
paddr_by_page[j] = basephysaddr;
printf("0x%.12lx ",paddr_by_page[j]);
}
printf("\n");
// initialize arrays for counter data
for (socket=0; socket<NUM_SOCKETS; socket++) {
for (channel=0; channel<NUM_IMC_CHANNELS; channel++) {
for (counter=0; counter<NUM_IMC_COUNTERS; counter++) {
imc_counts[socket][channel][counter][0] = 0;
imc_counts[socket][channel][counter][1] = 0;
}
}
for (tile=0; tile<NUM_CHA_USED; tile++) {
lines_by_cha[tile] = 0;
for (counter=0; counter<4; counter++) {
cha_counts[socket][tile][counter][0] = 0;
cha_counts[socket][tile][counter][1] = 0;
}
}
}
// get the host name, assume that it is of the TACC standard form, and use this as part
// of the log file name.... Standard form is "c263-109.stampede2.tacc.utexas.edu", so
// truncating at the first "." is done by writing \0 to character #8.
len = 100;
rc = gethostname(description, len);
if (rc != 0) {
fprintf(stderr,"ERROR when trying to get hostname\n");
exit(-1);
}
description[8] = 0; // assume hostname of the form c263-109.stampede2.tacc.utexas.edu -- truncate after first period
my_uid = getuid();
my_gid = getgid();
#ifdef DEBUG
sprintf(filename,"log.%s.perf_counters",description);
// sprintf(filename,"log.perf_counters");
log_file = fopen(filename,"w+");
if (log_file == 0) {
fprintf(stderr,"ERROR %s when trying to open log file %s\n",strerror(errno),filename);
exit(-1);
}
fprintf(log_file,"DEBUG: my uid is %d, my gid is %d\n",my_uid,my_gid);
rc = chown(filename,my_uid,my_gid);
if (rc == 0) {
fprintf(log_file,"DEBUG: Successfully changed ownership of log file to %d %d\n",my_uid,my_gid);
} else {
fprintf(stderr,"ERROR: Attempt to change ownership of log file failed -- bailing out\n");
exit(-1);
}
#endif
//========================================================================================================================
// initial checks
// is this a supported core? (CPUID Family/Model)
// Every processor that I am going to see will be Family 0x06 (no ExtFamily needed).
// The DisplayModel field is (ExtModel<<4)+Model and should be 0x3F for all Xeon E5 v3 systems
int leaf = 1;
int subleaf = 0;
uint32_t eax, ebx, ecx, edx;
__asm__ __volatile__ ("cpuid" : \
"=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (leaf), "c" (subleaf));
// Alternate form:
// The compiler cpuid intrinsics are not documented by Intel -- they use the Microsoft format
// described at https://msdn.microsoft.com/en-us/library/hskdteyh.aspx
// __cpuid(array to hold eax,ebx,ecx,edx outputs, initial eax value)
// __cpuidex(array to hold eax,ebx,ecx,edx outputs, initial eax value, initial ecx value)
// CPUID function 0x01 returns the model info in eax.
// 27:20 ExtFamily -- expect 0x00
// 19:16 ExtModel -- expect 0x3 for HSW, 0x5 for SKX
// 11:8 Family -- expect 0x6
// 7:4 Model -- expect 0xf for HSW, 0x5 for SKX
// __cpuid(&cpuid_return[0], 1);
// uint32_t ModelInfo = cpuid_return[0] & 0x0fff0ff0; // mask out the reserved and "stepping" fields, leaving only the based and extended Family/Model fields
uint32_t ModelInfo = eax & 0x0fff0ff0; // mask out the reserved and "stepping" fields, leaving only the based and extended Family/Model fields
if (ModelInfo != 0x00050650) { // expected values for Skylake Xeon
fprintf(stderr,"ERROR -- this does not appear to be the correct processor type!!!\n");
fprintf(stderr,"ERROR -- Expected CPUID(0x01) Family/Model bits = 0x%x, but found 0x%x\n",0x00050650,ModelInfo);
exit(1);
}
#ifdef IMC_COUNTS
// ===================================================================================================================
// ------------------ REQUIRES ROOT PERMISSIONS ------------------
// open /dev/mem for PCI device access and mmap() a pointer to the beginning
// of the 256 MiB PCI Configuration Space.
// check VID/DID for uncore bus:device:function combinations
// Note that using /dev/mem for PCI configuration space access is required for some devices on KNL.
// It is not required on other systems, but it is not particularly inconvenient either.
sprintf(filename,"/dev/mem");
#ifdef DEBUG
fprintf(log_file,"opening %s\n",filename);
#endif
mem_fd = open(filename, O_RDWR);
if (mem_fd == -1) {
fprintf(stderr,"ERROR %s when trying to open %s\n",strerror(errno),filename);
exit(-1);
}
int map_prot = PROT_READ | PROT_WRITE;
mmconfig_ptr = mmap(NULL, mmconfig_size, map_prot, MAP_SHARED, mem_fd, mmconfig_base);
if (mmconfig_ptr == MAP_FAILED) {
fprintf(stderr,"cannot mmap base of PCI configuration space from /dev/mem: address %lx\n", mmconfig_base);
exit(2);
#ifdef DEBUG
} else {
fprintf(log_file,"Successful mmap of base of PCI configuration space from /dev/mem at address %lx\n", mmconfig_base);
#endif
}
close(mem_fd); // OK to close file after mmap() -- the mapping persists until unmap() or program exit
// New simple test that does not need to know the uncore bus numbers here...
// Skylake bus 0, Function 5, offset 0 -- Sky Lake-E MM/Vt-d Configuration Registers
//
// simple test -- should return "20248086" on Skylake Xeon EP -- DID 0x2024, VID 0x8086
bus = 0x00;
device = 0x5;
function = 0x0;
offset = 0x0;
index = PCI_cfg_index(bus, device, function, offset);
value = mmconfig_ptr[index];
if (value != 0x20248086) {
fprintf(stderr,"ERROR: Bus %x device %x function %x offset %x expected %x, found %x\n",bus,device,function,offset,0x20248086,value);
exit(3);
#ifdef DEBUG
} else {
fprintf(log_file,"DEBUG: Well done! Bus %x device %x function %x offset %x returns expected value of %x\n",bus,device,function,offset,value);
#endif
}
#endif
#ifdef CHA_COUNTS
// ===================================================================================================================
// open the MSR driver using one core in socket 0 and one core in socket 1
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
proc_in_pkg[0] = 0; // logical processor 0 is in socket 0 in all TACC systems
proc_in_pkg[1] = nr_cpus-1; // logical processor N-1 is in socket 1 in all TACC 2-socket systems
for (pkg=0; pkg<2; pkg++) {
sprintf(filename,"/dev/cpu/%d/msr",proc_in_pkg[pkg]);
msr_fd[pkg] = open(filename, O_RDWR);
if (msr_fd[pkg] == -1) {
fprintf(stderr,"ERROR %s when trying to open %s\n",strerror(errno),filename);
exit(-1);
}
}
for (pkg=0; pkg<2; pkg++) {
pread(msr_fd[pkg],&msr_val,sizeof(msr_val),IA32_TIME_STAMP_COUNTER);
fprintf(stdout,"DEBUG: TSC on core %d socket %d is %ld\n",proc_in_pkg[pkg],pkg,msr_val);
}
pread(msr_fd[0],&msr_val,sizeof(msr_val),0x186);
printf("Core PerfEvtSel0 0x%lx\n",msr_val);
pread(msr_fd[0],&msr_val,sizeof(msr_val),0x187);
printf("Core PerfEvtSel1 0x%lx\n",msr_val);
pread(msr_fd[0],&msr_val,sizeof(msr_val),0x188);
printf("Core PerfEvtSel2 0x%lx\n",msr_val);
pread(msr_fd[0],&msr_val,sizeof(msr_val),0x189);
printf("Core PerfEvtSel3 0x%lx\n",msr_val);
// Program the CHA mesh counters
// Each CHA has a block of 16 MSRs reserved, of which 12 are used
// The base for each CHA is 0xE00 + 0x10*CHA
// Within each block:
// Unit Control is at offset 0x00
// CTL0, 1, 2, 3 are at offsets 0x01, 0x02, 0x03, 0x04
// CTR0, 1, 2, 3 are at offsets 0x08, 0x09, 0x0a, 0x0b
// For the moment I think I can ignore the filter registers at offsets 0x05 and 0x06
// and the status register at offset 0x07
// The control register needs bit 22 set to enabled, then bits 15:8 as Umask and 7:0 as EventSelect
// Mesh Events:
// HORZ_RING_BL_IN_USE = 0xab
// LEFT_EVEN = 0x01
// LEFT_ODD = 0x02
// RIGHT_EVEN = 0x04
// RIGHT_ODD = 0x08
// VERT_RING_BL_IN_USE = 0xaa
// UP_EVEN = 0x01
// UP_ODD = 0x02
// DN_EVEN = 0x04
// DN_ODD = 0x08
// For starters, I will combine even and odd and create 4 events
// 0x004003ab HORZ_RING_BL_IN_USE.LEFT
// 0x00400cab HORZ_RING_BL_IN_USE.RIGHT
// 0x004003aa VERT_RING_BL_IN_USE.UP
// 0x00400caa VERT_RING_BL_IN_USE.DN
// first set to try....
cha_perfevtsel[0] = 0x004003ab; // HORZ_RING_BL_IN_USE.LEFT
cha_perfevtsel[1] = 0x00400cab; // HORZ_RING_BL_IN_USE.RIGHT
cha_perfevtsel[2] = 0x004003aa; // VERT_RING_BL_IN_USE.UP
cha_perfevtsel[3] = 0x00400caa; // VERT_RING_BL_IN_USE.DN
// second set to try....
// cha_perfevtsel[0] = 0x004001ab; // HORZ_RING_BL_IN_USE.LEFT_EVEN
// cha_perfevtsel[1] = 0x004002ab; // HORZ_RING_BL_IN_USE.LEFT_ODD
// cha_perfevtsel[2] = 0x004004ab; // HORZ_RING_BL_IN_USE.RIGHT_EVEN
// cha_perfevtsel[3] = 0x004008ab; // HORZ_RING_BL_IN_USE.RIGHT_ODD
// Snoop Filter Eviction counters
cha_perfevtsel[0] = 0x0040073d; // SF_EVICTION S,E,M states
cha_perfevtsel[1] = 0x00400334; // LLC_LOOKUP.DATA_READ <-- requires CHA_FILTER0[26:17]
cha_perfevtsel[2] = 0x00400534; // LLC_LOOKUP.DATA_WRITE (WB from L2) <-- requires CHA_FILTER0[26:17]
cha_perfevtsel[3] = 0x0040af37; // LLC_VICTIMS.TOTAL (MESF) (does not count clean victims)
uint64_t cha_filter0 = 0x01e20000; // set bits 24,23,22,21,17 FMESI -- all LLC lookups, no SF lookups
printf("CHA PerfEvtSel0 0x%lx\n",cha_perfevtsel[0]);
printf("CHA PerfEvtSel1 0x%lx\n",cha_perfevtsel[1]);
printf("CHA PerfEvtSel2 0x%lx\n",cha_perfevtsel[2]);
printf("CHA PerfEvtSel3 0x%lx\n",cha_perfevtsel[3]);
printf("CHA FILTER0 0x%lx\n",cha_filter0);
#ifdef VERBOSE
printf("VERBOSE: programming CHA counters\n");
#endif
for (pkg=0; pkg<2; pkg++) {
for (tile=0; tile<NUM_CHA_USED; tile++) {
msr_num = 0xe00 + 0x10*tile; // box control register -- set enable bit
msr_val = 0x00400000;
pwrite(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num);
msr_num = 0xe00 + 0x10*tile + 1; // ctl0
msr_val = cha_perfevtsel[0];
pwrite(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num);
msr_num = 0xe00 + 0x10*tile + 2; // ctl1
msr_val = cha_perfevtsel[1];
pwrite(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num);
msr_num = 0xe00 + 0x10*tile + 3; // ctl2
msr_val = cha_perfevtsel[2];
pwrite(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num);
msr_num = 0xe00 + 0x10*tile + 4; // ctl3
msr_val = cha_perfevtsel[3];
pwrite(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num);
msr_num = 0xe00 + 0x10*tile + 5; // filter0
msr_val = cha_filter0; // bits 24:21,17 FMESI -- all LLC lookups, not not SF lookups
pwrite(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num);
}
}
#ifdef VERBOSE
printf("VERBOSE: finished programming CHA counters\n");
#endif
#endif
#ifdef IMC_COUNTS
// ===================================================================================================================
// Read the current programming of the IMC counters and look for the standard values (in this order)
// CAS_COUNT.READS Event 0x04, Umask 0x03
// CAS_COUNT.WRITES Event 0x04, Umask 0x0C
// ACT.ALL Event 0x01, Umask 0x0B
// PRE_COUNT.MISS Event 0x02, Umask 0x01
// DCLK
#ifdef VERBOSE
printf("Preparing to program IMC counters\n");
#endif
// expected values of IMC performance counter event select control registers
imc_perfevtsel[0] = 0x00400304; // CAS_COUNT.READS
imc_perfevtsel[1] = 0x00400C04; // CAS_COUNT.WRITES
imc_perfevtsel[2] = 0x00400B01; // ACT_COUNT.ALL
imc_perfevtsel[3] = 0x00400102; // PRE_COUNT.MISS
imc_perfevtsel[4] = 0x00400000; // DCLK
imc_vid_did[0] = 0x20428086; // all channel 0 devices are 2042
imc_vid_did[1] = 0x20468086; // all channel 1 devices are 2046
imc_vid_did[2] = 0x204a8086; // all channel 2 devices are 204a
printf("IMC PerfEvtSel0 0x%lx\n",imc_perfevtsel[0]);
printf("IMC PerfEvtSel1 0x%lx\n",imc_perfevtsel[1]);
printf("IMC PerfEvtSel2 0x%lx\n",imc_perfevtsel[2]);
printf("IMC PerfEvtSel3 0x%lx\n",imc_perfevtsel[3]);
printf("IMC PerfEvtSel4 0x%lx\n",imc_perfevtsel[4]);
// print the full wall-clock time in seconds and microseconds
// assume both components of tp struct are longs.
fprintf(stdout,"# %s\n", rcsid);
i = gettimeofday(&tp,&tzp);
fprintf(stdout,"%ld %ld\n", tp.tv_sec,tp.tv_usec);
for (socket=0; socket<NUM_SOCKETS; socket++) {
bus = IMC_BUS_Socket[socket];
#ifdef VERBOSE
printf("VERBOSE: socket %d bus %d\n",socket,bus);
#endif
for (channel=0; channel<NUM_IMC_CHANNELS; channel++) {
device = IMC_Device_Channel[channel];
function = IMC_Function_Channel[channel];
#ifdef VERBOSE
printf("VERBOSE: channel %d device %d function %d\n",channel, device, function);
#endif
// check to make sure this is the correct device
offset = 0x0;
index = PCI_cfg_index(bus, device, function, offset);
value = mmconfig_ptr[index];
if ( value != imc_vid_did[channel%3]) {
fprintf(stderr,"WARNING!!!! socket %d, channel %d has vid_did %x but should be %x\n",socket,channel,value,imc_vid_did[channel%3]);
}
for (counter=0; counter<NUM_IMC_COUNTERS; counter++) {
// check to see if this unit is programmed correctly and reprogram if needed
offset = IMC_PmonCtl_Offset[counter];
index = PCI_cfg_index(bus, device, function, offset);
value = mmconfig_ptr[index];
if ( value != imc_perfevtsel[counter]) {
fprintf(stderr,"WARNING!!!! socket %d, channel %d has perfevtsel %x but should be %x -- reprogramming\n",socket,channel,value,imc_perfevtsel[counter]);
mmconfig_ptr[index] = imc_perfevtsel[counter];
}
}
}
}
#endif
// ========= END OF PERFORMANCE COUNTER SETUP ========================================================================
#ifdef MAP_L3
// ============== BEGIN L3 MAPPING TESTS ==============================
// For each of the PAGES_MAPPED 2MiB pages:
// 1. Use "access()" to see if the mapping file already exists.
// If exists:
// 2. Use "stat()" to make sure the file is the correct size
// If right size:
// 3. Read the contents into the 32768-element int8_t array of L3 numbers.
// Else (wrong size):
// 4. Abort and tell the user to fix it manually.
// Else (not exists):
// 4. Call the mapping function to re-compute the map
// 5. Create mapping file
// 6. Save data in mapping file
// 7. Close output file
FILE *ptr_mapping_file;
int needs_mapping;
int good, good_old, good_new, pass1, pass2, pass3, found, numtries;
int min_count, max_count, sum_count, old_cha;
double avg_count, goodness1, goodness2, goodness3;
int globalsum = 0;
long totaltries = 0;
int NFLUSHES = 1000;
for (page_number=0; page_number<PAGES_MAPPED; page_number++) {
needs_mapping=0;
sprintf(filename,"PADDR_0x%.12lx.map",paddr_by_page[page_number]);
i = access(filename, F_OK);
if (i == -1) { // file does not exist
printf("DEBUG: Mapping file %s does not exist -- will create file after mapping cache lines\n",filename);
needs_mapping = 1;
} else { // file exists
i = access(filename, R_OK);
if (i == -1) { // file exists without read permissions
printf("ERROR: Mapping file %s exists, but without read permission\n",filename);
exit(1);
} else { // file exists with read permissions
ptr_mapping_file = fopen(filename,"r");
if (!ptr_mapping_file) {
printf("ERROR: Failed to open Mapping File %s, should not happen\n",filename);
exit(2);
}
k = fread(&cha_by_page[page_number][0],(size_t) 32768,(size_t) 1,ptr_mapping_file);
if (k != 1) { // incorrect read length
printf("ERROR: Read from Mapping File %s, returned the wrong record count %ld expected 1\n",filename,k);
exit(3);
} else { // correct read length
printf("DEBUG: Mapping File read for %s succeeded -- skipping mapping for this page\n",filename);
needs_mapping = 0;
}
}
}
if (needs_mapping == 1) {
// code imported from SystemMirrors/Hikari/MemSuite/InterventionLatency/L3_mapping.c
#ifdef VERBOSE
printf("DEBUG: here I need to perform the mapping for paddr 0x%.12lx, and then save the file\n",paddr_by_page[page_number]);
#endif
page_base_index = page_number*262144; // index of element at beginning of current 2MiB page
for (line_number=0; line_number<32768; line_number++) {
good = 0;
good_old = 0;
good_new = 0;
numtries = 0;
#ifdef VERBOSE
if (line_number%64 == 0) {
pagemapentry = get_pagemap_entry(&array[page_base_index+line_number*8]);
printf("DEBUG: page_base_index %ld line_number %ld index %ld pagemapentry 0x%lx\n",page_base_index,line_number,page_base_index+line_number*8,pagemapentry);
}
#endif
do { // -------------- Inner Repeat Loop until results pass "goodness" tests --------------
numtries++;
if (numtries > 100) {
printf("ERROR: No good results for line %d after %d tries\n",line_number,numtries);
exit(101);
}
totaltries++;
// 1. read L3 counters before starting test
for (tile=0; tile<NUM_CHA_USED; tile++) {
msr_num = 0xe00 + 0x10*tile + 0x8 + 1; // counter 1 is the LLC_LOOKUPS.READ event
pread(msr_fd[0],&msr_val,sizeof(msr_val),msr_num);
cha_counts[0][tile][1][0] = msr_val; // use the array I have already declared for cha counts
// printf("DEBUG: page %ld line %ld msr_num 0x%x msr_val %ld cha_counter1 %lu\n",
// page_number,line_number,msr_num,msr_val,cha_counts[0][tile][1][0]);
}
// 2. Access the line many times
sum = 0;
for (i=0; i<NFLUSHES; i++) {
sum += array[page_base_index+line_number*8];
_mm_mfence();
_mm_clflush(&array[page_base_index+line_number*8]);
_mm_mfence();
}
globalsum += sum;
// 3. read L3 counters after loads are done
for (tile=0; tile<NUM_CHA_USED; tile++) {
msr_num = 0xe00 + 0x10*tile + 0x8 + 1; // counter 1 is the LLC_LOOKUPS.READ event
pread(msr_fd[0],&msr_val,sizeof(msr_val),msr_num);
cha_counts[0][tile][1][1] = msr_val; // use the array I have already declared for cha counts
}
#ifdef VERBOSE
for (tile=0; tile<NUM_CHA_USED; tile++) {
printf("DEBUG: page %ld line %ld cha_counter1_after %lu cha_counter1 before %lu delta %lu\n",
page_number,line_number,cha_counts[0][tile][1][1],cha_counts[0][tile][1][0],cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0]);
}
#endif
// CHA counter 1 set to LLC_LOOKUP.READ
//
// 4. Determine which L3 slice owns the cache line and
// 5. Save the CHA number in the cha_by_page[page][line] array
// first do a rough quantitative checks of the "goodness" of the data
// goodness1 = max/NFLUSHES (pass if >95%)
// goodness2 = min/NFLUSHES (pass if <20%)
// goodness3 = avg/NFLUSHES (pass if <40%)
max_count = 0;
min_count = 1<<30;
sum_count = 0;
for (tile=0; tile<NUM_CHA_USED; tile++) {
max_count = MAX(max_count, cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0]);
min_count = MIN(min_count, cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0]);
sum_count += cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0];
}
avg_count = (double)(sum_count - max_count) / (double)(NUM_CHA_USED);
goodness1 = (double) max_count / (double) NFLUSHES;
goodness2 = (double) min_count / (double) NFLUSHES;
goodness3 = avg_count / (double) NFLUSHES;
// compare the goodness parameters with manually chosen limits & combine into a single pass (good=1) or fail (good=0)
pass1 = 0;
pass2 = 0;
pass3 = 0;
if ( goodness1 > 0.95 ) pass1 = 1;
if ( goodness2 < 0.20 ) pass2 = 1;
if ( goodness3 < 0.40 ) pass3 = 1;
good_new = pass1 * pass2 * pass3;
#ifdef VERBOSE
printf("GOODNESS: line_number %ld max_count %d min_count %d sum_count %d avg_count %f goodness1 %f goodness2 %f goodness3 %f pass123 %d %d %d\n",
line_number, max_count, min_count, sum_count, avg_count, goodness1, goodness2, goodness3, pass1, pass2, pass3);
if (good_new == 0) printf("DEBUG: one or more of the sanity checks failed for line=%ld: %d %d %d goodness values %f %f %f\n",
line_number,pass1,pass2,pass3,goodness1,goodness2,goodness3);
#endif
// test to see if more than one CHA reports > 0.95*NFLUSHES events
found = 0;
old_cha = -1;
int min_counts = (NFLUSHES*19)/20;
for (tile=0; tile<NUM_CHA_USED; tile++) {
if (cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0] >= min_counts) {
old_cha = cha_by_page[page_number][line_number];
cha_by_page[page_number][line_number] = tile;
found++;
#ifdef VERBOSE
if (found > 1) {
printf("WARNING: Multiple (%d) CHAs found using counter 1 for cache line %ld, index %ld: old_cha %d new_cha %d\n",found,line_number,page_base_index+line_number*8,old_cha,cha_by_page[page_number][line_number]);
}
#endif
}
}
if (found == 0) {
good_old = 0;
#ifdef VERBOSE
printf("WARNING: no CHA entry has been found for line %ld!\n",line_number);
printf("DEBUG dump for no CHA found\n");
for (tile=0; tile<NUM_CHA_USED; tile++) {
printf("CHA %d LLC_LOOKUP.READ delta %ld\n",tile,(cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0]));
}
#endif
} else if (found == 1) {
good_old = 1;
} else {
good_old = 0;
#ifdef VERBOSE
printf("DEBUG dump for multiple CHAs found\n");
for (tile=0; tile<NUM_CHA_USED; tile++) {
printf("CHA %d LLC_LOOKUP.READ delta %ld\n",tile,(cha_counts[0][tile][1][1]-cha_counts[0][tile][1][0]));
}
#endif
}
good = good_new * good_old; // trigger a repeat if either the old or new tests failed
}
while (good == 0);
#if 0
// 6. save the cache line number in the appropriate the cbo_indices[cbo][#lines] array
// 7. increment the corresponding cbo_num_lines[cbo] array entry
this_cbo = cha_by_page[page_number][line_number];
if (this_cbo == -1) {
printf("ERROR: cha_by_page[%ld][%ld] has not been set!\n",page_number,line_number);
exit(80);
}
cbo_indices[this_cbo][cbo_num_lines[this_cbo]] = line_number;
cbo_num_lines[this_cbo]++;
#endif
}
// I have not overwritten the filename, but I will rebuild it here just in case I add something stupid in between....
sprintf(filename,"PADDR_0x%.12lx.map",paddr_by_page[page_number]);
ptr_mapping_file = fopen(filename,"w");
if (!ptr_mapping_file) {
printf("ERROR: Failed to open Mapping File %s for writing -- aborting\n",filename);
exit(4);
}
// first try -- write one record of 32768 bytes
rc64 = fwrite(&cha_by_page[page_number][0],(size_t) 32768, (size_t) 1, ptr_mapping_file);
if (rc64 != 1) {
printf("ERROR: failed to write one 32768 Byte record to %s -- return code %ld\n",filename,rc64);
exit(5);
} else {
printf("SUCCESS: wrote mapping file %s\n",filename);
}
}
}
printf("DUMMY: globalsum %d\n",globalsum);
printf("VERBOSE: L3 Mapping Complete in %ld tries for %d cache lines ratio %f\n",totaltries,32768*PAGES_MAPPED,(double)totaltries/(double)(32768*PAGES_MAPPED));
#ifndef MYHUGEPAGE_1GB
// TODO!! Fix this so that it is not hard-coded for the 24-core case!!!!
//
// now that the mapping is complete, I can add up the number of lines mapped to each CHA
// be careful to count only the lines that are used, not the full 24MiB
// 3 million elements is ~11.44 2MiB pages, so count all lines in each of the first 11 pages
// If I did the arithmetic correctly, the 3 million elements uses 931328 Bytes of the 12th 2MiB page
// which is 116416 elements or 14552 cache lines.
// first accumulate the first 11 full pages
for (page_number=0; page_number<11; page_number++) {
for (line_number=0; line_number<32768; line_number++) {
lines_by_cha[cha_by_page[page_number][line_number]]++;
}
}
// then accumulate the partial 12th page
for (line_number=0; line_number<14552; line_number++) {
lines_by_cha[cha_by_page[11][line_number]]++;
}
// output
long lines_accounted = 0;
printf("LINES_BY_CHA");
for (i=0; i<NUM_CHA_USED; i++) {
printf(" %ld",lines_by_cha[i]);
lines_accounted += lines_by_cha[i];
}
printf("\n");
printf("ACCCOUNTED FOR %ld lines expected %ld lines\n",lines_accounted,l2_contained_size/8);
#endif
// ============== END L3 MAPPING TESTS ==============================
#endif
// For the snoop filter tests, I want to repeatedly read
// some number of arrays per core with an aggregate footprint
// close to 1MiB per core
// 24 cores = 24 MiB = 3 Mi elements, so
// using an array length of 3 million should be just about right 95.3674%
// l2_contained_size = arraylen; // only use if I want a large memory-contained version
inner_repetitions = 1000;
int stride = 1; // used in thread binding checks: use 2 for Dell nodes, 1 for Intel nodes
// try to pre-load the working data into the L2 caches before the initial performance counter reads
sum = 0.0;
#pragma omp parallel for reduction(+:sum)
for (i=0; i<l2_contained_size; i++) sum += array[i];
// While I am at it, I need to warm up the cores using AVX-512 code to get them to full frequency
// This may take up to 100 microseconds, or maybe 400,000 AVX512 instructions per thread.
// This is a pain because I can't trust the compiler to generate AVX512 code at any given time,
// so I have to resort to inline assembly.
tsc_start = rdtsc();
#pragma omp parallel for
for (i=0; i<CORES_USED; i++) {
for (j=0; j<10*1000*1000; j++) {
__asm__ __volatile__ (
"vpaddq %%zmm0, %%zmm1, %%zmm2\n\t"
"vpaddq %%zmm1, %%zmm2, %%zmm3\n\t"
"vpaddq %%zmm2, %%zmm3, %%zmm0\n\t"
"vpaddq %%zmm3, %%zmm0, %%zmm1"
: : : "zmm0","zmm1","zmm2","zmm3");
}
}
tsc_end = rdtsc();
printf("DEBUG: WARMUP LOOP took %lu TSC cycles\n",tsc_end - tsc_start);
// =================== BEGINNING OF PERFORMANCE COUNTER READS BEFORE KERNEL TESTING ==============================
#ifdef IMC_COUNTS
// ------------ read the initial values of the IMC counters ------------
for (socket=0; socket<NUM_SOCKETS; socket++) {
bus = IMC_BUS_Socket[socket];
for (channel=0; channel<NUM_IMC_CHANNELS; channel++) {
device = IMC_Device_Channel[channel];
function = IMC_Function_Channel[channel];
for (counter=0; counter<NUM_IMC_COUNTERS; counter++) {
offset = IMC_PmonCtr_Offset[counter];
index = PCI_cfg_index(bus, device, function, offset);
// read each counter twice to identify rare cases where the low-order bits
// overflow and increment the high-order bits between the two reads.
// Use the second set of values unless (( high_1 != high_0 ) && ( low_1 > low_0))
// (this indicates that the counter rolled between the 3rd and 4th reads).
low_0 = mmconfig_ptr[index];
high_0 = mmconfig_ptr[index+1];
low_1 = mmconfig_ptr[index];
high_1 = mmconfig_ptr[index+1];
if ( (high_1 != high_0) && (low_1 > low_0) ) {
count = ((uint64_t) high_0) << 32 | (uint64_t) low_0;
} else {
count = ((uint64_t) high_1) << 32 | (uint64_t) low_1;
}
imc_counts[socket][channel][counter][0] = count;
}
}
}
#if 0
// for debugging only: print initial values of IMC counts
for (socket=0; socket<NUM_SOCKETS; socket++) {
for (channel=0; channel<NUM_IMC_CHANNELS; channel++) {
fprintf(stdout,"%d %d",socket,channel);
for (counter=0; counter<NUM_IMC_COUNTERS; counter++) {
fprintf(stdout," %ld",imc_counts[socket][channel][counter][0]);
}
fprintf(stdout,"\n");
}
}
#endif
#endif
#ifdef CHA_COUNTS
// ------------ read the initial values of the CHA mesh counters ------------
for (pkg=0; pkg<2; pkg++) {
for (tile=0; tile<NUM_CHA_USED; tile++) {
for (counter=0; counter<4; counter++) {
msr_num = 0xe00 + 0x10*tile + 0x8 + counter;
pread(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num);
cha_counts[pkg][tile][counter][0] = msr_val;
}
}
}
#if 0
// for debugging only: print initial values of CHA counters
for (pkg=0; pkg<2; pkg++) {
for (tile=0; tile<NUM_CHA_USED; tile++) {
for (counter=0; counter<4; counter++) {
printf("Package %d, tile %d, counter %d, value %lu\n",pkg,tile,counter,cha_counts[pkg][tile][counter][0]);
}
}
}
#endif
#endif
// ------------ read the initial values of the programmable core counters ------------
#pragma omp parallel for private(counter)
for (i=0; i<CORES_USED; i++) {
#ifdef CHECK_THREAD_LOCATION
if (get_core_number() != stride*i) {
printf("ERROR: thread %d is in the wrong place %d\n",i,get_core_number());
}
#endif
for (counter=0; counter<4; counter++) {
core_counters[i][counter][0] = rdpmc(counter);
}
}
tsc_start = rdtsc();
// ================= CODE UNDER TEST BEGINS HERE ====================
#ifdef SIMPLE_OMP_LOOP
sum = 0.0;
for (k=0; k<inner_repetitions; k++) {
#pragma omp parallel for, reduction(+:sum)
for (j=0; j<l2_contained_size; j++) {
sum += array[j];
}
}
#else
#pragma omp parallel for private(j,k,iters,private_sum)
for (i=0; i<CORES_USED; i++) {
iters = 0;
fixed_counters[i][0][0] = rdpmc_instructions();
fixed_counters[i][1][0] = rdpmc_actual_cycles();
fixed_counters[i][2][0] = rdpmc_reference_cycles();
fixed_counters[i][3][0] = rdtsc();
for (k=0; k<inner_repetitions; k++) {
private_sum = ssum(&array[jstart[i]],vl[i]);
partial_sums[i] += private_sum;
iters++;
}
fixed_counters[i][0][1] = rdpmc_instructions();
fixed_counters[i][1][1] = rdpmc_actual_cycles();
fixed_counters[i][2][1] = rdpmc_reference_cycles();
fixed_counters[i][3][1] = rdtsc();
iteration_counts[i] = iters;
}
#endif
// ================ CODE UNDER TEST ENDS HERE ====================
tsc_end = rdtsc();
// use the partial sums so the optimizer will not eliminate code
for (i=0; i<CORES_USED; i++) {
sum += partial_sums[i];
}
// -------------------- read the final values of the Programmable Core counters ------------------------
#pragma omp parallel for private(counter)
for (i=0; i<CORES_USED; i++) {
#ifdef CHECK_THREAD_LOCATION
if (get_core_number() != stride*i) {
printf("ERROR: thread %d is in the wrong place %d\n",i,get_core_number());
}
#endif
for (counter=0; counter<4; counter++) {
core_counters[i][counter][1] = rdpmc(counter);
#ifdef RETRIES
// if the counter returns zero, read it one more time....
if (core_counters[i][counter][1] == SPECIAL_VALUE) {
core_counters[i][counter][1] = rdpmc(counter);
#pragma omp atomic update
retries++;
}
#endif
}
}
#ifdef CHECK_SPECIAL_VALUES
for (i=0; i<CORES_USED; i++) {
for (counter=0; counter<4; counter++) {
if (core_counters[i][counter][0] == SPECIAL_VALUE) {
printf("DEBUG: SPECIAL_VALUE found after loop in start count on thread %d counter %d\n",i,counter);
zeros++;
}
if (core_counters[i][counter][1] == SPECIAL_VALUE) {
printf("DEBUG: SPECIAL_VALUE found after loop in end count on thread %d counter %d\n",i,counter);
zeros++;
}
}
}
#endif
#ifdef CHA_COUNTS
// ------------------- read the final values of the CHA mesh counters ----------------
for (pkg=0; pkg<2; pkg++) {
for (tile=0; tile<NUM_CHA_USED; tile++) {
for (counter=0; counter<4; counter++) {
msr_num = 0xe00 + 0x10*tile + 0x8 + counter;
pread(msr_fd[pkg],&msr_val,sizeof(msr_val),msr_num);
cha_counts[pkg][tile][counter][1] = msr_val;
}
}
}
#endif
#ifdef IMC_COUNTS
// ------------------ read the final values of the IMC counters -----------------
for (socket=0; socket<NUM_SOCKETS; socket++) {
bus = IMC_BUS_Socket[socket];
for (channel=0; channel<NUM_IMC_CHANNELS; channel++) {
device = IMC_Device_Channel[channel];
function = IMC_Function_Channel[channel];
for (counter=0; counter<NUM_IMC_COUNTERS; counter++) {
offset = IMC_PmonCtr_Offset[counter];
index = PCI_cfg_index(bus, device, function, offset);
// read each counter twice to identify rare cases where the low-order bits
// overflow and increment the high-order bits between the two reads.
// Use the second set of values unless (( high_1 != high_0 ) && ( low_1 > low_0))
// (this indicates that the counter rolled between the 3rd and 4th reads).
low_0 = mmconfig_ptr[index];
high_0 = mmconfig_ptr[index+1];
low_1 = mmconfig_ptr[index];
high_1 = mmconfig_ptr[index+1];
if ( (high_1 != high_0) && (low_1 > low_0) ) {
count = ((uint64_t) high_0) << 32 | (uint64_t) low_0;
} else {
count = ((uint64_t) high_1) << 32 | (uint64_t) low_1;
}
imc_counts[socket][channel][counter][1] = count;
}
}
}
#endif
// ================================== END OF PERFORMANCE COUNTER READS AFTER TEST ==============================================
t0 = 0.0;
t1 = (double) (tsc_end - tsc_start) / TSC_GHz / 1.0e9;
printf("Instrumented code required %f seconds to execute\n",t1-t0);
bandwidth = sizeof(double)*(double)l2_contained_size*(double)inner_repetitions / (t1-t0) / 1e9;
printf("Bandwidth %f GB/s\n",bandwidth);
printf("Bandwidth per core %f GB/s\n",bandwidth/(double)CORES_USED);
printf("Approx Bytes/cycle per core %f\n",bandwidth/(double)CORES_USED/2.0);
expected = (double)l2_contained_size * (double)(inner_repetitions) / (double)CORES_USED;
avg_cycles = (double)(tsc_end - tsc_start) / expected;
printf("Average TSC cycles per element %f\n",avg_cycles);
// clear the arrays for the package-level sums
for (pkg=0; pkg<2; pkg++) {
for (counter=0; counter<4; counter++) { // no point in summing the cycle counts, so exclude counter 4
core_pkg_sums[pkg][counter] = 0;
fixed_pkg_sums[pkg][counter] = 0;
imc_pkg_sums[pkg][counter] = 0;
cha_pkg_sums[pkg][counter] = 0;
}
}
// compute core package sums and optional print
for (i=0; i<CORES_USED; i++) {
for (counter=0; counter<4; counter++) {
delta = corrected_pmc_delta(fixed_counters[i][counter][1],fixed_counters[i][counter][0],fixed_pmc_width);
fixed_pkg_sums[0][counter] += delta;
}
for (counter=0; counter<4; counter++) {
#ifdef CHECK_SPECIAL_VALUES
if (core_counters[i][counter][0] == SPECIAL_VALUE) {
printf("DEBUG: SPECIAL_VALUE found in post-processing in start count on thread %d counter %d\n",i,counter);
}
if (core_counters[i][counter][1] == SPECIAL_VALUE) {
printf("DEBUG: SPECIAL_VALUE found in post-processing in end count on thread %d counter %d\n",i,counter);
}
#endif
delta = corrected_pmc_delta(core_counters[i][counter][1],core_counters[i][counter][0],core_pmc_width);
#ifdef VERBOSE
printf("CORE %d counter %d end %ld start %ld delta %ld\n",i,counter,core_counters[i][counter][1],core_counters[i][counter][0],delta);
#endif
core_pkg_sums[0][counter] += delta;
}
}
if (dumpall == 1) {
report = 0;
for (i=0; i<CORES_USED; i++) {
for (counter=0; counter<4; counter++) {
delta = corrected_pmc_delta(core_counters[i][counter][1],core_counters[i][counter][0],core_pmc_width);
printf("CORE %d counter %d end %ld start %ld delta %ld\n",i,counter,core_counters[i][counter][1],core_counters[i][counter][0],delta);
}
}
}
report = 1;
dumpall = 0;
#ifdef CHA_COUNTS
// print out the differences and compute sums of differences
for (pkg=0; pkg<2; pkg++) {
for (tile=0; tile<NUM_CHA_USED; tile++) {
for (counter=0; counter<4; counter++) {
delta = corrected_pmc_delta(cha_counts[pkg][tile][counter][1],cha_counts[pkg][tile][counter][0],uncore_pmc_width);
#ifdef VERBOSE
printf("CHA pkg %d tile %d counter %d delta %ld\n",pkg,tile,counter,delta);
#endif
cha_pkg_sums[pkg][counter] += delta;
}
}
}
#endif
#ifdef IMC_COUNTS
for (pkg=0; pkg<2; pkg++) {
for (channel=0; channel<NUM_IMC_CHANNELS; channel++) {
for (counter=0; counter<NUM_IMC_COUNTERS; counter++) {
delta = corrected_pmc_delta(imc_counts[pkg][channel][counter][1],imc_counts[pkg][channel][counter][0],uncore_pmc_width);
#ifdef VERBOSE
printf("IMC pkg %d channel %d counter %d delta %ld\n",pkg,channel,counter,delta);
#endif
imc_pkg_sums[pkg][counter] += delta;
}
}
}
#endif
int max_display_pkg = 1;
printf("Expected AVX512 arithmetic instructions (Event 0xC7, Umask 0x40) %ld\n",l2_contained_size*inner_repetitions/8);
for (pkg=0; pkg<max_display_pkg; pkg++) {
for (counter=0; counter<4; counter++) {
printf("CORE_PKG_SUMS pkg %d counter %d sum_delta %ld\n",pkg,counter,core_pkg_sums[pkg][counter]);
}
}
for (pkg=0; pkg<max_display_pkg; pkg++) {
for (counter=0; counter<4; counter++) {
printf("FIXED_PKG_SUMS pkg %d counter %d sum_delta %ld\n",pkg,counter,fixed_pkg_sums[pkg][counter]);
}
}
// the fixed-function counters are measured inside the OpenMP loop, so they should not be contaminated by
// spin-waiting....
// Compute per-core metrics here -- note that the fixed-function counter set is (Instr, CoreCyc, RefCyc, TSC)
// Utilization = RefCyc/TSC (fixed2/fixed3)
// AvgGHz_unhalted = CoreCyc/RefCyc * 2.1 (fixed1/fixed2 * 2.1)
// AvgGHz_wall = CoreCyc/TSC * 2.1 (fixed1/fixed3 * 2.1)
// IPC = Instr/CoreCyc (fixed0/fixed1)
long delta_inst, delta_core, delta_ref, delta_tsc;
double utilization, avg_ghz, ipc;
printf("CORE_UTILIZATION ");
for (i=0; i<CORES_USED; i++) {
delta_ref = corrected_pmc_delta(fixed_counters[i][2][1],fixed_counters[i][2][0],fixed_pmc_width);
delta_tsc = corrected_pmc_delta(fixed_counters[i][3][1],fixed_counters[i][3][0],fixed_pmc_width);
utilization = (double)delta_ref / (double)delta_tsc;
printf("%6.4f ",utilization);
}
printf("\n");
printf("CORE_GHZ ");
for (i=0; i<CORES_USED; i++) {
delta_core = corrected_pmc_delta(fixed_counters[i][1][1],fixed_counters[i][1][0],fixed_pmc_width);
delta_ref = corrected_pmc_delta(fixed_counters[i][2][1],fixed_counters[i][2][0],fixed_pmc_width);
avg_ghz = (double)delta_core / (double)delta_ref * TSC_GHz;
printf("%6.4f ",avg_ghz);
}
printf("\n");
printf("CORE_IPC ");
for (i=0; i<CORES_USED; i++) {
delta_inst = corrected_pmc_delta(fixed_counters[i][0][1],fixed_counters[i][0][0],fixed_pmc_width);
delta_core = corrected_pmc_delta(fixed_counters[i][1][1],fixed_counters[i][1][0],fixed_pmc_width);
ipc = (double)delta_inst / (double)delta_core;
printf("%6.4f ",ipc);
}
printf("\n");
printf("THREAD_EXECUTION_TIME ");
for (i=0; i<CORES_USED; i++) {
delta_tsc = corrected_pmc_delta(fixed_counters[i][3][1],fixed_counters[i][3][0],fixed_pmc_width);
t0 = (double)delta_tsc / (TSC_GHz*1.0e9);
printf("%f ",t0);
}
printf("\n");
#ifdef CHA_COUNTS
for (pkg=0; pkg<max_display_pkg; pkg++) {
for (counter=0; counter<4; counter++) {
printf("CHA_PKG_SUMS pkg %d counter %d sum_delta %ld\n",pkg,counter,cha_pkg_sums[pkg][counter]);
}
}
#endif
#ifdef IMC_COUNTS
for (pkg=0; pkg<max_display_pkg; pkg++) {
for (counter=0; counter<4; counter++) { // no point in summing the cycle counts, so exclude counter 4
printf("IMC_PKG_SUMS pkg %d counter %d sum_delta %ld\n",pkg,counter,imc_pkg_sums[pkg][counter]);
}
}
#endif
// for the Snoop Filter set
// expected = expected number of cache lines loaded from L2
// sf_evict_rate = #evictions / expected number of loads
expected = 8.0/64.0* (double)l2_contained_size * (double) inner_repetitions;
sf_evict_rate = (double) cha_pkg_sums[0][0] / expected;
printf("SnoopFilterEvictionRate %f\n",sf_evict_rate);
expected = (double)l2_contained_size * (double) (inner_repetitions+1); // adjusted for pre-load of data
printf("Dummy Sum value is %f, expected value %f\n",sum,expected);
expected = (double)l2_contained_size * (double) inner_repetitions;
printf("Expected number of cache lines loaded from L2 %f\n",expected/8.0);
printf("Number of performance counter wraprounds detected %d\n",nwraps);
#ifdef RETRIES
printf("Number of core performance counter reads retried %d\n",retries);
#endif
printf("Number of zero values found in the inner loop %d\n",zeros);
// printf("Expected Number of Loads for AVX2 code %ld\n",arraylen/4);
// printf("Expected Number of Cache Lines loaded %ld\n",arraylen/8);
for (i=0; i<CORES_USED; i++) {
if (iteration_counts[i] != inner_repetitions) {
printf("ERROR: thread %d iteration_counts %ld expected %ld\n",i,iteration_counts[i],inner_repetitions);
}
}
// per-core performance counter values
for (counter=0; counter<4; counter++) {
printf("CORE_counter %d ",counter);
for (i=0; i<CORES_USED; i++) {
delta = corrected_pmc_delta(core_counters[i][counter][1],core_counters[i][counter][0],core_pmc_width);
printf("%ld ",delta);
}
printf("\n");
}
// per-CHA performance counter values -- socket 0 only
for (counter=0; counter<4; counter++) {
printf("CHA_counter %d ",counter);
for (i=0; i<NUM_CHA_USED; i++) {
delta = corrected_pmc_delta(cha_counts[0][i][counter][1],cha_counts[0][i][counter][0],uncore_pmc_width);
printf("%ld ",delta);
}
printf("\n");
}
printf("Double-check physical address of base of array\n");
pagemapentry = get_pagemap_entry(&array[0]);
printf(" array[0] va 0x%.16lx pagemapentry 0x%.16lx\n",&array[0],pagemapentry);
}
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% John Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite-private.h"
#include "magick/distribute-cache-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/option.h"
#include "magick/pixel.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/policy.h"
#include "magick/quantum.h"
#include "magick/random_.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/utility.h"
#include "magick/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
typedef struct _MagickModulo
{
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const IndexPacket
*GetVirtualIndexesFromCache(const Image *);
static const PixelPacket
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,
PixelPacket *,ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,PixelPacket *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
ReadPixelCacheIndexes(CacheInfo *,NexusInfo *,ExceptionInfo *),
ReadPixelCachePixels(CacheInfo *,NexusInfo *,ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCacheIndexes(CacheInfo *,NexusInfo *,ExceptionInfo *),
WritePixelCachePixels(CacheInfo *,NexusInfo *,ExceptionInfo *);
static PixelPacket
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *,const MapMode,
const RectangleInfo *,const MagickBooleanType,NexusInfo *,ExceptionInfo *)
magick_hot_spot;
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static volatile MagickBooleanType
instantiate_cache = MagickFalse;
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
/*
  AcquirePixelCache() allocates and zero-initializes a new pixel cache
  record sized for the requested number of worker threads.  The thread
  count is raised to cover both the OpenMP maximum and the thread resource
  limit, and never drops below one.  Returns an opaque Cache handle;
  raises a fatal exception (no return) if memory cannot be obtained.
*/
MagickExport Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *restrict info;

  char
    *sync_env;

  size_t
    threads;

  info=(CacheInfo *) AcquireQuantumMemory(1,sizeof(*info));
  if (info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(info,0,sizeof(*info));
  /* default state: no backing store yet, read/write mode, sRGB, 4 channels */
  info->type=UndefinedCache;
  info->mode=IOMode;
  info->colorspace=sRGBColorspace;
  info->channels=4;
  info->file=(-1);
  info->id=GetMagickThreadId();
  /*
    Use the largest of: the caller's request, the OpenMP thread width, and
    the configured thread resource limit; guarantee at least one nexus.
  */
  threads=number_threads;
  if (GetOpenMPMaximumThreads() > threads)
    threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > threads)
    threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (threads == 0)
    threads=1;
  info->number_threads=threads;
  info->nexus_info=AcquirePixelCacheNexus(threads);
  if (info->nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /* honor the MAGICK_SYNCHRONIZE environment override */
  sync_env=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (sync_env != (char *) NULL)
    {
      info->synchronize=IsStringTrue(sync_env);
      sync_env=DestroyString(sync_env);
    }
  info->semaphore=AllocateSemaphoreInfo();
  info->reference_count=1;
  info->file_semaphore=AllocateSemaphoreInfo();
  info->debug=IsEventLogging();
  info->signature=MagickSignature;
  return((Cache) info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
/*
  AcquirePixelCacheNexus() allocates one NexusInfo per thread.  A single
  aligned pointer table is allocated, and every NexusInfo record is carved
  out of one contiguous zero-filled slab (anchored at slot 0) so the whole
  set can later be released as a unit.  Raises a fatal exception (no
  return) on allocation failure.
*/
MagickExport NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **restrict table;

  register ssize_t
    n;

  table=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(
    number_threads,sizeof(*table)));
  if (table == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /* one contiguous slab backs every per-thread nexus record */
  table[0]=(NexusInfo *) AcquireQuantumMemory(number_threads,sizeof(**table));
  if (table[0] == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(table[0],0,number_threads*sizeof(**table));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    table[n]=table[0]+n;
    table[n]->signature=MagickSignature;
  }
  return(table);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% const void *AcquirePixelCachePixels(const Image *image,
% MagickSizeType *length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  AcquirePixelCachePixels() returns a read-only pointer to the image's
  pixel store and writes its byte length to *length.  Pixels are directly
  addressable only for in-memory or memory-mapped caches; for any other
  cache type *length is set to 0 and NULL is returned.
*/
MagickExport const void *AcquirePixelCachePixels(const Image *image,
  MagickSizeType *length,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache;

  /* validate arguments and the cache signature */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache=(CacheInfo *) image->cache;
  assert(cache->signature == MagickSignature);
  *length=0;
  if ((cache->type == MemoryCache) || (cache->type == MapCache))
    {
      *length=cache->length;
      return((const void *) cache->pixels);
    }
  return((const void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
/*
  CacheComponentGenesis() instantiates the cache component: it acquires the
  module-level cache_semaphore used to serialize access to the shared cache
  state.  Always returns MagickTrue.
*/
MagickExport MagickBooleanType CacheComponentGenesis(void)
{
AcquireSemaphoreInfo(&cache_semaphore);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% void CacheComponentTerminus(void)
%
*/
/*
  CacheComponentTerminus() destroys the cache component: it clears the
  instantiate_cache flag under the module semaphore, then destroys the
  semaphore itself.  The lock/flag/unlock/destroy statement order is
  deliberate and must not be rearranged.
*/
MagickExport void CacheComponentTerminus(void)
{
/* the semaphore may not exist yet if genesis was never run */
if (cache_semaphore == (SemaphoreInfo *) NULL)
AcquireSemaphoreInfo(&cache_semaphore);
LockSemaphoreInfo(cache_semaphore);
instantiate_cache=MagickFalse;
UnlockSemaphoreInfo(cache_semaphore);
DestroySemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;
  MagickSizeType
    number_pixels;
  NexusInfo
    **restrict clip_nexus,
    **restrict image_nexus;
  register const PixelPacket
    *restrict r;
  register IndexPacket
    *restrict nexus_indexes,
    *restrict indexes;
  register PixelPacket
    *restrict p,
    *restrict q;
  register ssize_t
    i;
  /*
    Apply the image clip mask to the nexus: where the mask intensity exceeds
    QuantumRange/2, overwrite the nexus pixel with the authentic image pixel;
    elsewhere the nexus pixel is left untouched.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->clip_mask == (Image *) NULL)
    return(MagickFalse);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /* One single-view nexus each for the image pixels and clip-mask pixels. */
  image_nexus=AcquirePixelCacheNexus(1);
  clip_nexus=AcquirePixelCacheNexus(1);
  if ((image_nexus == (NexusInfo **) NULL) ||
      (clip_nexus == (NexusInfo **) NULL))
    ThrowBinaryException(CacheError,"UnableToGetCacheNexus",image->filename);
  /* p: authentic image pixels for the nexus region; q: the nexus pixels. */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,image_nexus[0],
    exception);
  indexes=image_nexus[0]->indexes;
  q=nexus_info->pixels;
  nexus_indexes=nexus_info->indexes;
  /* r: clip-mask pixels over the same region (virtual, read-only). */
  r=GetVirtualPixelsFromNexus(image->clip_mask,MaskVirtualPixelMethod,
    nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
    nexus_info->region.height,clip_nexus[0],exception);
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (i=0; i < (ssize_t) number_pixels; i++)
  {
    /* NOTE(review): the NULL check sits inside the loop, so a failed pixel
       fetch breaks out on the first iteration, leaving i < number_pixels
       and producing a MagickFalse return below. */
    if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL))
      break;
    if (GetPixelIntensity(image,r) > (QuantumRange/2))
      {
        SetPixelRed(q,GetPixelRed(p));
        SetPixelGreen(q,GetPixelGreen(p));
        SetPixelBlue(q,GetPixelBlue(p));
        SetPixelOpacity(q,GetPixelOpacity(p));
        if (cache_info->active_index_channel != MagickFalse)
          SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i));
      }
    p++;
    q++;
    r++;
  }
  clip_nexus=DestroyPixelCacheNexus(clip_nexus,1);
  image_nexus=DestroyPixelCacheNexus(image_nexus,1);
  /* MagickTrue only if every pixel in the region was visited. */
  if (i < (ssize_t) number_pixels)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickExport Cache ClonePixelCache(const Cache cache)
{
  CacheInfo
    *restrict clone;

  const CacheInfo
    *restrict source;

  /*
    Allocate a fresh cache that inherits the source's thread count and
    virtual pixel method; pixel data itself is not copied here.
  */
  assert(cache != NULL);
  source=(const CacheInfo *) cache;
  assert(source->signature == MagickSignature);
  if (source->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      source->filename);
  clone=(CacheInfo *) AcquirePixelCache(source->number_threads);
  if (clone == (Cache) NULL)
    return((Cache) NULL);
  clone->virtual_pixel_method=source->virtual_pixel_method;
  return((Cache) clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickExport void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *restrict destination,
    *restrict source;

  /*
    Copy the cache method table from `cache` into `clone`.  (Locals renamed
    for clarity: the original called the clone "source_info".)
  */
  assert(clone != (Cache) NULL);
  destination=(CacheInfo *) clone;
  assert(destination->signature == MagickSignature);
  if (destination->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      destination->filename);
  assert(cache != (Cache) NULL);
  source=(CacheInfo *) cache;
  assert(source->signature == MagickSignature);
  destination->methods=source->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Return the smaller of two MagickSizeType values.
*/
static inline MagickSizeType MagickMin(const MagickSizeType x,
  const MagickSizeType y)
{
  return((x < y) ? x : y);
}
static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *restrict clone_info,CacheInfo *restrict cache_info,
  ExceptionInfo *exception)
{
#define MaxCacheThreads 2
#define cache_threads(source,destination,chunk) \
  num_threads((chunk) < (16*GetMagickResourceLimit(ThreadResource)) ? 1 : \
    GetMagickResourceLimit(ThreadResource) < MaxCacheThreads ? \
    GetMagickResourceLimit(ThreadResource) : MaxCacheThreads)

  MagickBooleanType
    status;

  NexusInfo
    **restrict cache_nexus,
    **restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  /*
    Copy the source pixel repository (cache_info) into the destination
    (clone_info).  Fix vs. original: the four `&region` arguments below had
    been corrupted to the mojibake `®ion` (an HTML `&reg` entity), which
    does not compile; restored to `&region`.
  */
  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (cache_info->type == PingCache)
    return(MagickTrue);
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->active_index_channel == clone_info->active_index_channel))
    {
      /*
        Identical pixel cache morphology: both caches are memory-addressable
        with the same geometry, so clone with straight memcpy's.
      */
      (void) memcpy(clone_info->pixels,cache_info->pixels,cache_info->columns*
        cache_info->rows*sizeof(*cache_info->pixels));
      if (cache_info->active_index_channel != MagickFalse)
        (void) memcpy(clone_info->indexes,cache_info->indexes,
          cache_info->columns*cache_info->rows*sizeof(*cache_info->indexes));
      return(MagickTrue);
    }
  /*
    Mismatched pixel cache morphology: copy row by row through nexus views,
    transferring min(source,destination) columns per row.
  */
  cache_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
  clone_nexus=AcquirePixelCacheNexus(MaxCacheThreads);
  if ((cache_nexus == (NexusInfo **) NULL) ||
      (clone_nexus == (NexusInfo **) NULL))
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
    sizeof(*cache_info->pixels);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    cache_threads(cache_info,clone_info,cache_info->rows)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    PixelPacket
      *pixels;

    RectangleInfo
      region;

    if (status == MagickFalse)
      continue;
    if (y >= (ssize_t) clone_info->rows)
      continue;
    /*
      Read one full source row into this thread's cache nexus ...
    */
    region.width=cache_info->columns;
    region.height=1;
    region.x=0;
    region.y=y;
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,MagickTrue,
      cache_nexus[id],exception);
    if (pixels == (PixelPacket *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    /*
      ... then write it to the corresponding destination row.
    */
    region.width=clone_info->columns;
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,MagickTrue,
      clone_nexus[id],exception);
    if (pixels == (PixelPacket *) NULL)
      continue;
    (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length);
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->active_index_channel != MagickFalse) &&
      (clone_info->active_index_channel != MagickFalse))
    {
      /*
        Clone indexes (black channel or colormap indexes), using the same
        row-by-row scheme as the pixels above.
      */
      length=(size_t) MagickMin(cache_info->columns,clone_info->columns)*
        sizeof(*cache_info->indexes);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    cache_threads(cache_info,clone_info,cache_info->rows)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        PixelPacket
          *pixels;

        RectangleInfo
          region;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        region.width=cache_info->columns;
        region.height=1;
        region.x=0;
        region.y=y;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,MagickTrue,
          cache_nexus[id],exception);
        if (pixels == (PixelPacket *) NULL)
          continue;
        status=ReadPixelCacheIndexes(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        region.width=clone_info->columns;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,&region,
          MagickTrue,clone_nexus[id],exception);
        if (pixels == (PixelPacket *) NULL)
          continue;
        (void) memcpy(clone_nexus[id]->indexes,cache_nexus[id]->indexes,
          length);
        status=WritePixelCacheIndexes(clone_info,clone_nexus[id],exception);
      }
    }
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,MaxCacheThreads);
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,MaxCacheThreads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MaxTextExtent];

      (void) FormatLocaleString(message,MaxTextExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
          clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  /*
    Release the image's pixel cache reference, if it has one.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache != (void *) NULL)
    image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *restrict info;

  /*
    Deallocate the image's pixel cache, delegating to a registered
    destroy-pixel handler when one is installed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickSignature);
  if (info->methods.destroy_pixel_handler == (DestroyPixelHandler) NULL)
    image->cache=DestroyPixelCache(image->cache);
  else
    info->methods.destroy_pixel_handler(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    result;

  /*
    Close the cache's disk file descriptor and release its file resource.
    Returns MagickFalse when no file is open or close(2) fails.
  */
  if (cache_info->file == -1)
    return(MagickFalse);
  result=close(cache_info->file);
  cache_info->file=(-1);
  RelinquishMagickResource(FileResource,1);
  return((result == -1) ? MagickFalse : MagickTrue);
}
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  /*
    Release the pixel store backing this cache (heap, map, disk, or
    distributed server) and reset the cache to UndefinedCache.
  */
  switch (cache_info->type)
  {
    case MemoryCache:
    {
      /* Heap-backed pixels may still be an anonymous map (mapped set). */
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(PixelPacket *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        cache_info->pixels=(PixelPacket *) UnmapBlob(cache_info->pixels,
          (size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      cache_info->pixels=(PixelPacket *) UnmapBlob(cache_info->pixels,(size_t)
        cache_info->length);
      if (cache_info->mode != ReadMode)
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /* fallthrough into DiskCache: a map-backed cache also owns an open disk
       file that must be closed and its disk resource released.  NOTE(review):
       the fallthrough is unmarked in the original; it appears intentional
       (the filename is already cleared above, so the second relinquish is a
       no-op) -- confirm before adding a break. */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if (cache_info->mode != ReadMode)
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->indexes=(IndexPacket *) NULL;
}
MagickExport Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Drop one reference under the cache semaphore; only the final reference
    proceeds to tear the cache down.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MaxTextExtent];

      (void) FormatLocaleString(message,MaxTextExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /*
    Release the pixel store, then the distributed-cache connection, nexus
    table, random state, and semaphores; finally poison the signature to
    catch use-after-destroy, and free the structure itself.
  */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&cache_info->semaphore);
  cache_info->signature=(~MagickSignature);
  cache_info=(CacheInfo *) RelinquishMagickMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  /*
    Free (or unmap) the nexus staging buffer and clear every field that
    referenced it.
  */
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->cache=(PixelPacket *) NULL;
  nexus_info->pixels=(PixelPacket *) NULL;
  nexus_info->indexes=(IndexPacket *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}
MagickExport NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  register ssize_t
    n;

  /*
    Release each nexus' staging pixels and poison its signature, then free
    the storage.  NOTE(review): only nexus_info[0] is freed before the
    pointer array itself, which implies the NexusInfo structs were allocated
    as one contiguous block -- confirm against AcquirePixelCacheNexus.
  */
  assert(nexus_info != (NexusInfo **) NULL);
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    if (nexus_info[n]->cache != (PixelPacket *) NULL)
      RelinquishCacheNexusPixels(nexus_info[n]);
    nexus_info[n]->signature=(~MagickSignature);
  }
  nexus_info[0]=(NexusInfo *) RelinquishMagickMemory(nexus_info[0]);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c I n d e x e s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticIndexesFromCache() returns the indexes associated with the last
% call to QueueAuthenticPixelsCache() or GetAuthenticPixelsCache().
%
% The format of the GetAuthenticIndexesFromCache() method is:
%
% IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static IndexPacket *GetAuthenticIndexesFromCache(const Image *image)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c I n d e x Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticIndexQueue() returns the authentic black channel or the colormap
% indexes associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% The format of the GetAuthenticIndexQueue() method is:
%
% IndexPacket *GetAuthenticIndexQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport IndexPacket *GetAuthenticIndexQueue(const Image *image)
{
  CacheInfo
    *restrict info;

  GetAuthenticIndexesFromHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the authentic indexes from the installed handler when one exists,
    otherwise from the calling thread's default cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickSignature);
  handler=info->methods.get_authentic_indexes_from_handler;
  if (handler != (GetAuthenticIndexesFromHandler) NULL)
    return(handler(image));
  assert(id < (int) info->number_threads);
  return(info->nexus_info[id]->indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% PixelPacket *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *GetAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *restrict info;

  PixelPacket
    *restrict q;

  /*
    Map the requested region into the nexus, then fault the pixels (and
    indexes, when active) in from the backing store unless the nexus already
    addresses the authentic pixels directly.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  q=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (q == (PixelPacket *) NULL)
    return((PixelPacket *) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickSignature);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(q);
  if (ReadPixelCachePixels(info,nexus_info,exception) == MagickFalse)
    return((PixelPacket *) NULL);
  if ((info->active_index_channel != MagickFalse) &&
      (ReadPixelCacheIndexes(info,nexus_info,exception) == MagickFalse))
    return((PixelPacket *) NULL);
  return(q);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
%      PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static PixelPacket *GetAuthenticPixelsFromCache(const Image *image)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
%      PixelPacket *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport PixelPacket *GetAuthenticPixelQueue(const Image *image)
{
  CacheInfo
    *restrict info;

  GetAuthenticPixelsFromHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the authentic pixels from the installed handler when one exists,
    otherwise from the calling thread's default cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickSignature);
  handler=info->methods.get_authentic_pixels_from_handler;
  if (handler != (GetAuthenticPixelsFromHandler) NULL)
    return(handler(image));
  assert(id < (int) info->number_threads);
  return(info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a PixelPacket array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or if the storage class is
% PseduoClass, call GetAuthenticIndexQueue() after invoking
% GetAuthenticPixels() to obtain the black color component or colormap indexes
% (of type IndexPacket) corresponding to the region. Once the PixelPacket
% (and/or IndexPacket) array has been updated, the changes must be saved back
% to the underlying image using SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict info;

  GetAuthenticPixelsHandler
    handler;

  const int
    id = GetOpenMPThreadId();

  /*
    Obtain a read/write view of the region, preferring an installed handler
    and falling back to the calling thread's default cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickSignature);
  handler=info->methods.get_authentic_pixels_handler;
  if (handler != (GetAuthenticPixelsHandler) NULL)
    return(handler(image,x,y,columns,rows,exception));
  assert(id < (int) info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PixelPacket *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict info;

  const int
    id = GetOpenMPThreadId();

  /*
    Fetch authentic pixels for the region through the calling thread's
    default cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  if (info == (Cache) NULL)
    return((PixelPacket *) NULL);
  assert(info->signature == MagickSignature);
  assert(id < (int) info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    info->nexus_info[id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated with the
% last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  CacheInfo
    *restrict info;

  const int
    id = GetOpenMPThreadId();

  /*
    Report the extent of the calling thread's default cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickSignature);
  assert(id < (int) info->number_threads);
  return(GetPixelCacheNexusExtent(info,info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
const Image *restrict image)
{
CacheInfo
*restrict cache_info;
/*
Does the image match the pixel cache morphology?
*/
cache_info=(CacheInfo *) image->cache;
if ((image->storage_class != cache_info->storage_class) ||
(image->colorspace != cache_info->colorspace) ||
(image->channels != cache_info->channels) ||
(image->columns != cache_info->columns) ||
(image->rows != cache_info->rows) ||
(cache_info->nexus_info == (NexusInfo **) NULL))
return(MagickFalse);
return(MagickTrue);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  /* File-scope throttling/time-limit state shared by every call. */
  static MagickSizeType
    cpu_throttle = 0,
    cycles = 0,
    time_limit = 0;

  static time_t
    cache_timestamp = 0;

  status=MagickTrue;
  LockSemaphoreInfo(image->semaphore);
  /*
    Honor the CPU throttle resource: once per 32 calls, sleep for the
    configured delay.
  */
  if (cpu_throttle == 0)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  if ((cpu_throttle != MagickResourceInfinity) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (time_limit == 0)
    {
      /*
        Set the expire time in seconds.
      */
      time_limit=GetMagickResourceLimit(TimeResource);
      cache_timestamp=time((time_t *) NULL);
    }
  /* NOTE(review): ThrowFatalException fires while image->semaphore is held;
     presumably a fatal exception never returns here -- confirm. */
  if ((time_limit != MagickResourceInfinity) &&
      ((MagickSizeType) (time((time_t *) NULL)-cache_timestamp) >= time_limit))
    ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  destroy=MagickFalse;
  /*
    Copy-on-write: if the cache is shared (reference_count > 1) or read-only,
    replace it with a private writable clone before handing it out.  The
    condition is re-checked under the cache semaphore (double-checked).
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          Image
            clone_image;

          /*
            Clone pixel cache.  A shallow stack copy of the image (with its
            own semaphore) drives OpenPixelCache for the clone.
          */
          clone_image=(*image);
          clone_image.semaphore=AllocateSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status != MagickFalse)
            {
              /* Only copy the pixel data when the caller asked for it. */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status != MagickFalse)
                {
                  if (cache_info->reference_count == 1)
                    cache_info->nexus_info=(NexusInfo **) NULL;
                  destroy=MagickTrue;
                  image->cache=clone_image.cache;
                }
            }
          DestroySemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /* Drop our reference to the old cache outside its semaphore. */
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      image->taint=MagickTrue;
      image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->type == DiskCache)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MapCache, MemoryCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/* Thin forwarder: kept for callers of the older name. */
MagickExport CacheType GetPixelCacheType(const Image *image)
{
  return(GetImagePixelCacheType(image));
}
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  CacheInfo
    *restrict info;

  /*
    Report the backing-store type of the image's pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickSignature);
  return(info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
%      MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *restrict info;

  GetOneAuthenticPixelFromHandler
    handler;

  PixelPacket
    *restrict q;

  /*
    Fetch the single authentic pixel at (x,y); *pixel is preset to the
    background color so the caller gets a defined value even on failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickSignature);
  *pixel=image->background_color;
  handler=info->methods.get_one_authentic_pixel_from_handler;
  if (handler != (GetOneAuthenticPixelFromHandler) NULL)
    return(handler(image,x,y,pixel,exception));
  q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  if (q == (PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*q);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
% MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
% const ssize_t x,const ssize_t y,PixelPacket *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *restrict info;

  const int
    id = GetOpenMPThreadId();

  PixelPacket
    *restrict p;

  /*
    Fetch one authentic pixel through this thread's cache nexus; *pixel is
    preloaded with the image background color in case the read fails.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickSignature);
  *pixel=image->background_color;
  assert(id < (int) info->number_threads);
  p=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,info->nexus_info[id],
    exception);
  if (p == (PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l M a g i c k P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualMagickPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualMagickPixel() method is:
%
% MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
% const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMagickPixel(const Image *image,
  const ssize_t x,const ssize_t y,MagickPixelPacket *pixel,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict info;

  const int
    id = GetOpenMPThreadId();

  register const IndexPacket
    *restrict idx;

  register const PixelPacket
    *restrict p;

  /*
    Read one virtual pixel as a MagickPixelPacket using the image's current
    virtual pixel method; *pixel is initialized even on failure.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickSignature);
  assert(id < (int) info->number_threads);
  p=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,1UL,
    1UL,info->nexus_info[id],exception);
  GetMagickPixelPacket(image,pixel);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  idx=GetVirtualIndexesFromNexus(info,info->nexus_info[id]);
  SetMagickPixelPacket(image,p,idx,pixel);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l M e t h o d P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualMethodPixel() returns a single pixel at the specified (x,y)
% location as defined by specified pixel method. The image background color
% is returned if an error occurs. If you plan to modify the pixel, use
% GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualMethodPixel() method is:
%
% MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualMethodPixel(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *restrict info;

  const int
    id = GetOpenMPThreadId();

  const PixelPacket
    *restrict p;

  /*
    Read one virtual pixel with an explicit virtual pixel method; the
    background color is preloaded into *pixel in case of failure.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickSignature);
  *pixel=image->background_color;
  if (info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(info->methods.get_one_virtual_pixel_from_handler(image,
      virtual_pixel_method,x,y,pixel,exception));
  assert(id < (int) info->number_threads);
  p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[id],exception);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
% MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
% const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *restrict info;

  const int
    id = GetOpenMPThreadId();

  const PixelPacket
    *restrict p;

  /*
    Read one virtual pixel with the image's current virtual pixel method;
    *pixel is preloaded with the background color in case the read fails.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickSignature);
  *pixel=image->background_color;
  if (info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(id < (int) info->number_threads);
  p=GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,1UL,
    1UL,info->nexus_info[id],exception);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
% MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
% PixelPacket *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelPacket *pixel,ExceptionInfo *exception)
{
  CacheInfo
    *restrict info;

  const int
    id = GetOpenMPThreadId();

  const PixelPacket
    *restrict p;

  /*
    Default handler: read one virtual pixel through this thread's cache
    nexus; the background color is preloaded in case of failure.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickSignature);
  assert(id < (int) info->number_threads);
  *pixel=image->background_color;
  p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[id],exception);
  if (p == (const PixelPacket *) NULL)
    return(MagickFalse);
  *pixel=(*p);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheChannels() returns the number of pixel channels associated
% with this instance of the pixel cache.
%
% The format of the GetPixelCacheChannels() method is:
%
% size_t GetPixelCacheChannels(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheChannels returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickExport size_t GetPixelCacheChannels(const Cache cache)
{
  CacheInfo
    *restrict info;

  /*
    Number of channels stored per pixel in this cache instance.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->channels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
% ColorspaceType GetPixelCacheColorspace(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickExport ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  CacheInfo
    *restrict info;

  /*
    Colorspace the cached pixels are stored in.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickExport void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Populate cache_methods with the default pixel cache callbacks.  The
    structure is zeroed first, so any handler slot not assigned below is
    left NULL (callers test handlers against NULL before dispatching).
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) ResetMagickMemory(cache_methods,0,sizeof(*cache_methods));
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_indexes_from_handler=GetVirtualIndexesFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_indexes_from_handler=
    GetAuthenticIndexesFromCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated with
% the last call to SetPixelCacheNexusPixels() or GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
MagickExport MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *restrict cache_info;

  MagickSizeType
    extent;

  /*
    Number of pixels covered by the nexus region; when the region is empty
    fall back to the extent of the whole cache.
  */
  assert(cache != (Cache) NULL);
  assert(nexus_info != (NexusInfo *) NULL);  /* dereferenced below */
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent == 0)
    return((MagickSizeType) cache_info->columns*cache_info->rows);
  return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict info;

  /*
    Expose the raw pixel buffer and its length; only heap-resident and
    memory-mapped caches have an addressable buffer, otherwise NULL is
    returned with *length set to 0.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickSignature);
  *length=0;
  if ((info->type != MemoryCache) && (info->type != MapCache))
    return((void *) NULL);
  *length=info->length;
  return((void *) info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickExport ClassType GetPixelCacheStorageClass(const Cache cache)
{
  CacheInfo
    *restrict info;

  /*
    Storage class (DirectClass or PseudoClass) of the cached pixels.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimize cache tile width in pixels.
%
% o height: the optimize cache tile height in pixels.
%
*/
MagickExport void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  size_t
    extent;

  /*
    Suggest a square tile size in pixels: a 2KB tile row for in-memory
    caches, 8KB for disk caches where larger I/O requests pay off.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  extent=2048UL/sizeof(PixelPacket);
  if (GetImagePixelCacheType(image) == DiskCache)
    extent=8192UL/sizeof(PixelPacket);
  *width=extent;
  *height=extent;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  CacheInfo
    *restrict info;

  /*
    Current policy for pixel accesses that fall outside the image bounds.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickSignature);
  return(info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l I n d e x e s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexesFromCache() returns the indexes associated with the last
% call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualIndexesFromCache() method is:
%
% IndexPacket *GetVirtualIndexesFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const IndexPacket *GetVirtualIndexesFromCache(const Image *image)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
assert(id < (int) cache_info->number_threads);
return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l I n d e x e s F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexesFromNexus() returns the indexes associated with the
% specified cache nexus.
%
% The format of the GetVirtualIndexesFromNexus() method is:
%
% const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap indexes.
%
*/
MagickExport const IndexPacket *GetVirtualIndexesFromNexus(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *restrict info;

  /*
    Indexes associated with the given nexus, or NULL when the cache has no
    defined storage class yet.
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickSignature);
  if (info->storage_class == UndefinedClass)
    return((IndexPacket *) NULL);
  return(nexus_info->indexes);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l I n d e x Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualIndexQueue() returns the virtual black channel or the
% colormap indexes associated with the last call to QueueAuthenticPixels() or
% GetVirtualPixels(). NULL is returned if the black channel or colormap
% indexes are not available.
%
% The format of the GetVirtualIndexQueue() method is:
%
% const IndexPacket *GetVirtualIndexQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const IndexPacket *GetVirtualIndexQueue(const Image *image)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->methods.get_virtual_indexes_from_handler !=
(GetVirtualIndexesFromHandler) NULL)
return(cache_info->methods.get_virtual_indexes_from_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualIndexesFromNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsFromNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelsFromNexus() method is:
%
% PixelPacket *GetVirtualPixelsFromNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  8x8 ordered-dither offsets; DitherX()/DitherY() use only the first row
  (index & 0x07) to perturb out-of-bounds coordinates before clamping.
*/
static ssize_t
  DitherMatrix[64] =
  {
     0,  48,  12,  60,   3,  51,  15,  63,
    32,  16,  44,  28,  35,  19,  47,  31,
     8,  56,   4,  52,  11,  59,   7,  55,
    40,  24,  36,  20,  43,  27,  39,  23,
     2,  50,  14,  62,   1,  49,  13,  61,
    34,  18,  46,  30,  33,  17,  45,  29,
    10,  58,   6,  54,   9,  57,   5,  53,
    42,  26,  38,  22,  41,  25,  37,  21
  };

static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    i;

  /* Perturb x by a dither offset, then clamp to [0, columns-1]. */
  i=x+DitherMatrix[x & 0x07]-32L;
  if (i < 0L)
    i=0L;
  else
    if (i >= (ssize_t) columns)
      i=(ssize_t) columns-1L;
  return(i);
}

static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    i;

  /* Perturb y by a dither offset, then clamp to [0, rows-1]. */
  i=y+DitherMatrix[y & 0x07]-32L;
  if (i < 0L)
    i=0L;
  else
    if (i >= (ssize_t) rows)
      i=(ssize_t) rows-1L;
  return(i);
}
/* Clamp an x coordinate to the valid column range [0, columns-1]. */
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  if (x < 0L)
    return(0L);
  return(x);
}
/* Clamp a y coordinate to the valid row range [0, rows-1]. */
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  if (y < 0L)
    return(0L);
  return(y);
}
/* Uniformly random column index in [0, columns). */
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  double
    value;

  value=GetPseudoRandomValue(random_info);
  return((ssize_t) (columns*value));
}

/* Uniformly random row index in [0, rows). */
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  double
    value;

  value=GetPseudoRandomValue(random_info);
  return((ssize_t) (rows*value));
}
/*
VirtualPixelModulo() computes the remainder of dividing offset by extent. It
returns not only the quotient (tile the offset falls in) but also the positive
remainder within that tile such that 0 <= remainder < extent. This method is
essentially a ldiv() using a floored modulo division rather than the normal
default truncated modulo division.
*/
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;

  /*
    Floored division: quotient identifies the tile the offset falls in and
    remainder is always within 0 <= remainder < extent.  C's '/' and '%'
    truncate toward zero, so for negative offsets with a nonzero remainder
    we shift down one tile.  The previous form decremented the quotient for
    EVERY negative offset, so an evenly divisible negative offset (e.g.
    offset=-2, extent=2) yielded remainder == extent, an out-of-range pixel
    index for the tile/mirror virtual pixel methods.
  */
  modulo.quotient=offset/(ssize_t) extent;
  modulo.remainder=offset % (ssize_t) extent;
  if (modulo.remainder < 0L)
    {
      modulo.quotient--;
      modulo.remainder+=(ssize_t) extent;
    }
  return(modulo);
}
MagickExport const PixelPacket *GetVirtualPixelsFromNexus(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  IndexPacket
    virtual_index;

  MagickOffsetType
    offset;

  MagickSizeType
    length,
    number_pixels;

  NexusInfo
    **restrict virtual_nexus;

  PixelPacket
    *restrict pixels,
    virtual_pixel;

  RectangleInfo
    region;

  register const IndexPacket
    *restrict virtual_indexes;

  register const PixelPacket
    *restrict p;

  register IndexPacket
    *restrict indexes;

  register PixelPacket
    *restrict q;

  register ssize_t
    u,
    v;

  /*
    Acquire pixels.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->type == UndefinedCache)
    return((const PixelPacket *) NULL);
  region.x=x;
  region.y=y;
  region.width=columns;
  region.height=rows;
  /* NOTE: was garbled as "®ion" (mojibake of "&region") — restored. */
  pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,&region,
    (image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ?
    MagickTrue : MagickFalse,nexus_info,exception);
  if (pixels == (PixelPacket *) NULL)
    return((const PixelPacket *) NULL);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
    nexus_info->region.width-1L;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
    if ((x >= 0) && ((ssize_t) (x+columns) <= (ssize_t) cache_info->columns) &&
        (y >= 0) && ((ssize_t) (y+rows) <= (ssize_t) cache_info->rows))
      {
        MagickBooleanType
          status;

        /*
          Pixel request is inside cache extents.
        */
        if (nexus_info->authentic_pixel_cache != MagickFalse)
          return(pixels);
        status=ReadPixelCachePixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          return((const PixelPacket *) NULL);
        if ((cache_info->storage_class == PseudoClass) ||
            (cache_info->colorspace == CMYKColorspace))
          {
            status=ReadPixelCacheIndexes(cache_info,nexus_info,exception);
            if (status == MagickFalse)
              return((const PixelPacket *) NULL);
          }
        return(pixels);
      }
  /*
    Pixel request is outside cache extents.
  */
  q=pixels;
  indexes=nexus_info->indexes;
  virtual_nexus=AcquirePixelCacheNexus(1);
  if (virtual_nexus == (NexusInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "UnableToGetCacheNexus","`%s'",image->filename);
      return((const PixelPacket *) NULL);
    }
  /*
    Pre-compute the constant fill pixel used by the constant-color virtual
    pixel methods; other methods resolve each pixel individually below.
  */
  switch (virtual_pixel_method)
  {
    case BlackVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,0);
      SetPixelGreen(&virtual_pixel,0);
      SetPixelBlue(&virtual_pixel,0);
      SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
      break;
    }
    case GrayVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,QuantumRange/2);
      SetPixelGreen(&virtual_pixel,QuantumRange/2);
      SetPixelBlue(&virtual_pixel,QuantumRange/2);
      SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
      break;
    }
    case TransparentVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,0);
      SetPixelGreen(&virtual_pixel,0);
      SetPixelBlue(&virtual_pixel,0);
      SetPixelOpacity(&virtual_pixel,TransparentOpacity);
      break;
    }
    case MaskVirtualPixelMethod:
    case WhiteVirtualPixelMethod:
    {
      SetPixelRed(&virtual_pixel,QuantumRange);
      SetPixelGreen(&virtual_pixel,QuantumRange);
      SetPixelBlue(&virtual_pixel,QuantumRange);
      SetPixelOpacity(&virtual_pixel,OpaqueOpacity);
      break;
    }
    default:
    {
      virtual_pixel=image->background_color;
      break;
    }
  }
  virtual_index=0;
  /*
    Walk the requested region row by row; in-bounds spans are copied in
    runs, out-of-bounds pixels are synthesized one at a time.
  */
  for (v=0; v < (ssize_t) rows; v++)
  {
    ssize_t
      y_offset;

    y_offset=y+v;
    if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
        (virtual_pixel_method == UndefinedVirtualPixelMethod))
      y_offset=EdgeY(y_offset,cache_info->rows);
    for (u=0; u < (ssize_t) columns; u+=length)
    {
      ssize_t
        x_offset;

      x_offset=x+u;
      length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
      if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
          ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
          (length == 0))
        {
          MagickModulo
            x_modulo,
            y_modulo;

          /*
            Transfer a single pixel.
          */
          length=(MagickSizeType) 1;
          switch (virtual_pixel_method)
          {
            case BackgroundVirtualPixelMethod:
            case ConstantVirtualPixelMethod:
            case BlackVirtualPixelMethod:
            case GrayVirtualPixelMethod:
            case TransparentVirtualPixelMethod:
            case MaskVirtualPixelMethod:
            case WhiteVirtualPixelMethod:
            {
              p=(&virtual_pixel);
              virtual_indexes=(&virtual_index);
              break;
            }
            case EdgeVirtualPixelMethod:
            default:
            {
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),
                EdgeY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
            case RandomVirtualPixelMethod:
            {
              if (cache_info->random_info == (RandomInfo *) NULL)
                cache_info->random_info=AcquireRandomInfo();
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                RandomX(cache_info->random_info,cache_info->columns),
                RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
                *virtual_nexus,exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
            case DitherVirtualPixelMethod:
            {
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                DitherX(x_offset,cache_info->columns),
                DitherY(y_offset,cache_info->rows),1UL,1UL,*virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
            case TileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
            case MirrorVirtualPixelMethod:
            {
              /* Odd tiles are reflected about the image edge. */
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              if ((x_modulo.quotient & 0x01) == 1L)
                x_modulo.remainder=(ssize_t) cache_info->columns-
                  x_modulo.remainder-1L;
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if ((y_modulo.quotient & 0x01) == 1L)
                y_modulo.remainder=(ssize_t) cache_info->rows-
                  y_modulo.remainder-1L;
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
            case CheckerTileVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
                {
                  p=(&virtual_pixel);
                  virtual_indexes=(&virtual_index);
                  break;
                }
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
            case HorizontalTileVirtualPixelMethod:
            {
              if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
                {
                  p=(&virtual_pixel);
                  virtual_indexes=(&virtual_index);
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=(&virtual_pixel);
                  virtual_indexes=(&virtual_index);
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,*virtual_nexus,
                exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
            case HorizontalTileEdgeVirtualPixelMethod:
            {
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
                *virtual_nexus,exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
            case VerticalTileEdgeVirtualPixelMethod:
            {
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,
                EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
                *virtual_nexus,exception);
              virtual_indexes=GetVirtualIndexesFromNexus(cache_info,
                *virtual_nexus);
              break;
            }
          }
          if (p == (const PixelPacket *) NULL)
            break;
          *q++=(*p);
          if ((indexes != (IndexPacket *) NULL) &&
              (virtual_indexes != (const IndexPacket *) NULL))
            *indexes++=(*virtual_indexes);
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelsFromNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,*virtual_nexus,exception);
      if (p == (const PixelPacket *) NULL)
        break;
      virtual_indexes=GetVirtualIndexesFromNexus(cache_info,*virtual_nexus);
      (void) memcpy(q,p,(size_t) length*sizeof(*p));
      q+=length;
      if ((indexes != (IndexPacket *) NULL) &&
          (virtual_indexes != (const IndexPacket *) NULL))
        {
          (void) memcpy(indexes,virtual_indexes,(size_t) length*
            sizeof(*virtual_indexes));
          indexes+=length;
        }
    }
  }
  virtual_nexus=DestroyPixelCacheNexus(virtual_nexus,1);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const PixelPacket *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const PixelPacket *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  NexusInfo
    *restrict nexus;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Resolve this thread's cache nexus and delegate to the nexus-based reader.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(thread_id < (int) cache_info->number_threads);
  nexus=cache_info->nexus_info[thread_id];
  return(GetVirtualPixelsFromNexus(image,virtual_pixel_method,x,y,columns,rows,
    nexus,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated with the
% last call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
% const PixelPacket *GetVirtualPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const PixelPacket *GetVirtualPixelQueue(const Image *image)
{
CacheInfo
*restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickSignature);
if (cache_info->methods.get_virtual_pixels_handler !=
(GetVirtualPixelsHandler) NULL)
return(cache_info->methods.get_virtual_pixels_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to access
% the black color component or to obtain the colormap indexes (of type
% IndexPacket) corresponding to the region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const PixelPacket *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const PixelPacket *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  GetVirtualPixelHandler
    handler;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Return an immutable pixel region; honor an installed cache-methods
    override before falling back to this thread's nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  handler=cache_info->methods.get_virtual_pixel_handler;
  if (handler != (GetVirtualPixelHandler) NULL)
    return(handler(image,GetPixelCacheVirtualMethod(image),x,y,columns,rows,
      exception));
  assert(thread_id < (int) cache_info->number_threads);
  return(GetVirtualPixelsFromNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    columns,rows,cache_info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated with the last call
% to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
% PixelPacket *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const PixelPacket *GetVirtualPixelsCache(const Image *image)
{
  CacheInfo
    *restrict cache_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Return the pixels associated with the most recent request on this
    thread's nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(thread_id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[thread_id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const PixelPacket *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickExport const PixelPacket *GetVirtualPixelsNexus(const Cache cache,
  NexusInfo *nexus_info)
{
  CacheInfo
    *restrict cache_info;

  /*
    Return the pixels bound to the given nexus, or NULL when the cache has
    no pixels attached (UndefinedClass).
  */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->storage_class != UndefinedClass)
    return((const PixelPacket *) nexus_info->pixels);
  return((PixelPacket *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the image mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to mask.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Composite pixel p (weight alpha) over pixel q (weight beta) into
  *composite using the standard "over" operator.  The index (black)
  channel is composited only when both pixels are CMYK.
*/
static inline void MagickPixelCompositeMask(const MagickPixelPacket *p,
  const MagickRealType alpha,const MagickPixelPacket *q,
  const MagickRealType beta,MagickPixelPacket *composite)
{
  double
    gamma;

  /* A fully transparent source contributes nothing: keep the destination. */
  if (alpha == TransparentOpacity)
    {
      *composite=(*q);
      return;
    }
  gamma=1.0-QuantumScale*QuantumScale*alpha*beta;
  /* PerceptibleReciprocal() guards against division by a near-zero gamma. */
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*MagickOver_(p->red,alpha,q->red,beta);
  composite->green=gamma*MagickOver_(p->green,alpha,q->green,beta);
  composite->blue=gamma*MagickOver_(p->blue,alpha,q->blue,beta);
  if ((p->colorspace == CMYKColorspace) && (q->colorspace == CMYKColorspace))
    composite->index=gamma*MagickOver_(p->index,alpha,q->index,beta);
}
/*
  Blend the pixels of nexus_info with the image's authentic pixels,
  weighted by the image mask's intensity.  Returns MagickTrue when every
  pixel in the region was processed, MagickFalse otherwise.
*/
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  MagickPixelPacket
    alpha,
    beta;

  MagickSizeType
    number_pixels;

  NexusInfo
    **restrict clip_nexus,
    **restrict image_nexus;

  register const PixelPacket
    *restrict r;

  register IndexPacket
    *restrict nexus_indexes,
    *restrict indexes;

  register PixelPacket
    *restrict p,
    *restrict q;

  register ssize_t
    i;

  /*
    Apply clip mask.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->mask == (Image *) NULL)
    return(MagickFalse);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /* NOTE(review): if clip_nexus allocation fails here, the already-acquired
     image_nexus is not released before throwing — confirm intended. */
  image_nexus=AcquirePixelCacheNexus(1);
  clip_nexus=AcquirePixelCacheNexus(1);
  if ((image_nexus == (NexusInfo **) NULL) ||
      (clip_nexus == (NexusInfo **) NULL))
    ThrowBinaryException(CacheError,"UnableToGetCacheNexus",image->filename);
  /* Authentic pixels of the image over the same region as nexus_info. */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,
    nexus_info->region.y,nexus_info->region.width,nexus_info->region.height,
    image_nexus[0],exception);
  indexes=image_nexus[0]->indexes;
  q=nexus_info->pixels;
  nexus_indexes=nexus_info->indexes;
  /* Mask pixels, read through the mask image's own pixel cache. */
  r=GetVirtualPixelsFromNexus(image->mask,MaskVirtualPixelMethod,
    nexus_info->region.x,nexus_info->region.y,nexus_info->region.width,
    nexus_info->region.height,clip_nexus[0],&image->exception);
  GetMagickPixelPacket(image,&alpha);
  GetMagickPixelPacket(image,&beta);
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (i=0; i < (ssize_t) number_pixels; i++)
  {
    /* Either fetch failing aborts the loop; the trailing check below then
       reports failure. */
    if ((p == (PixelPacket *) NULL) || (r == (const PixelPacket *) NULL))
      break;
    SetMagickPixelPacket(image,p,indexes+i,&alpha);
    SetMagickPixelPacket(image,q,nexus_indexes+i,&beta);
    /* Composite the nexus pixel over the authentic pixel, weighted by the
       mask pixel's intensity. */
    MagickPixelCompositeMask(&beta,GetPixelIntensity(image,r),&alpha,
      alpha.opacity,&beta);
    SetPixelRed(q,ClampToQuantum(beta.red));
    SetPixelGreen(q,ClampToQuantum(beta.green));
    SetPixelBlue(q,ClampToQuantum(beta.blue));
    SetPixelOpacity(q,ClampToQuantum(beta.opacity));
    if (cache_info->active_index_channel != MagickFalse)
      SetPixelIndex(nexus_indexes+i,GetPixelIndex(indexes+i));
    p++;
    q++;
    r++;
  }
  clip_nexus=DestroyPixelCacheNexus(clip_nexus,1);
  image_nexus=DestroyPixelCacheNexus(image_nexus,1);
  /* i < number_pixels means the loop broke early on a NULL pixel source. */
  if (i < (ssize_t) number_pixels)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% colormap indexes, and memory mapping the cache if it is disk based. The
% cache nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void AllocatePixelCachePixels(CacheInfo *cache_info)
{
  /*
    Try an aligned heap allocation first; on failure fall back to an
    anonymous memory map.  "mapped" records which strategy succeeded so the
    matching release path can be chosen later.
  */
  cache_info->mapped=MagickFalse;
  cache_info->pixels=(PixelPacket *) MagickAssumeAligned(
    AcquireAlignedMemory(1,(size_t) cache_info->length));
  if (cache_info->pixels != (PixelPacket *) NULL)
    return;
  cache_info->mapped=MagickTrue;
  cache_info->pixels=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t)
    cache_info->length);
}
/*
  Open (or create) the disk file backing the pixel cache for the given
  access mode.  Returns MagickTrue on success.
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
  const MapMode mode)
{
  int
    file;

  /*
    Open pixel cache on disk.
  */
  if (cache_info->file != -1)
    return(MagickTrue);  /* cache already open */
  if (*cache_info->cache_filename == '\0')
    file=AcquireUniqueFileResource(cache_info->cache_filename);
  else
    switch (mode)
    {
      case ReadMode:
      {
        file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
        break;
      }
      case WriteMode:
      {
        /* Try exclusive create first; fall back to an existing file. */
        file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
          O_BINARY | O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
        break;
      }
      case IOMode:
      default:
      {
        /* Read/write: same exclusive-create-then-open fallback. */
        file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
          O_EXCL,S_MODE);
        if (file == -1)
          file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
        break;
      }
    }
  if (file == -1)
    return(MagickFalse);
  /* Account for the open descriptor against the file resource limit. */
  (void) AcquireMagickResource(FileResource,1);
  cache_info->file=file;
  cache_info->mode=mode;
  return(MagickTrue);
}
/*
  Write "length" bytes from buffer to the cache file at the given offset,
  retrying on short writes and EINTR.  Returns the number of bytes written
  (which may be less than length on error), or -1 if the initial seek fails.
*/
static inline MagickOffsetType WritePixelCacheRegion(
  const CacheInfo *restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,const unsigned char *restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

#if !defined(MAGICKCORE_HAVE_PWRITE)
  /* Without pwrite(), position the file once and use plain write(). */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PWRITE)
    count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX));
#else
    count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        count=0;
        /* Retry only when interrupted by a signal; otherwise give up. */
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
/*
  Extend the on-disk pixel cache file to at least "length" bytes by writing
  a single byte at the end (and optionally preallocating via
  posix_fallocate()).  Returns MagickTrue on success.
*/
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
  CacheInfo
    *restrict cache_info;

  MagickOffsetType
    count,
    extent,
    offset;

  cache_info=(CacheInfo *) image->cache;
  if (image->debug != MagickFalse)
    {
      char
        format[MaxTextExtent],
        message[MaxTextExtent];

      (void) FormatMagickSize(length,MagickFalse,format);
      (void) FormatLocaleString(message,MaxTextExtent,
        "extend %s (%s[%d], disk, %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /* Reject lengths that overflow a signed offset. */
  if (length != (MagickSizeType) ((MagickOffsetType) length))
    return(MagickFalse);
  offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
  if (offset < 0)
    return(MagickFalse);
  /* Already large enough: nothing to do. */
  if ((MagickSizeType) offset >= length)
    return(MagickTrue);
  /* Writing one byte at length-1 forces the file to grow to "length". */
  extent=(MagickOffsetType) length-1;
  count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *) "");
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
  if (cache_info->synchronize != MagickFalse)
    {
      int
        status;

      /* Preallocate the gap so later writes cannot fail with ENOSPC. */
      status=posix_fallocate(cache_info->file,offset+1,extent-offset);
      if (status != 0)
        return(MagickFalse);
    }
#endif
  return(count != (MagickOffsetType) 1 ? MagickFalse : MagickTrue);
}
/*
  Allocate the pixel cache for "image", trying backing stores in order of
  preference: memory (heap/anonymous map), distributed cache server,
  file-backed memory map, and finally plain disk I/O.  Returns MagickTrue
  on success; errors are reported through "exception".
*/
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info,
    source_info;

  char
    format[MaxTextExtent],
    message[MaxTextExtent];

  const char
    *type;

  MagickSizeType
    length,
    number_pixels;

  MagickStatusType
    status;

  size_t
    columns,
    packet_size;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  /* Snapshot the current cache so existing pixels can be cloned into the
     new backing store (source_info does not own the file descriptor). */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MaxTextExtent,"%s[%.20g]",
    image->filename,(double) GetImageIndexInList(image));
  cache_info->mode=mode;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  cache_info->channels=image->channels;
  cache_info->active_index_channel=((image->storage_class == PseudoClass) ||
    (image->colorspace == CMYKColorspace)) ? MagickTrue : MagickFalse;
  if (image->ping != MagickFalse)
    {
      /*
        Ping mode: record geometry only, no pixel storage.
      */
      cache_info->storage_class=image->storage_class;
      cache_info->colorspace=image->colorspace;
      cache_info->type=PingCache;
      cache_info->pixels=(PixelPacket *) NULL;
      cache_info->indexes=(IndexPacket *) NULL;
      cache_info->length=0;
      return(MagickTrue);
    }
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=sizeof(PixelPacket);
  if (cache_info->active_index_channel != MagickFalse)
    packet_size+=sizeof(IndexPacket);
  length=number_pixels*packet_size;
  /* Round-trip check: detects overflow in the length computation above. */
  columns=(size_t) (length/cache_info->rows/packet_size);
  if (cache_info->columns != columns)
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  status=AcquireMagickResource(AreaResource,cache_info->length);
  /* Worst-case length (pixels + indexes) must fit in size_t to allocate
     or map in one piece. */
  length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
  if ((status != MagickFalse) && (length == (MagickSizeType) ((size_t) length)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (((cache_info->type == UndefinedCache) && (status != MagickFalse)) ||
          (cache_info->type == MemoryCache))
        {
          AllocatePixelCachePixels(cache_info);
          if (cache_info->pixels == (PixelPacket *) NULL)
            cache_info->pixels=source_info.pixels;
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->colorspace=image->colorspace;
              cache_info->type=MemoryCache;
              cache_info->indexes=(IndexPacket *) NULL;
              /* Indexes live immediately after the pixel array. */
              if (cache_info->active_index_channel != MagickFalse)
                cache_info->indexes=(IndexPacket *) (cache_info->pixels+
                  number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status|=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MaxTextExtent,
                    "open %s (%s %s, %.20gx%.20g %s)",cache_info->filename,
                    cache_info->mapped != MagickFalse ? "Anonymous" : "Heap",
                    type,(double) cache_info->columns,(double) cache_info->rows,
                    format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              cache_info->storage_class=image->storage_class;
              return(MagickTrue);
            }
        }
      RelinquishMagickResource(MemoryResource,cache_info->length);
    }
  /*
    Create pixel cache on disk.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if ((status == MagickFalse) || (cache_info->type == DistributedCache))
    {
      DistributeCacheInfo
        *server_info;

      if (cache_info->type == DistributedCache)
        RelinquishMagickResource(DiskResource,cache_info->length);
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              cache_info->type=DistributedCache;
              cache_info->storage_class=image->storage_class;
              cache_info->colorspace=image->colorspace;
              cache_info->server_info=server_info;
              /* Record "host:port" as the cache filename for diagnostics. */
              (void) FormatLocaleString(cache_info->cache_filename,
                MaxTextExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,
                    format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MaxTextExtent,
                    "open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename,
                    cache_info->cache_filename,GetDistributeCacheFile(
                    (DistributeCacheInfo *) cache_info->server_info),type,
                    (double) cache_info->columns,(double) cache_info->rows,
                    format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(MagickTrue);
            }
        }
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /* Re-opening in a writable mode: discard the stale disk file. */
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      RelinquishMagickResource(DiskResource,cache_info->length);
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  length=number_pixels*(sizeof(PixelPacket)+sizeof(IndexPacket));
  /* Too large to memory-map: fall back to plain disk I/O. */
  if (length != (MagickSizeType) ((size_t) length))
    cache_info->type=DiskCache;
  else
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if ((status == MagickFalse) && (cache_info->type != MapCache) &&
          (cache_info->type != MemoryCache))
        cache_info->type=DiskCache;
      else
        {
          cache_info->pixels=(PixelPacket *) MapBlob(cache_info->file,mode,
            cache_info->offset,(size_t) cache_info->length);
          if (cache_info->pixels == (PixelPacket *) NULL)
            {
              cache_info->pixels=source_info.pixels;
              cache_info->type=DiskCache;
            }
          else
            {
              /*
                Create file-backed memory-mapped pixel cache.
              */
              (void) ClosePixelCacheOnDisk(cache_info);
              cache_info->type=MapCache;
              cache_info->mapped=MagickTrue;
              cache_info->indexes=(IndexPacket *) NULL;
              if (cache_info->active_index_channel != MagickFalse)
                cache_info->indexes=(IndexPacket *) (cache_info->pixels+
                  number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MaxTextExtent,
                    "open %s (%s[%d], %s, %.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    cache_info->file,type,(double) cache_info->columns,(double)
                    cache_info->rows,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              return(MagickTrue);
            }
        }
      RelinquishMagickResource(MapResource,cache_info->length);
    }
  /* Plain disk cache: clone existing pixels if any, then report. */
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MaxTextExtent,
        "open %s (%s[%d], %s, %.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
% o attach: A value other than zero attaches to an existing persistent
% pixel cache.
%
% o initialize: A value other than zero initializes the persistent pixel
% cache.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Attach to, usurp, or clone a persistent (on-disk) pixel cache at
  "filename".  On return *offset is advanced past this cache, rounded up
  to the next page boundary.  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info,
    *restrict clone_info;

  Image
    clone_image;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MaxTextExtent);
      cache_info->type=DiskCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /* Advance past this cache, rounded up to a page boundary. */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /* Double-checked under the cache semaphore: an exclusively-owned,
     writable, non-memory cache can simply be renamed into place. */
  if ((cache_info->mode != ReadMode) && (cache_info->type != MemoryCache) &&
      (cache_info->reference_count == 1))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->mode != ReadMode) && (cache_info->type != MemoryCache) &&
          (cache_info->reference_count == 1))
        {
          int
            status;  /* shadows the outer MagickBooleanType status */

          /*
            Usurp existing persistent pixel cache.
          */
          status=rename_utf8(cache_info->cache_filename,filename);
          if (status == 0)
            {
              (void) CopyMagickString(cache_info->cache_filename,filename,
                MaxTextExtent);
              *offset+=cache_info->length+page_size-(cache_info->length %
                page_size);
              UnlockSemaphoreInfo(cache_info->semaphore);
              cache_info=(CacheInfo *) ReferencePixelCache(cache_info);
              if (image->debug != MagickFalse)
                (void) LogMagickEvent(CacheEvent,GetMagickModule(),
                  "Usurp resident persistent cache");
              return(MagickTrue);
            }
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /*
    Clone persistent pixel cache.
  */
  clone_image=(*image);
  clone_info=(CacheInfo *) clone_image.cache;
  image->cache=ClonePixelCache(cache_info);
  cache_info=(CacheInfo *) ReferencePixelCache(image->cache);
  (void) CopyMagickString(cache_info->cache_filename,filename,MaxTextExtent);
  cache_info->type=DiskCache;
  cache_info->offset=(*offset);
  cache_info=(CacheInfo *) image->cache;
  status=OpenPixelCache(image,IOMode,exception);
  if (status != MagickFalse)
    status=ClonePixelCacheRepository(cache_info,clone_info,&image->exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Thin forwarder: identical arguments, delegates directly to
   QueueAuthenticPixelCacheNexus(). */
MagickExport PixelPacket *QueueAuthenticPixel(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,clone,nexus_info,
    exception));
}
/*
  Queue a writable region of pixels on nexus_info without reading existing
  pixel values.  Returns a pointer to the region's pixels, or NULL if the
  geometry is invalid or the cache cannot be acquired.
*/
MagickExport PixelPacket *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  PixelPacket
    *restrict pixels;

  RectangleInfo
    region;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((PixelPacket *) NULL);
  assert(cache_info->signature == MagickSignature);
  /* The region's origin must lie inside the cache. */
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((PixelPacket *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((PixelPacket *) NULL);
  /* The region's last pixel must also lie inside the cache. */
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((PixelPacket *) NULL);
  /*
    Return pixel cache.
  */
  region.x=x;
  region.y=y;
  region.width=columns;
  region.height=rows;
  /* Buffer the region when a clip mask or image mask must be applied on
     sync. */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,&region,
    (image->clip_mask != (Image *) NULL) || (image->mask != (Image *) NULL) ?
    MagickTrue : MagickFalse,nexus_info,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static PixelPacket *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  NexusInfo
    *restrict nexus;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Queue a writable pixel region on this thread's cache nexus.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(thread_id < (int) cache_info->number_threads);
  nexus=cache_info->nexus_info[thread_id];
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    nexus,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a PixelPacket array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to obtain
% the black color component or the colormap indexes (of type IndexPacket)
% corresponding to the region. Once the PixelPacket (and/or IndexPacket)
% array has been updated, the changes must be saved back to the underlying
% image using SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport PixelPacket *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  QueueAuthenticPixelsHandler
    queue_handler;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Queue a mutable pixel region: delegate to an installed handler when one
    is registered, otherwise use this thread's private cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  queue_handler=cache_info->methods.queue_authentic_pixels_handler;
  if (queue_handler != (QueueAuthenticPixelsHandler) NULL)
    return(queue_handler(image,x,y,columns,rows,exception));
  assert(thread_id < (int) cache_info->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheIndexes() reads colormap indexes from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheIndexes() method is:
%
% MagickBooleanType ReadPixelCacheIndexes(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the colormap indexes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *restrict buffer)
{
  register MagickOffsetType
    i;

  ssize_t
    count;

  /*
    Read up to `length' bytes from the disk cache at `offset' into `buffer'.
    Returns the number of bytes actually read (callers treat a short count as
    failure), or -1 if the initial seek fails.
  */
#if !defined(MAGICKCORE_HAVE_PREAD)
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,
      (MagickSizeType) SSIZE_MAX),(off_t) (offset+i));
#endif
    if (count <= 0)
      {
        if ((count < 0) && (errno == EINTR))
          {
            count=0;  /* interrupted by a signal: retry the read */
            continue;
          }
        /*
          count == 0 is end-of-file; read() does not set errno in that case,
          so testing errno here could spin forever on a stale EINTR.  Break
          and report the short count to the caller.
        */
        break;
      }
  }
  return(i);
}
static MagickBooleanType ReadPixelCacheIndexes(CacheInfo *restrict cache_info,
  NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register IndexPacket
    *restrict q;

  register ssize_t
    y;

  size_t
    rows;

  /*
    Copy the colormap indexes covering the nexus region from the pixel cache
    (memory, map, disk, or distributed) into the nexus staging buffer.
  */
  if (cache_info->active_index_channel == MagickFalse)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to copy */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
  rows=nexus_info->region.height;
  extent=length*rows;
  q=nexus_info->indexes;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register IndexPacket
        *restrict p;

      /*
        Read indexes from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* full-width region: collapse to one contiguous copy */
          length=extent;
          rows=1UL;
        }
      p=cache_info->indexes+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read indexes from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /* on disk the index plane follows the full pixel plane */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          sizeof(PixelPacket)+offset*sizeof(*q),length,(unsigned char *) q);
        if ((MagickSizeType) count < length)
          break;
        offset+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read indexes from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fixed mojibake: `®ion' restored to `&region' */
        count=ReadDistributePixelCacheIndexes((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /* a row copy stopped short: report the failure */
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(CacheInfo *restrict cache_info,
  NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register PixelPacket
    *restrict q;

  register ssize_t
    y;

  size_t
    rows;

  /*
    Copy the pixels covering the nexus region from the pixel cache (memory,
    map, disk, or distributed) into the nexus staging buffer.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly; nothing to copy */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
  rows=nexus_info->region.height;
  extent=length*rows;
  q=nexus_info->pixels;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register PixelPacket
        *restrict p;

      /*
        Read pixels from memory.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          /* full-width region: collapse to one contiguous copy */
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          sizeof(*q),length,(unsigned char *) q);
        if ((MagickSizeType) count < length)
          break;
        offset+=cache_info->columns;
        q+=nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Read pixels from distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fixed mojibake: `®ion' restored to `&region' */
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      /* a row copy stopped short: report the failure */
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
%      Cache ReferencePixelCache(Cache cache)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
*/
MagickExport Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *restrict cache_info;

  /*
    Increment the cache reference count under its semaphore and return the
    same cache handle.
  */
  assert(cache != (Cache) NULL);  /* was (Cache *) NULL: wrong cast type */
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
%      SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickExport void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *restrict cache_info;

  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;

  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Install any non-NULL handlers from cache_methods into the cache; NULL
    entries leave the existing handler untouched.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_indexes_from_handler !=
      (GetVirtualIndexesFromHandler) NULL)
    cache_info->methods.get_virtual_indexes_from_handler=
      cache_methods->get_virtual_indexes_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_indexes_from_handler !=
      (GetAuthenticIndexesFromHandler) NULL)
    cache_info->methods.get_authentic_indexes_from_handler=
      cache_methods->get_authentic_indexes_from_handler;
  /*
    Bug fix: read the candidate handler from cache_methods, not from
    cache_info->methods.  The original tested the cache's *current* handler,
    so a caller-supplied handler was never installed (and a non-NULL current
    handler could be overwritten with NULL).  This now matches the
    get_one_authentic_pixel_from_handler pattern below.
  */
  get_one_virtual_pixel_from_handler=
    cache_methods->get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
%      PixelPacket *SetPixelCacheNexusPixels(const CacheInfo *cache_info,
% const MapMode mode,const RectangleInfo *region,
% const MagickBooleanType buffered,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o region: A pointer to the RectangleInfo structure that defines the
% region of this particular cache nexus.
%
% o buffered: pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *restrict cache_info,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  /*
    Allocate nexus staging memory of nexus_info->length bytes: try aligned
    heap memory first, then fall back to an anonymous memory map.
  */
  if (nexus_info->length != (MagickSizeType) ((size_t) nexus_info->length))
    {
      /*
        Requested length overflows size_t; report it instead of failing
        silently (the other failure path below already throws).
      */
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->mapped=MagickFalse;
  nexus_info->cache=(PixelPacket *) MagickAssumeAligned(AcquireAlignedMemory(1,
    (size_t) nexus_info->length));
  if (nexus_info->cache == (PixelPacket *) NULL)
    {
      /* heap allocation failed: fall back to an anonymous map */
      nexus_info->mapped=MagickTrue;
      nexus_info->cache=(PixelPacket *) MapBlob(-1,IOMode,0,(size_t)
        nexus_info->length);
    }
  if (nexus_info->cache == (PixelPacket *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  return(MagickTrue);
}
static inline MagickBooleanType IsAuthenticPixelCache(
  const CacheInfo *restrict cache_info,const NexusInfo *restrict nexus_info)
{
  MagickOffsetType
    offset;

  /*
    Report whether the nexus pixels alias the in-core cache pixels directly
    (or the cache is a ping cache), as opposed to a staging buffer.
  */
  if (cache_info->type == PingCache)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  if (nexus_info->pixels == (cache_info->pixels+offset))
    return(MagickTrue);
  return(MagickFalse);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  unsigned char
    *pixels;

  /*
    Hint the CPU to prefetch the nexus pixels for the pending access
    (read hint for ReadMode, write hint otherwise).
  */
  pixels=(unsigned char *) nexus_info->pixels;
  if (mode == ReadMode)
    MagickCachePrefetch(pixels,0,1);
  else
    MagickCachePrefetch(pixels,1,1);
}
static PixelPacket *SetPixelCacheNexusPixels(const CacheInfo *cache_info,
  const MapMode mode,const RectangleInfo *region,
  const MagickBooleanType buffered,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  MagickSizeType
    length,
    number_pixels;

  /*
    Bind the nexus to `region': either alias the in-core cache pixels
    directly (fast path) or (re)allocate a staging buffer that is later
    synced back to the cache.  Returns the nexus pixels or NULL on failure.
  */
  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickSignature);
  if (cache_info->type == UndefinedCache)
    return((PixelPacket *) NULL);
  nexus_info->region=(*region);
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      ssize_t
        x,
        y;

      /* inclusive far corner of the requested region */
      x=nexus_info->region.x+(ssize_t) nexus_info->region.width-1;
      y=nexus_info->region.y+(ssize_t) nexus_info->region.height-1;
      /*
        Direct access requires the region to lie wholly inside the cache and
        be contiguous in row-major order: either a single row, or full rows
        starting at column 0 (width equal to, or a multiple of, the cache
        width).
      */
      if (((nexus_info->region.x >= 0) && (x < (ssize_t) cache_info->columns) &&
          (nexus_info->region.y >= 0) && (y < (ssize_t) cache_info->rows)) &&
          ((nexus_info->region.height == 1UL) || ((nexus_info->region.x == 0) &&
          ((nexus_info->region.width == cache_info->columns) ||
          ((nexus_info->region.width % cache_info->columns) == 0)))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
            nexus_info->region.x;
          nexus_info->pixels=cache_info->pixels+offset;
          nexus_info->indexes=(IndexPacket *) NULL;
          if (cache_info->active_index_channel != MagickFalse)
            nexus_info->indexes=cache_info->indexes+offset;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          nexus_info->authentic_pixel_cache=IsAuthenticPixelCache(cache_info,nexus_info);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  /* indexes (if active) are stored contiguously after the pixels */
  length=number_pixels*sizeof(PixelPacket);
  if (cache_info->active_index_channel != MagickFalse)
    length+=number_pixels*sizeof(IndexPacket);
  if (nexus_info->cache == (PixelPacket *) NULL)
    {
      /* first use of this nexus: allocate the staging buffer */
      nexus_info->length=length;
      status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
      if (status == MagickFalse)
        {
          nexus_info->length=0;
          return((PixelPacket *) NULL);
        }
    }
  else
    if (nexus_info->length < length)
      {
        /* existing buffer too small: release and reallocate */
        RelinquishCacheNexusPixels(nexus_info);
        nexus_info->length=length;
        status=AcquireCacheNexusPixels(cache_info,nexus_info,exception);
        if (status == MagickFalse)
          {
            nexus_info->length=0;
            return((PixelPacket *) NULL);
          }
      }
  nexus_info->pixels=nexus_info->cache;
  nexus_info->indexes=(IndexPacket *) NULL;
  if (cache_info->active_index_channel != MagickFalse)
    nexus_info->indexes=(IndexPacket *) (nexus_info->pixels+number_pixels);
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  nexus_info->authentic_pixel_cache=IsAuthenticPixelCache(cache_info,nexus_info);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
% const VirtualPixelMethod virtual_pixel_method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,
  const Quantum opacity)
{
  CacheInfo
    *restrict cache_info;

  CacheView
    *restrict image_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Enable the matte channel and set every pixel's opacity to `opacity',
    row by row (parallelized with OpenMP when available).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  image->matte=MagickTrue;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,&image->exception); /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    /* a prior row failed: skip remaining work (cannot break in OpenMP) */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      &image->exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      q->opacity=opacity;
      q++;
    }
    status=SyncCacheViewAuthenticPixels(image_view,&image->exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickExport VirtualPixelMethod SetPixelCacheVirtualMethod(const Image *image,
  const VirtualPixelMethod virtual_pixel_method)
{
  CacheInfo
    *restrict cache_info;

  VirtualPixelMethod
    method;

  /*
    Install the new virtual-pixel method and return the previous one.  Some
    methods require side effects on the image (casting away const): enabling
    the matte channel or switching to sRGB.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        /* a translucent background needs an active matte channel */
        if ((image->background_color.opacity != OpaqueOpacity) &&
            (image->matte == MagickFalse))
          (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
        /* a non-gray background cannot be represented in a gray colorspace */
        if ((IsPixelGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace((Image *) image,sRGBColorspace);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        if (image->matte == MagickFalse)
          (void) SetCacheAlphaChannel((Image *) image,OpaqueOpacity);
        break;
      }
      default:
        break;
    }
  return(method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  /* apply clip and composite masks before the pixels are written back */
  if ((image->clip_mask != (Image *) NULL) &&
      (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if ((image->mask != (Image *) NULL) &&
      (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  /* nexus aliases the cache pixels: already in place, nothing to write */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  assert(cache_info->signature == MagickSignature);
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  /* the index channel (CMYK/PseudoClass) must be written back as well */
  if ((cache_info->active_index_channel != MagickFalse) &&
      (WritePixelCacheIndexes(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  NexusInfo
    *restrict nexus_info;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Sync this thread's private cache nexus back to the pixel cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  assert(thread_id < (int) cache_info->number_threads);
  nexus_info=cache_info->nexus_info[thread_id];
  return(SyncAuthenticPixelCacheNexus(image,nexus_info,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  SyncAuthenticPixelsHandler
    sync_handler;

  const int
    thread_id = GetOpenMPThreadId();

  /*
    Sync the authentic pixel region: delegate to an installed handler when
    one is registered, otherwise use this thread's private cache nexus.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickSignature);
  sync_handler=cache_info->methods.sync_authentic_pixels_handler;
  if (sync_handler != (SyncAuthenticPixelsHandler) NULL)
    return(sync_handler(image,exception));
  assert(thread_id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[thread_id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *restrict cache_info;

  /*
    Flush the image pixels by re-acquiring the pixel cache for update.
  */
  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  if (cache_info == (CacheInfo *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheIndexes() writes the colormap indexes to the specified
% region of the pixel cache.
%
% The format of the WritePixelCacheIndexes() method is:
%
% MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the colormap indexes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheIndexes(CacheInfo *cache_info,
  NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const IndexPacket
    *restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /*
    Write the colormap indexes of the nexus region back to the pixel cache
    backing store (memory, disk, or distributed cache).
  */
  if (cache_info->active_index_channel == MagickFalse)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly: nothing to do */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(IndexPacket);
  rows=nexus_info->region.height;
  extent=(MagickSizeType) length*rows;
  p=nexus_info->indexes;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register IndexPacket
        *restrict q;

      /*
        Write indexes to memory.  When the nexus spans full cache rows the
        region is contiguous, so collapse the copy into a single memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->indexes+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width;
        q+=cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write indexes to disk.  Indexes are stored after the PixelPacket
        plane, hence the extent*sizeof(PixelPacket) base offset below.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          sizeof(PixelPacket)+offset*sizeof(*p),length,(const unsigned char *)
          p);
        if ((MagickSizeType) count < length)
          break;  /* short write: reported below via y < rows */
        p+=nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write indexes to distributed cache, one row of the region at a time
        unless the whole extent fits in a single transfer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fixed: "&region" had been corrupted to the mojibake "(R)ion" */
        count=WriteDistributePixelCacheIndexes((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
  NexusInfo *restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;

  MagickSizeType
    extent,
    length;

  register const PixelPacket
    *restrict p;

  register ssize_t
    y;

  size_t
    rows;

  /*
    Write the pixels of the nexus region back to the pixel cache backing
    store (memory, disk, or distributed cache).
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);  /* nexus aliases the cache directly: nothing to do */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*sizeof(PixelPacket);
  rows=nexus_info->region.height;
  extent=length*rows;
  p=nexus_info->pixels;
  y=0;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register PixelPacket
        *restrict q;

      /*
        Write pixels to memory.  When the nexus spans full cache rows the
        region is contiguous, so collapse the copy into a single memcpy.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width;
        q+=cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          sizeof(*p),length,(const unsigned char *) p);
        if ((MagickSizeType) count < length)
          break;  /* short write: reported below via y < rows */
        p+=nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache, one row of the region at a time
        unless the whole extent fits in a single transfer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        /* fixed: "&region" had been corrupted to the mojibake "(R)ion" */
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
transpose_x_coo.c | #include "alphasparse/format.h"
#include <stdlib.h>
#include <alphasparse/opt.h>
#include <alphasparse/util.h>
#include <memory.h>
/*
  qsort comparator: order points by x (the row index after transposition)
  first, then by y.  Uses sign comparison rather than subtraction because
  a->x - b->x can overflow int when the index difference exceeds INT_MAX,
  which is undefined behavior and can corrupt the sort order.
*/
static int row_first_cmp(const ALPHA_Point *a, const ALPHA_Point *b)
{
    if (a->x != b->x)
        return (a->x > b->x) - (a->x < b->x);
    return (a->y > b->y) - (a->y < b->y);
}
/*
  Transpose a sparse matrix in COO format: *d receives a newly allocated
  matrix that is the transpose of s, with entries sorted by (new) row index
  and then column index.  The caller owns *d and is responsible for freeing
  it with the library's matrix destructor.
*/
alphasparse_status_t ONAME(const ALPHA_SPMAT_COO *s, ALPHA_SPMAT_COO **d)
{
    ALPHA_INT nnz = s->nnz;
    ALPHA_INT num_threads = alpha_get_thread_num();
    /* Scratch array of (x=col, y=row, v=value) triples; swapping x/y here
       performs the transposition.  NOTE(review): allocation results are not
       checked for NULL -- confirm alpha_memalign aborts on failure. */
    ALPHA_Point *points = alpha_memalign(nnz * sizeof(ALPHA_Point), DEFAULT_ALIGNMENT);
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT i = 0; i < nnz; ++i)
    {
        points[i].x = s->col_indx[i];
        points[i].y = s->row_indx[i];
        points[i].v = s->values[i];
    }
    /* Sort so the output COO is row-major ordered.  NOTE(review):
       __compar_fn_t is a glibc-specific typedef; non-glibc builds may need a
       plain cast to int (*)(const void *, const void *). */
    qsort(points, nnz, sizeof(ALPHA_Point), (__compar_fn_t)row_first_cmp);
    ALPHA_SPMAT_COO *mat = alpha_malloc(sizeof(ALPHA_SPMAT_COO));
    *d = mat;
    /* Transposed dimensions: rows and cols swap; nnz is unchanged. */
    mat->rows = s->cols;
    mat->cols = s->rows;
    mat->nnz = s->nnz;
    mat->row_indx = alpha_memalign(sizeof(ALPHA_INT) * nnz, DEFAULT_ALIGNMENT);
    mat->col_indx = alpha_memalign(sizeof(ALPHA_INT) * nnz, DEFAULT_ALIGNMENT);
    mat->values = alpha_memalign(sizeof(ALPHA_Number) * nnz, DEFAULT_ALIGNMENT);
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT i = 0; i < nnz; i++)
    {
        mat->row_indx[i] = points[i].x;
        mat->col_indx[i] = points[i].y;
        mat->values[i] = points[i].v;
    }
    alpha_free(points);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
attribute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/identify.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/magick.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/segment.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickPixelPacket
    target[3],   /* border colors: [0]=top-left, [1]=top-right, [2]=bottom-left */
    zero;

  RectangleInfo
    bounds;

  register const PixelPacket
    *p;

  ssize_t
    y;

  /*
    Compute the smallest rectangle containing pixels that differ from the
    border colors.  bounds starts "inverted" (x/y at the far edge, width and
    height 0) so that any differing pixel shrinks/grows it correctly.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  bounds.width=0;
  bounds.height=0;
  bounds.x=(ssize_t) image->columns;
  bounds.y=(ssize_t) image->rows;
  GetMagickPixelPacket(image,&target[0]);
  image_view=AcquireVirtualCacheView(image,exception);
  p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
  if (p == (const PixelPacket *) NULL)
    {
      /* cannot read even the first pixel: return the inverted bounds */
      image_view=DestroyCacheView(image_view);
      return(bounds);
    }
  SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
    &target[0]);
  GetMagickPixelPacket(image,&target[1]);
  p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
    exception);
  if (p != (const PixelPacket *) NULL)
    SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
      &target[1]);
  GetMagickPixelPacket(image,&target[2]);
  p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
    exception);
  if (p != (const PixelPacket *) NULL)
    SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
      &target[2]);
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    RectangleInfo
      bounding_box;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* snapshot the shared bounds under the same critical section that the
       merge below uses, so the read is not torn by a concurrent writer */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#  pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    bounding_box=bounds;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      /* left edge vs top-left color, right edge vs top-right color,
         top/bottom edges vs top-left/bottom-left colors respectively */
      if ((x < bounding_box.x) &&
          (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
        bounding_box.x=x;
      if ((x > (ssize_t) bounding_box.width) &&
          (IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse))
        bounding_box.width=(size_t) x;
      if ((y < bounding_box.y) &&
          (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
        bounding_box.y=y;
      if ((y > (ssize_t) bounding_box.height) &&
          (IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse))
        bounding_box.height=(size_t) y;
      p++;
    }
    /* merge this row's partial bounding box into the shared result */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#  pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    {
      if (bounding_box.x < bounds.x)
        bounds.x=bounding_box.x;
      if (bounding_box.y < bounds.y)
        bounds.y=bounding_box.y;
      if (bounding_box.width > bounds.width)
        bounds.width=bounding_box.width;
      if (bounding_box.height > bounds.height)
        bounds.height=bounding_box.height;
    }
  }
  image_view=DestroyCacheView(image_view);
  if ((bounds.width == 0) && (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  else
    {
      /* width/height currently hold the max x/y seen; convert to extents */
      bounds.width-=(bounds.x-1);
      bounds.height-=(bounds.y-1);
    }
  return(bounds);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDepth() returns the depth of a particular image channel.
%
% The format of the GetImageChannelDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
% size_t GetImageChannelDepth(const Image *image,
% const ChannelType channel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Convenience wrapper: depth of the image over all composite channels.
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
  return(GetImageChannelDepth(image,CompositeChannels,exception));
}
MagickExport size_t GetImageChannelDepth(const Image *image,
  const ChannelType channel,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  register ssize_t
    i;

  size_t
    *current_depth,
    depth,
    number_threads;

  ssize_t
    y;

  /*
    Compute the smallest bit depth that losslessly represents the selected
    channels.  Each thread tracks its own minimum depth in current_depth[];
    the results are reduced to a single maximum at the end.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  current_depth=(size_t *) AcquireQuantumMemory(number_threads,
    sizeof(*current_depth));
  if (current_depth == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  status=MagickTrue;
  for (i=0; i < (ssize_t) number_threads; i++)
    current_depth[i]=1;
  if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse))
    {
      /*
        PseudoClass without alpha: the colormap alone determines the depth.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_number_threads(image,image,image->colors,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        const int
          id = GetOpenMPThreadId();

        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          MagickBooleanType
            atDepth;

          QuantumAny
            range;

          atDepth=MagickTrue;
          range=GetQuantumRange(current_depth[id]);
          if ((atDepth != MagickFalse) && ((channel & RedChannel) != 0))
            if (IsPixelAtDepth(image->colormap[i].red,range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0))
            if (IsPixelAtDepth(image->colormap[i].green,range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0))
            if (IsPixelAtDepth(image->colormap[i].blue,range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse))
            break;
          current_depth[id]++;
        }
      }
      depth=current_depth[0];
      for (i=1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth=current_depth[i];
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
  image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  DisableMSCWarning(4127)
  if (1UL*QuantumRange <= MaxMap)
    RestoreMSCWarning
    {
      size_t
        *depth_map;

      /*
        Small quantum range: precompute the minimal depth of every possible
        quantum value once, then the per-pixel work is a table lookup.
      */
      depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (size_t *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        unsigned int
          depth;

        for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
        {
          Quantum
            pixel;

          QuantumAny
            range;

          range=GetQuantumRange(depth);
          pixel=(Quantum) i;
          /* value survives a round-trip through this depth: depth suffices */
          if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
            break;
        }
        depth_map[i]=depth;
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        register const IndexPacket
          *magick_restrict indexes;

        register const PixelPacket
          *magick_restrict p;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          continue;
        indexes=GetCacheViewVirtualIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          Quantum
            pixel;

          if ((channel & RedChannel) != 0)
            {
              pixel=GetPixelRed(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if ((channel & GreenChannel) != 0)
            {
              pixel=GetPixelGreen(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if ((channel & BlueChannel) != 0)
            {
              pixel=GetPixelBlue(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            {
              pixel=GetPixelOpacity(p);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            {
              pixel=GetPixelIndex(indexes+x);
              if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
                current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
            }
          p++;
        }
        /* already at maximum depth: no deeper answer possible, stop early */
        if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      depth=current_depth[0];
      for (i=1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth=current_depth[i];
      depth_map=(size_t *) RelinquishMagickMemory(depth_map);
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
#endif
  /*
    General path: probe each pixel against increasing depths directly.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      continue;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
      {
        MagickBooleanType
          atDepth;

        QuantumAny
          range;

        atDepth=MagickTrue;
        range=GetQuantumRange(current_depth[id]);
        if ((atDepth != MagickFalse) && ((channel & RedChannel) != 0))
          if (IsPixelAtDepth(GetPixelRed(p),range) == MagickFalse)
            atDepth=MagickFalse;
        if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0))
          if (IsPixelAtDepth(GetPixelGreen(p),range) == MagickFalse)
            atDepth=MagickFalse;
        if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0))
          if (IsPixelAtDepth(GetPixelBlue(p),range) == MagickFalse)
            atDepth=MagickFalse;
        if ((atDepth != MagickFalse) && ((channel & OpacityChannel) != 0) &&
            (image->matte != MagickFalse))
          if (IsPixelAtDepth(GetPixelOpacity(p),range) == MagickFalse)
            atDepth=MagickFalse;  /* fixed: was MagickTrue, which wrongly
              accepted the current depth when the opacity test FAILED */
        if ((atDepth != MagickFalse) && ((channel & IndexChannel) != 0) &&
            (image->colorspace == CMYKColorspace))
          if (IsPixelAtDepth(GetPixelIndex(indexes+x),range) == MagickFalse)
            atDepth=MagickFalse;
        if ((atDepth != MagickFalse))
          break;
        current_depth[id]++;
      }
      p++;
    }
    if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  depth=current_depth[0];
  for (i=1; i < (ssize_t) number_threads; i++)
    if (depth < current_depth[i])
      depth=current_depth[i];
  current_depth=(size_t *) RelinquishMagickMemory(current_depth);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded to a legal
% quantum depth: 8, 16, 32, or 64.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: A value other than MagickFalse, constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  static const size_t
    legal_depths[] = { 8, 16, 32, 64 };

  register ssize_t
    i;

  size_t
    depth;

  /*
    Round the image depth up to the nearest legal quantum depth; depths
    beyond 64 are returned unchanged.  When constrain is set, cap the result
    at the library's compiled-in MAGICKCORE_QUANTUM_DEPTH.
  */
  depth=image->depth;
  for (i=0; i < (ssize_t) (sizeof(legal_depths)/sizeof(legal_depths[0])); i++)
    if (depth <= legal_depths[i])
      {
        depth=legal_depths[i];
        break;
      }
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,GetImageType(image));
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception)
{
  /*
    Classify the image's potential type by testing progressively richer
    categories in order: color separation, bi-level, grayscale, palette,
    and finally true color; a matte channel selects the *Matte variant.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == CMYKColorspace)
    return(image->matte == MagickFalse ? ColorSeparationType :
      ColorSeparationMatteType);
  if (IsMonochromeImage(image,exception) != MagickFalse)
    return(BilevelType);
  if (IsGrayImage(image,exception) != MagickFalse)
    return(image->matte != MagickFalse ? GrayscaleMatteType : GrayscaleType);
  if (IsPaletteImage(image,exception) != MagickFalse)
    return(image->matte != MagickFalse ? PaletteMatteType : PaletteType);
  return(image->matte != MagickFalse ? TrueColorMatteType : TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageGray() returns grayscale if all the pixels in the image have
% the same red, green, and blue intensities, and bi-level if the intensity is
% either 0 or QuantumRange.  Otherwise undefined is returned.
%
% The format of the IdentifyImageGray method is:
%
% ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  /*
    Scan every pixel: return BilevelType if all pixels are pure black/white,
    GrayscaleType (or GrayscaleMatteType with a matte channel) if all are
    gray, and UndefinedType as soon as a non-gray pixel is found.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* image already tagged gray or bi-level: trust the tag, skip the scan */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleMatteType))
    return(image->type);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(UndefinedType);
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelGray(p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      /* gray but not pure black/white: demote bi-level to grayscale */
      if ((type == BilevelType) && (IsPixelMonochrome(p) == MagickFalse))
        type=GrayscaleType;
      p++;
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((type == GrayscaleType) && (image->matte != MagickFalse))
    type=GrayscaleMatteType;
  return(type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image
% have the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IdentifyImageMonochrome method is:
%
% MagickBooleanType IdentifyImageMonochrome(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    monochrome;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  /*
    Scan every pixel; the image is monochrome only when each pixel has equal
    red, green, and blue intensities of either 0 or QuantumRange.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);  /* already tagged bi-level: trust the tag */
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  monochrome=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; (y < (ssize_t) image->rows) && (monochrome != MagickFalse); y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelMonochrome(p) == MagickFalse)
        {
          monochrome=MagickFalse;
          break;
        }
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  return(monochrome);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I d e n t i f y I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IdentifyImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,IdentifyImageType(image,exception),exception);
%
% The format of the IdentifyImageType method is:
%
% ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageType(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Identify the image's potential type by inspecting pixels (unlike
    GetImageType(), which trusts cached tags), testing progressively richer
    categories: color separation, bi-level, grayscale, palette, true color.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == CMYKColorspace)
    return(image->matte == MagickFalse ? ColorSeparationType :
      ColorSeparationMatteType);
  if (IdentifyImageMonochrome(image,exception) != MagickFalse)
    return(BilevelType);
  if (IdentifyImageGray(image,exception) != UndefinedType)
    return(image->matte != MagickFalse ? GrayscaleMatteType : GrayscaleType);
  if (IdentifyPaletteImage(image,exception) != MagickFalse)
    return(image->matte != MagickFalse ? PaletteMatteType : PaletteType);
  return(image->matte != MagickFalse ? TrueColorMatteType : TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s G r a y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsGrayImage() returns MagickTrue if the type of the image is grayscale or
% bi-level.
%
% The format of the IsGrayImage method is:
%
% MagickBooleanType IsGrayImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsGrayImage(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Report whether the image's cached type tag is grayscale or bi-level;
    no pixels are inspected and exception is unused.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  switch (image->type)
  {
    case BilevelType:
    case GrayscaleType:
    case GrayscaleMatteType:
      return(MagickTrue);
    default:
      return(MagickFalse);
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s M o n o c h r o m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsMonochromeImage() returns MagickTrue if type of the image is bi-level.
%
% The format of the IsMonochromeImage method is:
%
% MagickBooleanType IsMonochromeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsMonochromeImage(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Report whether the image's cached type tag is bi-level; no pixels are
    inspected and exception is unused.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  return(image->type == BilevelType ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsOpaqueImage() returns MagickTrue if none of the pixels in the image have
% an opacity value other than opaque (0).
%
% The format of the IsOpaqueImage method is:
%
% MagickBooleanType IsOpaqueImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsOpaqueImage(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  /*
    Determine if image is opaque: every pixel's opacity must equal
    OpaqueOpacity.  Returns MagickTrue immediately when the image has no
    matte channel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->matte == MagickFalse)
    return(MagickTrue);
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelOpacity(p) != OpaqueOpacity)
        break;
      p++;
    }
    /* inner break (non-opaque pixel found) leaves x short of columns */
    if (x < (ssize_t) image->columns)
      break;
  }
  image_view=DestroyCacheView(image_view);
  /* y short of rows means a non-opaque pixel (or a read failure) */
  return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelDepth() sets the depth of the image.
%
% The format of the SetImageChannelDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth)
% MagickBooleanType SetImageChannelDepth(Image *image,
% const ChannelType channel,const size_t depth)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o depth: the image depth.
%
*/
/*
  Convenience wrapper: set the depth of the image over all composite
  channels.
*/
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth)
{
  return(SetImageChannelDepth(image,CompositeChannels,depth));
}
MagickExport MagickBooleanType SetImageChannelDepth(Image *image,
  const ChannelType channel,const size_t depth)
{
  /*
    Reduce the effective depth of the selected channels to 'depth' bits by
    quantizing every sample through ScaleQuantumToAny/ScaleAnyToQuantum.
    Returns MagickTrue on success.
  */
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      /* Requested depth meets or exceeds the build's quantum depth: no
         quantization is needed, just record the new depth. */
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      /* Quantize the colormap entries in place for the selected channels. */
      register ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* NOTE(review): the thread-count heuristic uses image->rows although the
     loop runs over image->colors -- confirm this is intentional. */
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          image->colormap[i].red=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel((MagickRealType) image->colormap[i].red),range),range);
        if ((channel & GreenChannel) != 0)
          image->colormap[i].green=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel((MagickRealType) image->colormap[i].green),range),range);
        if ((channel & BlueChannel) != 0)
          image->colormap[i].blue=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel((MagickRealType) image->colormap[i].blue),range),range);
        if ((channel & OpacityChannel) != 0)
          image->colormap[i].opacity=ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel((MagickRealType) image->colormap[i].opacity),range),
            range);
      }
    }
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  DisableMSCWarning(4127)
  if (1UL*QuantumRange <= MaxMap)
  RestoreMSCWarning
    {
      /* Fast path (non-HDRI, small quantum): precompute a lookup table
         mapping every possible quantum value to its reduced-depth value. */
      Quantum
        *depth_map;

      register ssize_t
        i;

      /*
        Scale pixels to desired (optimized with depth map).
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register ssize_t
          x;

        register PixelPacket
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,depth_map[ScaleQuantumToMap(GetPixelRed(q))]);
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,depth_map[ScaleQuantumToMap(GetPixelGreen(q))]);
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,depth_map[ScaleQuantumToMap(GetPixelBlue(q))]);
          if (((channel & OpacityChannel) != 0) &&
              (image->matte != MagickFalse))
            SetPixelOpacity(q,depth_map[ScaleQuantumToMap(GetPixelOpacity(q))]);
          q++;
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth (general path: recompute per sample).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          (MagickRealType) GetPixelRed(q)),range),range));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          (MagickRealType) GetPixelGreen(q)),range),range));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          (MagickRealType) GetPixelBlue(q)),range),range));
      if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
        SetPixelOpacity(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
          (MagickRealType) GetPixelOpacity(q)),range),range));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageType() sets the type of image. Choose from these types:
%
% BilevelType, GrayscaleType, GrayscaleMatteType, PaletteType,
% PaletteMatteType, TrueColorType, TrueColorMatteType,
% ColorSeparationType, ColorSeparationMatteType, OptimizeType
%
% The format of the SetImageType method is:
%
% MagickBooleanType SetImageType(Image *image,const ImageType type)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: Image type.
%
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type)
{
  /*
    Convert the image to the requested ImageType by combining colorspace
    transforms, optional alpha-channel setup, and quantization.  On success
    image->type is updated and MagickTrue is returned.
  */
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  image_info=AcquireImageInfo();
  image_info->dither=image->dither;
  /* Honour a per-image "dither" artifact, if set, over the image default. */
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      /* Gray, normalized, then quantized to 2 colors; alpha discarded. */
      status=TransformImageColorspace(image,GRAYColorspace);
      (void) NormalizeImage(image);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->number_colors=2;
      quantize_info->colorspace=GRAYColorspace;
      status=QuantizeImage(quantize_info,image);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      image->matte=MagickFalse;
      break;
    }
    case GrayscaleType:
    {
      status=TransformImageColorspace(image,GRAYColorspace);
      image->matte=MagickFalse;
      break;
    }
    case GrayscaleMatteType:
    {
      status=TransformImageColorspace(image,GRAYColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case PaletteType:
    {
      /* sRGB palette image; quantize only if > 256 colors or DirectClass. */
      status=TransformImageColorspace(image,sRGBColorspace);
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->matte=MagickFalse;
      break;
    }
    case PaletteBilevelMatteType:
    {
      /* Palette image whose alpha channel is thresholded to on/off. */
      status=TransformImageColorspace(image,sRGBColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      (void) BilevelImageChannel(image,AlphaChannel,(double) QuantumRange/2.0);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteMatteType:
    {
      status=TransformImageColorspace(image,sRGBColorspace);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      status=TransformImageColorspace(image,sRGBColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      image->matte=MagickFalse;
      break;
    }
    case TrueColorMatteType:
    {
      status=TransformImageColorspace(image,sRGBColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case ColorSeparationType:
    {
      status=TransformImageColorspace(image,CMYKColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      image->matte=MagickFalse;
      break;
    }
    case ColorSeparationMatteType:
    {
      status=TransformImageColorspace(image,CMYKColorspace);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass);
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;
  }
  image_info=DestroyImageInfo(image_info);
  if (status == MagickFalse)
    return(MagickFalse);
  image->type=type;
  return(MagickTrue);
}
|
GB_binop__isne_int64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__isne_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__isne_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_int64)
// A*D function (colscale): GB (_AxD__isne_int64)
// D*A function (rowscale): GB (_DxB__isne_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_int64)
// C=scalar+B GB (_bind1st__isne_int64)
// C=scalar+B' GB (_bind1st_tran__isne_int64)
// C=A+scalar GB (_bind2nd__isne_int64)
// C=A'+scalar GB (_bind2nd_tran__isne_int64)
// C type: int64_t
// A type: int64_t
// A pattern? 0
// B type: int64_t
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_INT64 || GxB_NO_ISNE_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // C = A+B where all three matrices are dense; the loop body is supplied
    // by the shared template, specialized via the GB_* macros defined above.
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    // C += B where C is dense; returns GrB_NO_VALUE if this operator is
    // disabled at compile time (GB_DISABLE) so the generic path is used.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isne_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    // C += b (scalar accumulate into a dense matrix).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (generated-code artifact)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // C = A*D: scale each column of A by the corresponding diagonal entry
    // of D, using the precomputed slicing of A for parallelism.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    // C = D*B: scale each row of B by the corresponding diagonal entry of D.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isne_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B.  For eWiseUnion, the
    // alpha/beta scalars substitute for entries missing in A or B.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int64_t alpha_scalar ;
    int64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // scalars are only read in the eWiseUnion case
        alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isne_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult method 08: C = A.*B (with optional mask) where C is
    // sparse or hypersparse.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    // eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
    // bitmap/full.  GB_BINOP_FLIP selects at compile time whether the
    // operator needs an explicit flipped evaluation.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    // eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both
    // A and B are bitmap/full; parallelism follows the slicing of M.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isne_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    // eWiseMult: C = A.*B (with optional mask) where C is held as a bitmap.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isne_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    // Cx [p] = (x != Bx [p]) for every entry present in B (Bb is B's
    // bitmap; GBB handles the full-matrix case where Bb is NULL).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t   x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;    // skip entries absent from B
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isne_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    // Cx [p] = (Ax [p] != y) for every entry present in A (Ab is A's
    // bitmap; GBB handles the full-matrix case where Ab is NULL).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t   y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;    // skip entries absent from A
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
GrB_Info GB (_bind1st_tran__isne_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = op (x, A'): transpose A and apply cij = (x != aij) via the
    // GB_CAST_OP macro defined just above this function.

    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
GrB_Info GB (_bind2nd_tran__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // C = op (A', y): transpose A and apply cij = (aij != y) via the
    // GB_CAST_OP macro defined just above this function.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
test_model.c | /*****************************************************************************
*
* test_model.c
*
* Unit test for the currently compiled model (D3Q15 or D3Q19).
*
* Edinburgh Soft Matter and Statistical Physics Group
* Edinburgh Parallel Computing Centre
*
* (c) 2010-2021 The University of Edinburgh
*
* Contributing authors:
* Kevin Stratford (kevin@epcc.ed.ac.uk)
*
*****************************************************************************/
#include <assert.h>
#include <float.h>
#include <math.h>
#include <stdlib.h>
#include "pe.h"
#include "coords.h"
#include "util.h"
#include "cs_limits.h"
#include "lb_model_s.h"
#include "tests.h"
static void test_model_constants(void);
static void test_model_velocity_set(void);
int do_test_model_distributions(pe_t * pe, cs_t * cs);
int do_test_model_halo_swap(pe_t * pe, cs_t * cs);
int do_test_model_reduced_halo_swap(pe_t * pe, cs_t * cs);
int do_test_lb_model_io(pe_t * pe, cs_t * cs);
static int test_model_is_domain(cs_t * cs, int ic, int jc, int kc);
/* Minimal set of options used to create the test lb_t object. */
typedef struct lb_data_options_s {
  int ndim;   /* model spatial dimension */
  int nvel;   /* number of discrete velocities */
  int ndist;  /* number of distributions per lattice site */
} lb_data_options_t;
/* Utility to return a unique value for global (ic,jc,kc,p) */
/* This allows e.g., tests to check distribution values in parallel
* exchanges. */
/* (ic, jc, kc) are local indices */
/* Result could be unsigned integer... */
#include <stdint.h>
int64_t lb_data_index(lb_t * lb, int ic, int jc, int kc, int p) {

  /* Return a value unique to the global position (ic, jc, kc) and
   * velocity index p, so halo-exchange tests can verify received
   * distribution values.  (ic, jc, kc) are local indices and may lie
   * in the halo; they are wrapped periodically onto 1..ntotal. */

  int64_t index = INT64_MIN;
  int64_t nall[3] = {};
  int64_t nstr[3] = {};  /* strides: Z fastest, then Y, then X */
  int64_t pstr = 0;      /* stride for the velocity index */

  int ntotal[3] = {};
  int offset[3] = {};
  int nhalo = 0;

  assert(lb);
  assert(0 <= p && p < lb->model.nvel);

  cs_ntotal(lb->cs, ntotal);
  cs_nlocal_offset(lb->cs, offset);
  cs_nhalo(lb->cs, &nhalo);

  nall[X] = ntotal[X] + 2*nhalo;
  nall[Y] = ntotal[Y] + 2*nhalo;
  nall[Z] = ntotal[Z] + 2*nhalo;
  nstr[Z] = 1;
  nstr[Y] = nstr[Z]*nall[Z];
  nstr[X] = nstr[Y]*nall[Y];
  pstr    = nstr[X]*nall[X];

  {
    int igl = offset[X] + ic;
    int jgl = offset[Y] + jc;
    int kgl = offset[Z] + kc;

    /* A periodic system: map halo positions back into 1..ntotal */
    igl = igl % ntotal[X];
    jgl = jgl % ntotal[Y];
    kgl = kgl % ntotal[Z];
    if (igl < 1) igl = igl + ntotal[X];
    if (jgl < 1) jgl = jgl + ntotal[Y];
    if (kgl < 1) kgl = kgl + ntotal[Z];

    assert(1 <= igl && igl <= ntotal[X]);
    assert(1 <= jgl && jgl <= ntotal[Y]);
    assert(1 <= kgl && kgl <= ntotal[Z]);

    index = pstr*p + nstr[X]*igl + nstr[Y]*jgl + nstr[Z]*kgl;
  }

  return index;
}
/* Allocate and initialise a test lb_t object from the given options.
 * Distribution storage is sized from the cs_t local domain plus halo.
 * Returns 0 on success; fatal error (pe_fatal) on allocation failure. */
int lb_data_create(pe_t * pe, cs_t * cs, const lb_data_options_t * options,
		   lb_t ** lb);

int lb_data_create(pe_t * pe, cs_t * cs, const lb_data_options_t * options,
		   lb_t ** lb) {

  lb_t * obj = NULL;

  assert(pe);
  assert(cs);
  assert(options);
  assert(lb);

  obj = (lb_t *) calloc(1, sizeof(lb_t));
  /* NOTE(review): in a debug build this assert fires before the
   * pe_fatal() path below can report the failure. */
  assert(obj);
  if (obj == NULL) pe_fatal(pe, "calloc(1, lb_t) failed\n");

  /* Check options */

  obj->pe = pe;
  obj->cs = cs;
  obj->ndim = options->ndim;
  obj->nvel = options->nvel;
  obj->ndist = options->ndist;

  lb_model_create(obj->nvel, &obj->model);

  /* Storage */

  {
    /* Allocate storage following cs specification */
    int nhalo = 1;
    int nlocal[3] = {};

    cs_nhalo(cs, &nhalo);
    cs_nlocal(cs, nlocal);

    {
      /* total sites = local domain extended by the halo in each direction */
      int nx = nlocal[X] + 2*nhalo;
      int ny = nlocal[Y] + 2*nhalo;
      int nz = nlocal[Z] + 2*nhalo;
      obj->nsite = nx*ny*nz;
    }
    {
      size_t sz = sizeof(double)*obj->nsite*obj->nvel;
      assert(sz > 0); /* Should not overflow in size_t I hope! */
      obj->f = (double *) mem_aligned_malloc(MEM_PAGESIZE, sz);
      assert(obj->f);
      if (obj->f == NULL) pe_fatal(pe, "malloc(lb->f) failed\n");
    }
  }

  *lb = obj;

  return 0;
}
/* Release the distribution storage, the model, and the lb_t itself. */
int lb_data_free(lb_t * lb) {

  assert(lb);

  free(lb->f);
  lb_model_free(&lb->model);
  free(lb);

  return 0;
}
/* We will not exceed 27 directions! Direction index 0, in keeping
 * with the LB model definition, is (0,0,0) - so no communication. */

/* State for one halo exchange: per-direction buffers, extents and
 * MPI requests (first 27 receives, upper 27 sends). */
typedef struct lb_halo_s {

  MPI_Comm comm;                  /* coords: Cartesian communicator */
  int nbrrank[3][3][3];           /* coords: neighbour rank look-up */
  int nlocal[3];                  /* coords: local domain size */

  lb_model_t map;                 /* Communication map 2d or 3d */
  int tagbase;                    /* send/recv tag */
  int full;                       /* All velocities at each site required. */
  int count[27];                  /* halo: item data count per direction */
  cs_limits_t slim[27];           /* halo: send data region (rectangular) */
  cs_limits_t rlim[27];           /* halo: recv data region (rectangular) */
  double * send[27];              /* halo: send buffer per direction */
  double * recv[27];              /* halo: recv buffer per direction */
  MPI_Request request[2*27];      /* halo: array of requests */

} lb_halo_t;
/*****************************************************************************
 *
 * cs_limits_size
 *
 * Utility to compute a number of sites from cs_limits_t.
 *
 *****************************************************************************/
int cs_limits_size(cs_limits_t lim) {

  /* Number of lattice sites in the inclusive rectangular region lim. */

  int nx = lim.imax - lim.imin + 1;
  int ny = lim.jmax - lim.jmin + 1;
  int nz = lim.kmax - lim.kmin + 1;

  return nx*ny*nz;
}
/*****************************************************************************
*
* lb_halo_enqueue_send
*
* Pack the send buffer. The ireq determines the direction of the
* communication.
*
*****************************************************************************/
/* Pack the send buffer for communication direction ireq.  In "full" mode
 * every velocity at each site in the send region is packed; otherwise only
 * the reduced set whose cv projection onto the message direction equals
 * mm (the squared magnitude of the direction vector). */
int lb_halo_enqueue_send(const lb_t * lb, const lb_halo_t * h, int ireq) {

  assert(1 <= ireq && ireq < h->map.nvel);
  assert(lb->ndist == 1);

  if (h->count[ireq] > 0) {

    /* Message direction (mx, my, mz) and its squared magnitude mm */
    int8_t mx = h->map.cv[ireq][X];
    int8_t my = h->map.cv[ireq][Y];
    int8_t mz = h->map.cv[ireq][Z];
    int8_t mm = mx*mx + my*my + mz*mz;

    int ib = 0;  /* Buffer index */

    assert(mm == 1 || mm == 2 || mm == 3);

    for (int ic = h->slim[ireq].imin; ic <= h->slim[ireq].imax; ic++) {
      for (int jc = h->slim[ireq].jmin; jc <= h->slim[ireq].jmax; jc++) {
        for (int kc = h->slim[ireq].kmin; kc <= h->slim[ireq].kmax; kc++) {
          /* If full, we need p = 0 */
          for (int p = 0; p < lb->nvel; p++) {
            int8_t px = lb->model.cv[p][X];
            int8_t py = lb->model.cv[p][Y];
            int8_t pz = lb->model.cv[p][Z];
            int dot = mx*px + my*py + mz*pz;
            if (h->full || dot == mm) {
              int index = cs_index(lb->cs, ic, jc, kc);
              int laddr = LB_ADDR(lb->nsite, lb->ndist, lb->nvel, index, 0, p);
              h->send[ireq][ib++] = lb->f[laddr];
            }
          }
        }
      }
    }
    /* Buffer must be exactly filled: count velocities x region sites */
    assert(ib == h->count[ireq]*cs_limits_size(h->slim[ireq]));
  }

  return 0;
}
/*****************************************************************************
*
* lb_halo_dequeue_recv
*
* Unpack the recv buffer into place in the distributions.
*
*****************************************************************************/
/* Unpack the receive buffer for direction ireq back into the
 * distributions.  The message direction is the reverse of the send
 * direction; if the Cartesian neighbour is this rank itself, data is
 * copied straight out of the matching send buffer. */
int lb_halo_dequeue_recv(lb_t * lb, const lb_halo_t * h, int ireq) {

  assert(lb);
  assert(h);
  assert(0 < ireq && ireq < h->map.nvel);
  assert(lb->ndist == 1);

  if (h->count[ireq] > 0) {

    /* The communication direction is reversed cf. the send... */
    int8_t mx = h->map.cv[h->map.nvel-ireq][X];
    int8_t my = h->map.cv[h->map.nvel-ireq][Y];
    int8_t mz = h->map.cv[h->map.nvel-ireq][Z];
    int8_t mm = mx*mx + my*my + mz*mz;

    int ib = 0;  /* Buffer index */
    double * recv = h->recv[ireq];

    {
      int i = 1 + mx;
      int j = 1 + my;
      int k = 1 + mz;
      /* If Cartesian neighbour is self, just copy out of send buffer. */
      if (h->nbrrank[i][j][k] == h->nbrrank[1][1][1]) recv = h->send[ireq];
    }

    assert(mm == 1 || mm == 2 || mm == 3);

    for (int ic = h->rlim[ireq].imin; ic <= h->rlim[ireq].imax; ic++) {
      for (int jc = h->rlim[ireq].jmin; jc <= h->rlim[ireq].jmax; jc++) {
        for (int kc = h->rlim[ireq].kmin; kc <= h->rlim[ireq].kmax; kc++) {
          for (int p = 0; p < lb->nvel; p++) {
            /* For reduced swap, we must have -cv[p] here... */
            /* NOTE(review): for p == 0 this reads cv[lb->nvel], one past
             * the last velocity index -- confirm cv has nvel+1 rows or
             * that p = 0 is otherwise safe here. */
            int8_t px = lb->model.cv[lb->nvel-p][X];
            int8_t py = lb->model.cv[lb->nvel-p][Y];
            int8_t pz = lb->model.cv[lb->nvel-p][Z];
            int dot = mx*px + my*py + mz*pz;
            if (h->full || dot == mm) {
              int index = cs_index(lb->cs, ic, jc, kc);
              int laddr = LB_ADDR(lb->nsite, lb->ndist, lb->nvel, index, 0, p);
              lb->f[laddr] = recv[ib++];
            }
          }
        }
      }
    }
    /* Buffer must be exactly drained */
    assert(ib == h->count[ireq]*cs_limits_size(h->rlim[ireq]));
  }

  return 0;
}
/*****************************************************************************
*
* lb_halo_create
*
* Currently: generate all send and receive requests.
*
*****************************************************************************/
int lb_halo_create(const lb_t * lb, lb_halo_t * h, int full) {
lb_halo_t hnull = {};
assert(lb);
assert(h);
*h = hnull;
/* Communication model */
if (lb->model.ndim == 2) lb_model_create( 9, &h->map);
if (lb->model.ndim == 3) lb_model_create(27, &h->map);
assert(h->map.ndim == lb->model.ndim);
cs_nlocal(lb->cs, h->nlocal);
cs_cart_comm(lb->cs, &h->comm);
h->tagbase = 211216;
h->full = full;
/* Determine look-up table of ranks of neighbouring processes */
{
int dims[3] = {};
int periods[3] = {};
int coords[3] = {};
MPI_Cart_get(h->comm, h->map.ndim, dims, periods, coords);
for (int p = 0; p < h->map.nvel; p++) {
int nbr[3] = {};
int out[3] = {}; /* Out-of-range is erroneous for non-perioidic dims */
int i = 1 + h->map.cv[p][X];
int j = 1 + h->map.cv[p][Y];
int k = 1 + h->map.cv[p][Z];
nbr[X] = coords[X] + h->map.cv[p][X];
nbr[Y] = coords[Y] + h->map.cv[p][Y];
nbr[Z] = coords[Z] + h->map.cv[p][Z];
out[X] = (!periods[X] && (nbr[X] < 0 || nbr[X] > dims[X]));
out[Y] = (!periods[Y] && (nbr[Y] < 0 || nbr[Y] > dims[Y]));
out[Z] = (!periods[Z] && (nbr[Z] < 0 || nbr[Z] > dims[Z]));
if (out[X] || out[Y] || out[Z]) {
h->nbrrank[i][j][k] = MPI_PROC_NULL;
}
else {
MPI_Cart_rank(h->comm, nbr, &h->nbrrank[i][j][k]);
}
}
/* I must be in the middle */
assert(h->nbrrank[1][1][1] == cs_cart_rank(lb->cs));
}
/* Limits of the halo regions in each communication direction */
for (int p = 1; p < h->map.nvel; p++) {
/* Limits for send and recv regions*/
int8_t cx = h->map.cv[p][X];
int8_t cy = h->map.cv[p][Y];
int8_t cz = h->map.cv[p][Z];
cs_limits_t send = {1, h->nlocal[X], 1, h->nlocal[Y], 1, h->nlocal[Z]};
cs_limits_t recv = {1, h->nlocal[X], 1, h->nlocal[Y], 1, h->nlocal[Z]};
if (cx == -1) send.imax = 1;
if (cx == +1) send.imin = send.imax;
if (cy == -1) send.jmax = 1;
if (cy == +1) send.jmin = send.jmax;
if (cz == -1) send.kmax = 1;
if (cz == +1) send.kmin = send.kmax;
/* velocity is reversed... */
if (cx == +1) recv.imax = recv.imin = 0;
if (cx == -1) recv.imin = recv.imax = recv.imax + 1;
if (cy == +1) recv.jmax = recv.jmin = 0;
if (cy == -1) recv.jmin = recv.jmax = recv.jmax + 1;
if (cz == +1) recv.kmax = recv.kmin = 0;
if (cz == -1) recv.kmin = recv.kmax = recv.kmax + 1;
h->slim[p] = send;
h->rlim[p] = recv;
}
/* Message count (velocities) for each communication direction */
for (int p = 1; p < h->map.nvel; p++) {
int count = 0;
if (h->full) {
count = lb->model.nvel;
}
else {
int8_t mx = h->map.cv[p][X];
int8_t my = h->map.cv[p][Y];
int8_t mz = h->map.cv[p][Z];
int8_t mm = mx*mx + my*my + mz*mz;
/* Consider each model velocity in turn */
for (int q = 1; q < lb->model.nvel; q++) {
int8_t qx = lb->model.cv[q][X];
int8_t qy = lb->model.cv[q][Y];
int8_t qz = lb->model.cv[q][Z];
int8_t dot = mx*qx + my*qy + mz*qz;
if (mm == 3 && dot == mm) count +=1; /* This is a corner */
if (mm == 2 && dot == mm) count +=1; /* This is an edge */
if (mm == 1 && dot == mm) count +=1; /* This is a side */
}
}
h->count[p] = count;
/* Allocate send buffer for send region */
if (count > 0) {
int scount = count*cs_limits_size(h->slim[p]);
h->send[p] = (double *) malloc(scount*sizeof(double));
assert(h->send[p]);
}
/* Allocate recv buffer */
if (count > 0) {
int rcount = count*cs_limits_size(h->rlim[p]);
h->recv[p] = (double *) malloc(rcount*sizeof(double));
assert(h->recv[p]);
}
}
/* Post recvs (from opposite direction cf send) */
for (int ireq = 0; ireq < h->map.nvel; ireq++) {
h->request[ireq] = MPI_REQUEST_NULL;
if (h->count[ireq] > 0) {
int i = 1 + h->map.cv[h->map.nvel-ireq][X];
int j = 1 + h->map.cv[h->map.nvel-ireq][Y];
int k = 1 + h->map.cv[h->map.nvel-ireq][Z];
int mcount = h->count[ireq]*cs_limits_size(h->rlim[ireq]);
if (h->nbrrank[i][j][k] == h->nbrrank[1][1][1]) mcount = 0;
MPI_Irecv(h->recv[ireq], mcount, MPI_DOUBLE, h->nbrrank[i][j][k],
h->tagbase + ireq, h->comm, h->request + ireq);
}
}
/* Enqueue sends (upper half of request array) */
#pragma omp parallel for schedule(dynamic, 1)
for (int ireq = 0; ireq < h->map.nvel; ireq++) {
h->request[27+ireq] = MPI_REQUEST_NULL;
if (h->count[ireq] > 0) {
int i = 1 + h->map.cv[ireq][X];
int j = 1 + h->map.cv[ireq][Y];
int k = 1 + h->map.cv[ireq][Z];
int mcount = h->count[ireq]*cs_limits_size(h->slim[ireq]);
lb_halo_enqueue_send(lb, h, ireq);
/* Short circuit messages to self. */
if (h->nbrrank[i][j][k] == h->nbrrank[1][1][1]) mcount = 0;
#pragma omp critical
{
MPI_Isend(h->send[ireq], mcount, MPI_DOUBLE, h->nbrrank[i][j][k],
h->tagbase + ireq, h->comm, h->request + 27 + ireq);
}
}
}
return 0;
}
/*****************************************************************************
*
* lb_halo_free
*
* Complete all the send and receive requests.
*
*****************************************************************************/
/* Wait for completion of all outstanding halo send/receive requests
 * and release the associated message buffers. Returns 0.
 *
 * Each OpenMP thread retires one completed request at a time via
 * MPI_Waitany; the wait itself is serialised in a critical section
 * because MPI may not be thread-safe below MPI_THREAD_MULTIPLE. */
int lb_halo_free(lb_t * lb, lb_halo_t * h) {

  assert(lb);
  assert(h);

  /* Can free() be used with thread safety? */
  /* NOTE(review): free() of distinct buffers happens concurrently here;
   * this assumes the allocator is thread-safe — true on POSIX/glibc,
   * but worth confirming for other platforms. */

  #pragma omp parallel for schedule(dynamic, 1)
  for (int ireq = 0; ireq < 2*h->map.nvel; ireq++) {
    int issatisfied = -1;
    MPI_Status status = {};

    #pragma omp critical
    {
      MPI_Waitany(2*h->map.nvel, h->request, &issatisfied, &status);
    }
    /* Check status is what we expect? */

    if (issatisfied == MPI_UNDEFINED) {
      /* No action e.g., for (0,0,0) case */
    }
    else {
      /* Handle either send or recv request completion */
      if (issatisfied < h->map.nvel) {
        /* This is a recv */
        int irreq = issatisfied;
        lb_halo_dequeue_recv(lb, h, irreq);
        free(h->recv[irreq]);
      }
      else {
        /* This was a send */
        /* NOTE(review): sends are posted at request offset 27 in
         * lb_halo_create (h->request[27+ireq]); the "- 27" here matches
         * that, but both assume h->map.nvel == 27 — confirm. */
        int isreq = issatisfied - 27;
        free(h->send[isreq]);
      }
    }
  }

  return 0;
}
/*****************************************************************************
*
* util_lb_data_check_set
*
* Set unique test values in the distribution.
*
*****************************************************************************/
/* Set unique, position-and-velocity-dependent test values in the
 * distribution, so halo exchanges can later be verified exactly
 * (see util_lb_data_check). Returns 0.
 *
 * Improvement: cs_index() depends only on (ic, jc, kc), not on the
 * velocity p, so it is hoisted out of the innermost loop instead of
 * being recomputed nvel times per lattice site. */
int util_lb_data_check_set(lb_t * lb) {

  int nlocal[3] = {};

  assert(lb);

  cs_nlocal(lb->cs, nlocal);

  for (int ic = 1; ic <= nlocal[X]; ic++) {
    for (int jc = 1; jc <= nlocal[Y]; jc++) {
      for (int kc = 1; kc <= nlocal[Z]; kc++) {
        /* Site index is loop-invariant in p: compute it once per site. */
        int index = cs_index(lb->cs, ic, jc, kc);
        for (int p = 0; p < lb->model.nvel; p++) {
          int laddr = LB_ADDR(lb->nsite, lb->ndist, lb->nvel, index, 0, p);
          lb->f[laddr] = 1.0*lb_data_index(lb, ic, jc, kc, p);
        }
      }
    }
  }

  return 0;
}
/*****************************************************************************
*
* util_lb_data_check
*
* Examine halo values and check they are as expected.
*
*****************************************************************************/
/* Examine halo sites and check the distribution values are as set by
 * util_lb_data_check_set(). With full != 0 every halo distribution is
 * checked; otherwise only those that propagate into the domain proper.
 * Returns the number of failures (also asserts in debug builds). */
int util_lb_data_check(lb_t * lb, int full) {

  int nfail = 0;
  const int hw = 1;        /* halo width in x, y */
  int hwz = hw;            /* halo width in z (zero in two dimensions) */
  int nsites[3] = {};

  assert(lb);

  cs_nlocal(lb->cs, nsites);

  /* Fix for 2d, where there should be no halo regions in Z */
  if (lb->ndim == 2) hwz = 0;

  for (int ic = 1 - hw; ic <= nsites[X] + hw; ic++) {
    for (int jc = 1 - hw; jc <= nsites[Y] + hw; jc++) {
      for (int kc = 1 - hwz; kc <= nsites[Z] + hwz; kc++) {

        /* Interior sites are not examined */
        int interior = (1 <= ic && ic <= nsites[X]) &&
                       (1 <= jc && jc <= nsites[Y]) &&
                       (1 <= kc && kc <= nsites[Z]);
        if (interior) continue;

        int index = cs_index(lb->cs, ic, jc, kc);

        for (int p = 0; p < lb->model.nvel; p++) {
          /* Destination site of this distribution after one step */
          int ic1 = ic + lb->model.cv[p][X];
          int jc1 = jc + lb->model.cv[p][Y];
          int kc1 = kc + lb->model.cv[p][Z];
          int propagates = (1 <= ic1 && ic1 <= nsites[X]) &&
                           (1 <= jc1 && jc1 <= nsites[Y]) &&
                           (1 <= kc1 && kc1 <= nsites[Z]);

          if (full || propagates) {
            /* Check against the value set at this (site, velocity) */
            int laddr = LB_ADDR(lb->nsite, lb->ndist, lb->nvel, index, 0, p);
            double fex = 1.0*lb_data_index(lb, ic, jc, kc, p);
            if (fabs(fex - lb->f[laddr]) > DBL_EPSILON) nfail += 1;
            assert(fabs(fex - lb->f[laddr]) < DBL_EPSILON);
          }
        }
      }
    }
  }

  return nfail;
}
/*****************************************************************************
*
* test_lb_halo_create
*
*****************************************************************************/
/* Create a distribution object for (ndim, nvel), fill it with unique
 * test values, perform a halo exchange via lb_halo_create/lb_halo_free,
 * and verify the halo data.
 *
 * Fix: the result of util_lb_data_check() was previously discarded, so
 * with NDEBUG defined a failing halo check went unnoticed. The failure
 * count is now asserted and returned (callers that ignored the old
 * constant 0 are unaffected on success). */
int test_lb_halo_create(pe_t * pe, cs_t * cs, int ndim, int nvel, int full) {

  lb_data_options_t options = {.ndim = ndim, .nvel = nvel, .ndist = 1};
  lb_t * lb = NULL;
  int ifail = 0;

  assert(pe);
  assert(cs);

  lb_data_create(pe, cs, &options, &lb);
  util_lb_data_check_set(lb);

  {
    lb_halo_t h = {};
    lb_halo_create(lb, &h, full);
    lb_halo_free(lb, &h);
  }

  ifail = util_lb_data_check(lb, full);
  assert(ifail == 0);

  lb_data_free(lb);

  return ifail;
}
/*****************************************************************************
*
* test_lb_halo
*
*****************************************************************************/
/* Driver for the halo tests: exercises the d2q9 model on a 2-d system
 * and the d3q15/d3q19/d3q27 models on the default 3-d system, each with
 * both reduced (full = 0) and full (full = 1) halo exchange. Returns 0. */
int test_lb_halo(pe_t * pe) {

  assert(pe);

  /* Two dimensional system (d2q9) */
  {
    cs_t * cs = NULL;
    int ntotal[3] = {64, 64, 1};

    cs_create(pe, &cs);
    cs_ntotal_set(cs, ntotal);
    cs_init(cs);

    for (int full = 0; full <= 1; full++) {
      test_lb_halo_create(pe, cs, 2, 9, full);
    }

    cs_free(cs);
  }

  /* Three dimensional system: d3q15, d3q19, d3q27 in turn */
  {
    cs_t * cs = NULL;
    int nvels[3] = {15, 19, 27};

    cs_create(pe, &cs);
    cs_init(cs);

    for (int i = 0; i < 3; i++) {
      test_lb_halo_create(pe, cs, 3, nvels[i], 0);
      test_lb_halo_create(pe, cs, 3, nvels[i], 1);
    }

    cs_free(cs);
  }

  return 0;
}
/*****************************************************************************
*
* test_model_suite
*
*****************************************************************************/
/* Entry point for the model (distribution) test suite. Creates the
 * parallel environment and a default coordinate system, runs all the
 * sub-tests, and reports PASS. Returns 0. */
int test_model_suite(void) {

  pe_t * pe = NULL;
  cs_t * cs = NULL;

  pe_create(MPI_COMM_WORLD, PE_QUIET, &pe);

  /* The halo tests set up their own coordinate systems internally */
  test_lb_halo(pe);

  cs_create(pe, &cs);
  cs_init(cs);

  /* Test model structure (coordinate-independent stuff) */
  test_model_constants();
  test_model_velocity_set();

  /* Now test actual distributions */
  do_test_model_distributions(pe, cs);
  do_test_model_halo_swap(pe, cs);
  /* Reduced halo swap is guarded to AoS layout with no SIMD
   * vectorisation (see the condition). */
  if (DATA_MODEL == DATA_MODEL_AOS && NSIMDVL == 1) {
    do_test_model_reduced_halo_swap(pe, cs);
  }
  do_test_lb_model_io(pe, cs);

  pe_info(pe, "PASS ./unit/test_model\n");

  cs_free(cs);
  pe_free(pe);

  return 0;
}
/*****************************************************************************
*
* test_model_constants
*
* Check the various constants associated with the reduced halo swap.
*
*****************************************************************************/
/* Check the various constants associated with the reduced halo swap.
 * The whole body is compiled out unless the legacy global-symbol macro
 * is defined; with it, each (x, y, z) displacement block is checked to
 * point at velocities with the expected component sign (+1 forward,
 * -1 backward). */
static void test_model_constants(void) {

#ifdef TEST_TO_BE_REMOVED_WITH_GLOBAL_SYMBOLS
  int i, k, p;

  /* x-direction blocks: forward entries have cv[p][X] == +1,
   * backward entries have cv[p][X] == -1. */
  for (i = 0; i < CVXBLOCK; i++) {
    for (k = 0; k < xblocklen_cv[i]; k++) {
      p = xdisp_fwd_cv[i] + k;
      test_assert(p >= 0 && p < NVEL);
      test_assert(cv[p][X] == +1);
      p = xdisp_bwd_cv[i] + k;
      test_assert(p >= 0 && p < NVEL);
      test_assert(cv[p][X] == -1);
    }
  }

  /* y-direction blocks */
  for (i = 0; i < CVYBLOCK; i++) {
    for (k = 0; k < yblocklen_cv[i]; k++) {
      p = ydisp_fwd_cv[i] + k;
      test_assert(p >= 0 && p < NVEL);
      test_assert(cv[p][Y] == +1);
      p = ydisp_bwd_cv[i] + k;
      test_assert(p >= 0 && p < NVEL);
      test_assert(cv[p][Y] == -1);
    }
  }

  /* z-direction blocks */
  for (i = 0; i < CVZBLOCK; i++) {
    for (k = 0; k < zblocklen_cv[i]; k++) {
      p = zdisp_fwd_cv[i] + k;
      test_assert(p >= 0 && p < NVEL);
      test_assert(cv[p][Z] == +1);
      p = zdisp_bwd_cv[i] + k;
      test_assert(p >= 0 && p < NVEL);
      test_assert(cv[p][Z] == -1);
    }
  }
#endif

  return;
}
/*****************************************************************************
*
* test_model_velocity_set
*
* Check the velocities, kinetic projector, tables of eigenvectors
* etc etc are all consistent for the current model.
*
*****************************************************************************/
/* Check the velocity-set bookkeeping is self-consistent: NHYDRO must
 * count the hydrodynamic modes — density (1), momentum (NDIM) and the
 * symmetric stress (NDIM*(NDIM+1)/2). */
static void test_model_velocity_set(void) {

  test_assert(NHYDRO == (1 + NDIM + NDIM*(NDIM+1)/2));

  return;
}
/*****************************************************************************
*
* do_test_model_distributions
*
* Test the distribution interface.
*
*****************************************************************************/
/* Test the basic distribution interface: default ndist, set/get of
 * individual distribution values, and the zeroth and first moments at
 * a single site. Returns 0. */
int do_test_model_distributions(pe_t * pe, cs_t * cs) {

  const int index = 1;
  const int ndist = 2;
  lb_t * lb = NULL;

  assert(pe);
  assert(cs);

  lb_create(pe, cs, &lb);
  assert(lb);

  /* Default number of distributions should be one */
  {
    int nd = 0;
    lb_ndist(lb, &nd);
    assert(nd == 1);
  }

  lb_ndist_set(lb, ndist);
  lb_init(lb);

  /* Report the number of distributions */
  {
    int nd = 0;
    lb_ndist(lb, &nd);
    assert(nd == ndist);
  }

  for (int n = 0; n < ndist; n++) {

    /* Set then read back each value f_p = 0.01 n + w_p */
    for (int p = 0; p < lb->model.nvel; p++) {
      double fset = 0.01*n + lb->model.wv[p];
      double fget = 0.0;
      lb_f_set(lb, index, p, n, fset);
      lb_f(lb, index, p, n, &fget);
      assert(fabs(fget - fset) < DBL_EPSILON);
    }

    /* Check zeroth moment... */
    {
      double rho = 0.0;
      double rho0 = 0.01*n*lb->model.nvel + 1.0;
      lb_0th_moment(lb, index, (lb_dist_enum_t) n, &rho);
      assert(fabs(rho - rho0) <= DBL_EPSILON);
    }

    /* Check first moment... should vanish by symmetry of the set-up */
    {
      double u[3] = {0};
      lb_1st_moment(lb, index, (n == 0) ? LB_RHO : LB_PHI, u);
      for (int i = 0; i < lb->model.ndim; i++) {
        assert(fabs(u[i] - 0.0) < DBL_EPSILON);
      }
    }
  }

  lb_free(lb);

  return 0;
}
/*****************************************************************************
*
* do_test_model_halo_swap
*
* Test full halo swap.
*
*****************************************************************************/
/* Test the full halo swap. The domain is filled with values that encode
 * the local coordinate (for p = X, Y, Z) or the velocity index (other p),
 * the halo is exchanged, and halo sites are checked against the periodic
 * image they should have received. Returns 0. */
int do_test_model_halo_swap(pe_t * pe, cs_t * cs) {

  int i, j, k, p;
  int n, ndist = 2;
  int index, nlocal[3];
  const int nextra = 1;  /* Distribution halo width always 1 */
  double f_expect;
  double f_actual;
  lb_t * lb = NULL;

  assert(pe);
  assert(cs);

  lb_create(pe, cs, &lb);
  assert(lb);

  lb_ndist_set(lb, ndist);
  lb_init(lb);
  cs_nlocal(cs, nlocal);

  /* The test relies on a uniform decomposition in parallel:
   *
   * f[0] or f[X] is set to local x index,
   * f[1] or f[Y] is set to local y index
   * f[2] or f[Z] is set to local z index
   * remainder are set to velocity index. */

  for (i = 1; i <= nlocal[X]; i++) {
    for (j = 1; j <= nlocal[Y]; j++) {
      for (k = 1; k <= nlocal[Z]; k++) {
        index = cs_index(cs, i, j, k);
        for (n = 0; n < ndist; n++) {
          lb_f_set(lb, index, X, n, (double) (i));
          lb_f_set(lb, index, Y, n, (double) (j));
          lb_f_set(lb, index, Z, n, (double) (k));
          for (p = 3; p < lb->model.nvel; p++) {
            lb_f_set(lb, index, p, n, (double) p);
          }
        }
      }
    }
  }

  /* Swap on the device copy, then bring the result back to the host */
  lb_memcpy(lb, tdpMemcpyHostToDevice);
  lb_halo(lb);
  lb_memcpy(lb, tdpMemcpyDeviceToHost);

  /* Test all the sites not in the interior */
  /* NOTE(review): the continue in each of the three loops means only
   * sites with ALL of i, j, k outside the domain (the corner regions)
   * are examined — confirm this restriction is intended. */

  for (i = 1 - nextra; i <= nlocal[X] + nextra; i++) {
    if (i >= 1 && i <= nlocal[X]) continue;
    for (j = 1 - nextra; j <= nlocal[Y] + nextra; j++) {
      if (j >= 1 && j <= nlocal[Y]) continue;
      for (k = 1 - nextra; k <= nlocal[Z] + nextra; k++) {
        if (k >= 1 && k <= nlocal[Z]) continue;

        index = cs_index(cs, i, j, k);

        for (n = 0; n < ndist; n++) {
          /* abs(i - nlocal) gives the coordinate of the periodic image
           * (0 -> nlocal, nlocal+1 -> 1), assuming the decomposition is
           * uniform as stated above. */
          f_expect = 1.0*abs(i - nlocal[X]);
          lb_f(lb, index, X, n, &f_actual);
          test_assert(fabs(f_actual - f_expect) < DBL_EPSILON);
          f_expect = 1.0*abs(j - nlocal[Y]);
          lb_f(lb, index, Y, n, &f_actual);
          test_assert(fabs(f_actual - f_expect) < DBL_EPSILON);
          f_expect = 1.0*abs(k - nlocal[Z]);
          lb_f(lb, index, Z, n, &f_actual);
          test_assert(fabs(f_actual - f_expect) < DBL_EPSILON);

          /* Velocity-index values are position-independent */
          for (p = 3; p < lb->model.nvel; p++) {
            lb_f(lb, index, p, n, &f_actual);
            f_expect = (double) p;
            test_assert(fabs(f_actual - f_expect) < DBL_EPSILON);
          }
        }
      }
    }
  }

  lb_free(lb);

  return 0;
}
/*****************************************************************************
*
* do_test_model_reduced_halo_swap
*
*****************************************************************************/
/* Test the reduced halo swap. Every distribution is seeded with a value
 * encoding (n, p); after the swap, interior sites must be unchanged and
 * halo distributions that would propagate INTO the domain must hold the
 * expected value. Other halo distributions are unconstrained under the
 * reduced swap. Returns 0. */
int do_test_model_reduced_halo_swap(pe_t * pe, cs_t * cs) {

  int i, j, k, p;
  int icdt, jcdt, kcdt;
  int index, nlocal[3];
  int n, ndist = 2;
  const int nextra = 1;  /* halo width */

  double f_expect;
  double f_actual;
  lb_t * lb = NULL;

  assert(pe);
  assert(cs);

  lb_create(pe, cs, &lb);
  assert(lb);

  lb_ndist_set(lb, ndist);
  lb_init(lb);
  lb_halo_set(lb, LB_HALO_REDUCED);
  cs_nlocal(cs, nlocal);

  /* Set everything which is NOT in a halo: f = n*nvel + p encodes both
   * the distribution index and the velocity index uniquely. */
  for (i = 1; i <= nlocal[X]; i++) {
    for (j = 1; j <= nlocal[Y]; j++) {
      for (k = 1; k <= nlocal[Z]; k++) {
        index = cs_index(cs, i, j, k);
        for (n = 0; n < ndist; n++) {
          for (p = 0; p < lb->model.nvel; p++) {
            f_expect = 1.0*(n*lb->model.nvel + p);
            lb_f_set(lb, index, p, n, f_expect);
          }
        }
      }
    }
  }

  lb_halo_via_struct(lb);

  /* Now check that the interior sites are unchanged */
  for (i = 1; i <= nlocal[X]; i++) {
    for (j = 1; j <= nlocal[Y]; j++) {
      for (k = 1; k <= nlocal[Z]; k++) {
        index = cs_index(cs, i, j, k);
        for (n = 0; n < ndist; n++) {
          for (p = 0; p < lb->model.nvel; p++) {
            lb_f(lb, index, p, n, &f_actual);
            f_expect = 1.0*(n*lb->model.nvel + p);
            test_assert(fabs(f_expect - f_actual) < DBL_EPSILON);
          }
        }
      }
    }
  }

  /* Also check the halos sites. The key test of the reduced halo
   * swap is that distributions for which r + c_i dt takes us into
   * the domain proper must be correct. */
  /* NOTE(review): as in the full-swap test, the continue statements
   * restrict the check to corner halo regions — confirm intended. */

  for (i = 1 - nextra; i <= nlocal[X] + nextra; i++) {
    if (i >= 1 && i <= nlocal[X]) continue;
    for (j = 1 - nextra; j <= nlocal[Y] + nextra; j++) {
      if (j >= 1 && j <= nlocal[Y]) continue;
      for (k = 1 - nextra; k <= nlocal[Z] + nextra; k++) {
        if (k >= 1 && k <= nlocal[Z]) continue;

        index = cs_index(cs, i, j, k);

        for (n = 0; n < ndist; n++) {
          for (p = 0; p < lb->model.nvel; p++) {
            lb_f(lb, index, p, n, &f_actual);
            f_expect = 1.0*(n*lb->model.nvel + p);

            /* Destination site after one propagation step */
            icdt = i + lb->model.cv[p][X];
            jcdt = j + lb->model.cv[p][Y];
            kcdt = k + lb->model.cv[p][Z];

            /* Only distributions propagating into the domain are
             * guaranteed by the reduced swap. */
            if (test_model_is_domain(cs, icdt, jcdt, kcdt)) {
              test_assert(fabs(f_actual - f_expect) < DBL_EPSILON);
            }
          }
        }

        /* Next site */
      }
    }
  }

  lb_free(lb);

  return 0;
}
/*****************************************************************************
*
* test_model_is_domain
*
* Is (ic, jc, kc) in the domain proper?
*
*****************************************************************************/
/* Is (ic, jc, kc) in the domain proper (i.e., within [1, nlocal] in
 * every direction)? Returns 1 if inside, 0 otherwise. */
static int test_model_is_domain(cs_t * cs, int ic, int jc, int kc) {

  int nlocal[3] = {0};

  assert(cs);

  cs_nlocal(cs, nlocal);

  return (1 <= ic && ic <= nlocal[X] &&
          1 <= jc && jc <= nlocal[Y] &&
          1 <= kc && kc <= nlocal[Z]);
}
/*****************************************************************************
*
* do_test_lb_model_io
*
*****************************************************************************/
/* Placeholder i/o test: create a writer and a reader distribution with
 * two distributions each. The write/read/compare stages are not yet
 * implemented. Returns 0. */
int do_test_lb_model_io(pe_t * pe, cs_t * cs) {

  const int ndist = 2;
  lb_t * lb_rd = NULL;
  lb_t * lb_wr = NULL;

  assert(pe);
  assert(cs);

  lb_create_ndist(pe, cs, ndist, &lb_rd);
  lb_create_ndist(pe, cs, ndist, &lb_wr);

  lb_init(lb_wr);
  lb_init(lb_rd);

  /* Write */
  /* Read */
  /* Compare */

  lb_free(lb_wr);
  lb_free(lb_rd);

  return 0;
}
|
task_depend_omp.c | /* --- File task_depend_omp.c --- */
#include <stdlib.h>
#include <stdio.h>
/*
 * Wavefront recurrence x[i][j] = x[i-1][j] + x[i][j-1] computed first
 * serially and then with OpenMP tasks; both results are printed for
 * comparison.
 *
 * Fixes relative to the original:
 *  - the tasks used depend(out: x) on the whole array, which merely
 *    serialises every task instead of expressing the element-wise
 *    dependences; each task now declares depend(in:) on its north and
 *    west neighbours and depend(out:) on its own element;
 *  - i and j were shared inside the tasks, racing with the generating
 *    thread as it advanced the loops; they are now firstprivate;
 *  - main now returns 0 explicitly.
 */
int main(int argc, char **argv) {

  enum { N = 8 };        /* problem size (was a VLA bound) */
  int x[N][N];
  int i, j;

  /* Initialize x */
  for (i = 0; i < N; i++)
    for (j = 0; j < N; j++)
      x[i][j] = i + j;

  /* Serial computation */
  for (i = 1; i < N; i++) {
    for (j = 1; j < N; j++)
      x[i][j] = x[i-1][j] + x[i][j-1];
  }

  printf("Serial result:\n");
  for (i = 1; i < N; i++) {
    for (j = 1; j < N; j++)
      printf("%8d ", x[i][j]);
    printf("\n");
  }

  /* Reset x */
  for (i = 0; i < N; i++)
    for (j = 0; j < N; j++)
      x[i][j] = i + j;

  /* Parallel computation: one task per element. Each task waits for its
   * north (x[i-1][j]) and west (x[i][j-1]) neighbours, so the wavefront
   * order emerges from the dependence graph. The implicit barrier at the
   * end of the parallel region guarantees completion before printing. */
  #pragma omp parallel
  #pragma omp single
  for (i = 1; i < N; i++) {
    for (j = 1; j < N; j++) {
      #pragma omp task depend(in: x[i-1][j], x[i][j-1]) \
                       depend(out: x[i][j]) firstprivate(i, j)
      x[i][j] = x[i-1][j] + x[i][j-1];
    }
  }

  printf("Parallel result:\n");
  for (i = 1; i < N; i++) {
    for (j = 1; j < N; j++)
      printf("%8d ", x[i][j]);
    printf("\n");
  }

  return 0;
}
|
conv1x1s2_pack4_neon.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
// Stride-2 spatial shrink for the pack-4 (float32x4 per pixel) layout:
// copies every second pixel of every second row of bottom_blob into
// top_blob's storage. This is the sub-sampling step of a 1x1 stride-2
// convolution.
static void conv1x1s2_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
    // NOTE(review): ncnn Mat copies share data, so writes through
    // bottom_blob_shrinked go directly into top_blob — confirm this
    // aliasing is intended (the usual pattern shrinks into a separate
    // workspace blob before the 1x1 sgemm).
    Mat bottom_blob_shrinked = top_blob;

    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    //size_t elemsize = bottom_blob.elemsize;
    //int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // Floats to skip after each output row: the unconsumed tail of the
    // current input row (w - 2*outw pixels) plus one whole skipped row
    // (w pixels), times 4 floats per packed pixel.
    const int tailstep = (w - 2*outw + w) * 4;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=0; p<channels; p++)
    {
        const float* r0 = bottom_blob.channel(p);
        float* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Copy one pack-4 pixel, then step over the next
                // (stride 2 => advance 8 floats on input, 4 on output).
                float32x4_t _v = vld1q_f32(r0);
                vst1q_f32(outptr, _v);

                r0 += 8;
                outptr += 4;
            }

            r0 += tailstep;
        }
    }
}
}
|
graph.impl.h | /**
* Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
* Copyright (c) 2017, Daniel Thuerck, TU Darmstadt - GCC. All rights reserved.
*
* This software may be modified and distributed under the terms
* of the BSD 3-clause license. See the LICENSE file for details.
*/
#include <libs/data_structures/graph.h>
#include <queue>
#include <iostream>
#include <climits>
NS_CULIP_BEGIN
NS_DATA_STRUCTURES_BEGIN
/**
* *****************************************************************************
* Graph<T> - public
* *****************************************************************************
*/
/**
 * Construct a graph with a fixed number of nodes and no edges.
 * One (initially empty) adjacency list is created per node; edges are
 * added later via add_edge().
 */
template<typename T>
Graph<T>::
Graph(
    const params_ptr<T>& params,
    const size_t num_nodes)
: m_params(params),
  m_num_nodes(num_nodes),
  m_adj_lists(num_nodes),
  m_edges()
{

}
/* ************************************************************************** */
/* Destructor: all members release their own storage. */
template<typename T>
Graph<T>::
~Graph()
{

}
/* ************************************************************************** */
/* Number of nodes fixed at construction time. */
template<typename T>
size_t
Graph<T>::
get_num_nodes()
{
    return m_num_nodes;
}
/* ************************************************************************** */
/* Read-only view of the edge list (tuples of node, node, weight). */
template<typename T>
const
std::vector<edge<T>>&
Graph<T>::
get_edges()
{
    return m_edges;
}
/* ************************************************************************** */
/**
 * Neighbourhood query: returns (neighbour id, edge weight) pairs for
 * node n. An out-of-range n yields an empty list.
 */
template<typename T>
const std::vector<std::pair<index_t, T>>
Graph<T>::
operator[](
    const index_t n)
{
    std::vector<std::pair<index_t, T>> neighbours;

    if (n >= m_num_nodes)
        return neighbours;

    /* Resolve each incident edge id to (other endpoint, weight). */
    for (const index_t& e_id : m_adj_lists[n])
    {
        const edge<T>& e = m_edges[e_id];
        const index_t other = (std::get<0>(e) == n) ? std::get<1>(e)
                                                    : std::get<0>(e);
        neighbours.push_back(std::make_pair(other, std::get<2>(e)));
    }

    return neighbours;
}
/* ************************************************************************** */
/**
 * Insert an undirected edge (n1, n2) with the given weight.
 *
 * Out-of-range endpoints and self-loops are silently ignored. If the
 * edge already exists, no duplicate is created; unless the edge mode is
 * UNIT, the existing edge's weight is incremented by `weight`.
 * NOTE(review): the inline comment says "increase weight by 1" but the
 * code adds `weight` — confirm which is intended.
 */
template<typename T>
void
Graph<T>::
add_edge(
    const index_t n1,
    const index_t n2,
    const T weight)
{
    if (n1 >= m_num_nodes || n2 >= m_num_nodes || n1 == n2)
        return;

    /* check if there is already such an edge */
    for (const std::pair<index_t, T>& i : (*this)[n1])
    {
        if (i.first == n2)
        {
            /* find this edge and increase weight by 1 */
            if (m_params->p_edge_mode != culip_graph_edge_mode_t::UNIT)
            {
                /* Scan n1's incident edges for the one touching n2. */
                for (const index_t& e_id : m_adj_lists[n1])
                {
                    if (std::get<0>(m_edges[e_id]) == n2 ||
                        std::get<1>(m_edges[e_id]) == n2)
                        std::get<2>(m_edges[e_id]) += weight;
                }
            }

            return;
        }
    }

    /* New edge: record it and link its id into both adjacency lists. */
    m_edges.push_back(edge<T>(n1, n2, weight));
    m_adj_lists[n1].push_back(m_edges.size() - 1);
    m_adj_lists[n2].push_back(m_edges.size() - 1);
}
/* ************************************************************************** */
/**
 * Breadth-first search from start_node. On return, markers[v] holds the
 * BFS depth of v (0 for start_node); unreached nodes keep the sentinel
 * (size_t)-1 they were filled with.
 *
 * Fix: markers is a std::vector<size_t>; size_t is unsigned, so the
 * original reachability test `markers[e.first] < 0` was always false
 * and the search never advanced past the start node. The test now
 * compares against the explicit sentinel.
 */
template<typename T>
void
Graph<T>::
mark_bfs(
    const index_t start_node,
    std::vector<size_t>& markers)
{
    /* Sentinel for "not yet visited" (the value resize(-1) produces). */
    const size_t unvisited = static_cast<size_t>(-1);

    markers.clear();
    markers.resize(m_num_nodes, unvisited);

    std::queue<int> qu;
    qu.push(start_node);
    markers[start_node] = 0;

    while(!qu.empty())
    {
        const int cur = qu.front();
        qu.pop();

        for (const std::pair<int, T>& e : (*this)[cur])
        {
            if (markers[e.first] == unvisited)
            {
                markers[e.first] = markers[cur] + 1;
                qu.push(e.first);
            }
        }
    }
}

/* ************************************************************************** */

/**
 * Make the graph connected: run a BFS from a random start node and, for
 * every node not reached, add zero-weight "virtual" edges to reached
 * nodes. Repeat until a single BFS covers all nodes.
 *
 * Fix: marker holds size_t values, so the original tests
 * `marker[i] < 0` (always false) and `marker[j] >= 0` (always true)
 * could never detect unreached nodes. Unreached nodes carry the
 * (size_t)-1 sentinel set by mark_bfs; compare against it.
 */
template<typename T>
void
Graph<T>::
connect()
{
    const size_t unvisited = static_cast<size_t>(-1);

    std::cout << "Connecting graph..." << std::endl;

    std::vector<size_t> marker;
    bool connected = false;

    while(!connected)
    {
        connected = true;

        const index_t start_node = rand() % m_num_nodes;
        mark_bfs(start_node, marker);

        for (index_t i = 0; i < m_num_nodes; ++i)
        {
            if (marker[i] == unvisited)
            {
                connected = false;

                /* Attach the unreached node to reached nodes by
                 * zero-weight virtual edges. */
                for (index_t j = i + 1; j < m_num_nodes; ++j)
                {
                    if (marker[j] != unvisited)
                    {
                        add_edge(i, j, 0);
                    }
                }
            }
        }
    }
}
/* ************************************************************************** */
/**
 * Determine the longest shortest path by executing a BFS from each node
 * as start node, then selecting the longest path in the resulting
 * (implicit) all-pairs distance matrix. The endpoints are returned in
 * n1/n2; the return value is the path length.
 *
 * Fix: nodes not reached by a BFS carry the sentinel (size_t)-1 in
 * markers; the original treated them as candidate distances, so any
 * disconnected pair would win the maximum. Such nodes are now skipped.
 * (Log-message typo "shorted" also corrected.)
 */
template<typename T>
size_t
Graph<T>::
longest_shortest_path(
    index_t& n1,
    index_t& n2)
{
    const size_t unvisited = static_cast<size_t>(-1);

    std::cout << "Finding all-pairs shortest paths..." << std::endl;

    size_t longest_path = 0;

    #pragma omp parallel for shared(longest_path)
    for (index_t nd1 = 0; nd1 < m_num_nodes; ++nd1)
    {
        std::vector<size_t> markers;
        mark_bfs(nd1, markers);

        /* Per-thread maximum over targets nd2 > nd1 (each unordered
         * pair is considered exactly once across the whole loop). */
        size_t local_max_path = 0;
        index_t local_max_id = 0;
        for (index_t nd2 = nd1 + 1; nd2 < m_num_nodes; ++nd2)
        {
            if (markers[nd2] == unvisited)
                continue;   /* nd2 unreachable from nd1 */

            if (markers[nd2] > local_max_path)
            {
                local_max_path = markers[nd2];
                local_max_id = nd2;
            }
        }

        /* Reduce the per-thread result into the shared answer. */
        #pragma omp critical
        {
            if (local_max_path > longest_path)
            {
                longest_path = local_max_path;
                n1 = nd1;
                n2 = local_max_id;
            }
        }
    }

    return longest_path;
}
NS_DATA_STRUCTURES_END
NS_CULIP_END
|
struct_vector.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.36 $
***********************************************************************EHEADER*/
/******************************************************************************
*
* Member functions for hypre_StructVector class.
*
*****************************************************************************/
#include "_hypre_struct_mv.h"
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Allocate a struct-vector shell on the given grid. The numeric data is
 * not allocated here (see hypre_StructVectorInitialize); ghost-layer
 * widths default to those of the grid. */
hypre_StructVector *
hypre_StructVectorCreate( MPI_Comm comm,
                          hypre_StructGrid *grid )
{
   hypre_StructVector *vector;
   HYPRE_Int d;

   vector = hypre_CTAlloc(hypre_StructVector, 1);

   hypre_StructVectorComm(vector) = comm;
   hypre_StructGridRef(grid, &hypre_StructVectorGrid(vector));
   hypre_StructVectorDataAlloced(vector) = 1;
   hypre_StructVectorBGhostNotClear(vector) = 0;
   hypre_StructVectorRefCount(vector) = 1;

   /* set defaults: one lower and one upper ghost entry per dimension */
   for (d = 0; d < 6; d++)
   {
      hypre_StructVectorNumGhost(vector)[d] = hypre_StructGridNumGhost(grid)[d];
   }

   return vector;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Take a shallow reference: bump the reference count and return the
 * same object. Pair every Ref with a later Destroy. */
hypre_StructVector *
hypre_StructVectorRef( hypre_StructVector *vector )
{
   hypre_StructVectorRefCount(vector) ++;

   return vector;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Release one reference to the vector; storage is freed only when the
 * last reference goes away. A NULL vector is ignored. */
HYPRE_Int
hypre_StructVectorDestroy( hypre_StructVector *vector )
{
   if (vector)
   {
      hypre_StructVectorRefCount(vector) --;
      if (hypre_StructVectorRefCount(vector) == 0)
      {
         /* The data array is freed only if this vector allocated it
          * itself (cf. hypre_StructVectorInitializeData, which hands
          * ownership to the caller). */
         if (hypre_StructVectorDataAlloced(vector))
         {
            hypre_SharedTFree(hypre_StructVectorData(vector));
         }
         hypre_TFree(hypre_StructVectorDataIndices(vector));
         hypre_BoxArrayDestroy(hypre_StructVectorDataSpace(vector));
         hypre_StructGridDestroy(hypre_StructVectorGrid(vector));
         hypre_TFree(vector);
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Set up the vector's data space (grid boxes grown by the ghost
 * widths), the per-box data offsets, and the total data size — but do
 * not allocate the data array itself. Idempotent: pieces already set
 * up are left alone. */
HYPRE_Int
hypre_StructVectorInitializeShell( hypre_StructVector *vector )
{
   hypre_StructGrid *grid;

   HYPRE_Int *num_ghost;

   hypre_BoxArray *data_space;
   hypre_BoxArray *boxes;
   hypre_Box *box;
   hypre_Box *data_box;

   HYPRE_Int *data_indices;
   HYPRE_Int data_size;

   HYPRE_Int i, d;

   /*-----------------------------------------------------------------------
    * Set up data_space
    *-----------------------------------------------------------------------*/

   grid = hypre_StructVectorGrid(vector);

   if (hypre_StructVectorDataSpace(vector) == NULL)
   {
      num_ghost = hypre_StructVectorNumGhost(vector);

      boxes = hypre_StructGridBoxes(grid);
      data_space = hypre_BoxArrayCreate(hypre_BoxArraySize(boxes));

      /* Each data box is the grid box grown by num_ghost[2*d] below and
       * num_ghost[2*d + 1] above in each dimension d. */
      hypre_ForBoxI(i, boxes)
      {
         box = hypre_BoxArrayBox(boxes, i);
         data_box = hypre_BoxArrayBox(data_space, i);

         hypre_CopyBox(box, data_box);
         for (d = 0; d < 3; d++)
         {
            hypre_BoxIMinD(data_box, d) -= num_ghost[2*d];
            hypre_BoxIMaxD(data_box, d) += num_ghost[2*d + 1];
         }
      }

      hypre_StructVectorDataSpace(vector) = data_space;
   }

   /*-----------------------------------------------------------------------
    * Set up data_indices array and data_size
    *-----------------------------------------------------------------------*/

   if (hypre_StructVectorDataIndices(vector) == NULL)
   {
      data_space = hypre_StructVectorDataSpace(vector);
      data_indices = hypre_CTAlloc(HYPRE_Int, hypre_BoxArraySize(data_space));

      /* Running prefix sum of data-box volumes gives each box's offset
       * into the (future) contiguous data array. */
      data_size = 0;
      hypre_ForBoxI(i, data_space)
      {
         data_box = hypre_BoxArrayBox(data_space, i);

         data_indices[i] = data_size;
         data_size += hypre_BoxVolume(data_box);
      }

      hypre_StructVectorDataIndices(vector) = data_indices;
      hypre_StructVectorDataSize(vector) = data_size;
   }

   /*-----------------------------------------------------------------------
    * Set total number of nonzero coefficients
    *-----------------------------------------------------------------------*/

   hypre_StructVectorGlobalSize(vector) = hypre_StructGridGlobalSize(grid);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Attach caller-owned data to the vector. DataAlloced is cleared so the
 * destructor will NOT free this array — the caller retains ownership. */
HYPRE_Int
hypre_StructVectorInitializeData( hypre_StructVector *vector,
                                  double             *data )
{
   hypre_StructVectorData(vector) = data;
   hypre_StructVectorDataAlloced(vector) = 0;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Finish construction: build the shell (data space, offsets, size),
 * allocate a zero-initialized data array, and mark the vector as owning
 * it (DataAlloced reset to 1, since InitializeData clears it). */
HYPRE_Int
hypre_StructVectorInitialize( hypre_StructVector *vector )
{
   double *data;

   hypre_StructVectorInitializeShell(vector);

   data = hypre_SharedCTAlloc(double, hypre_StructVectorDataSize(vector));
   hypre_StructVectorInitializeData(vector, data);
   hypre_StructVectorDataAlloced(vector) = 1;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
*
* (outside > 0): set values possibly outside of the grid extents
* (outside = 0): set values only inside the grid extents
*
* NOTE: Getting and setting values outside of the grid extents requires care,
* as these values may be stored in multiple ghost zone locations.
*--------------------------------------------------------------------------*/
/* Add-to (action > 0), set (action == 0) or get (action < 0) a single
 * value at grid_index. With outside > 0 the search extends over the
 * ghost-augmented data space; otherwise only the grid boxes. boxnum < 0
 * means "all boxes", otherwise just that box. Note the index may lie in
 * several boxes' ghost regions, so more than one location can be
 * touched when outside > 0. */
HYPRE_Int
hypre_StructVectorSetValues( hypre_StructVector *vector,
                             hypre_Index         grid_index,
                             double             *values,
                             HYPRE_Int           action,
                             HYPRE_Int           boxnum,
                             HYPRE_Int           outside )
{
   hypre_BoxArray *grid_boxes;
   hypre_Box *grid_box;

   double *vecp;

   HYPRE_Int i, istart, istop;

   /* Choose the box set to search: ghost-augmented or grid-only */
   if (outside > 0)
   {
      grid_boxes = hypre_StructVectorDataSpace(vector);
   }
   else
   {
      grid_boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(vector));
   }

   /* boxnum < 0: scan all boxes; otherwise just the one requested */
   if (boxnum < 0)
   {
      istart = 0;
      istop = hypre_BoxArraySize(grid_boxes);
   }
   else
   {
      istart = boxnum;
      istop = istart + 1;
   }

   for (i = istart; i < istop; i++)
   {
      grid_box = hypre_BoxArrayBox(grid_boxes, i);

      /* Only act where grid_index falls inside this box */
      if ((hypre_IndexX(grid_index) >= hypre_BoxIMinX(grid_box)) &&
          (hypre_IndexX(grid_index) <= hypre_BoxIMaxX(grid_box)) &&
          (hypre_IndexY(grid_index) >= hypre_BoxIMinY(grid_box)) &&
          (hypre_IndexY(grid_index) <= hypre_BoxIMaxY(grid_box)) &&
          (hypre_IndexZ(grid_index) >= hypre_BoxIMinZ(grid_box)) &&
          (hypre_IndexZ(grid_index) <= hypre_BoxIMaxZ(grid_box)) )
      {
         vecp = hypre_StructVectorBoxDataValue(vector, i, grid_index);

         if (action > 0)
         {
            /* add-to */
            *vecp += *values;
         }
         else if (action > -1)
         {
            /* action == 0: set */
            *vecp = *values;
         }
         else /* action < 0 */
         {
            /* get */
            *values = *vecp;
         }
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (action > 0): add-to values
* (action = 0): set values
* (action < 0): get values
*
* (outside > 0): set values possibly outside of the grid extents
* (outside = 0): set values only inside the grid extents
*
* NOTE: Getting and setting values outside of the grid extents requires care,
* as these values may be stored in multiple ghost zone locations.
*--------------------------------------------------------------------------*/
/* Add-to (action > 0), set (action == 0) or get (action < 0) the values
 * over set_box, with `values` laid out on value_box. With outside > 0
 * the operation extends over the ghost-augmented data space; otherwise
 * only the grid boxes. boxnum < 0 means "all boxes". The work is done
 * per intersection of set_box with each (grid or data) box, using the
 * BoxLoop2 macros to walk vector data and `values` in lock-step. */
HYPRE_Int
hypre_StructVectorSetBoxValues( hypre_StructVector *vector,
                                hypre_Box          *set_box,
                                hypre_Box          *value_box,
                                double             *values,
                                HYPRE_Int           action,
                                HYPRE_Int           boxnum,
                                HYPRE_Int           outside )
{
   hypre_BoxArray *grid_boxes;
   hypre_Box *grid_box;
   hypre_Box *int_box;

   hypre_BoxArray *data_space;
   hypre_Box *data_box;
   hypre_IndexRef data_start;
   hypre_Index data_stride;
   HYPRE_Int datai;
   double *datap;

   hypre_Box *dval_box;
   hypre_Index dval_start;
   hypre_Index dval_stride;
   HYPRE_Int dvali;

   hypre_Index loop_size;

   HYPRE_Int i, istart, istop;

   /*-----------------------------------------------------------------------
    * Initialize some things
    *-----------------------------------------------------------------------*/

   if (outside > 0)
   {
      grid_boxes = hypre_StructVectorDataSpace(vector);
   }
   else
   {
      grid_boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(vector));
   }
   data_space = hypre_StructVectorDataSpace(vector);

   if (boxnum < 0)
   {
      istart = 0;
      istop = hypre_BoxArraySize(grid_boxes);
   }
   else
   {
      istart = boxnum;
      istop = istart + 1;
   }

   /*-----------------------------------------------------------------------
    * Set the vector coefficients
    *-----------------------------------------------------------------------*/

   /* Unit strides: both the data and the values are walked densely */
   hypre_SetIndex(data_stride, 1, 1, 1);

   int_box = hypre_BoxCreate();
   dval_box = hypre_BoxDuplicate(value_box);
   hypre_SetIndex(dval_stride, 1, 1, 1);

   for (i = istart; i < istop; i++)
   {
      grid_box = hypre_BoxArrayBox(grid_boxes, i);
      data_box = hypre_BoxArrayBox(data_space, i);

      hypre_IntersectBoxes(set_box, grid_box, int_box);

      /* if there was an intersection */
      if (hypre_BoxVolume(int_box))
      {
         data_start = hypre_BoxIMin(int_box);
         /* NOTE(review): dval_start is the intersection's lower corner
          * in the same global index space as value_box (dval_box is a
          * duplicate of value_box) — confirm value_box is always given
          * in grid index space. */
         hypre_CopyIndex(data_start, dval_start);

         datap = hypre_StructVectorBoxData(vector, i);

         hypre_BoxGetSize(int_box, loop_size);

         if (action > 0)
         {
            /* add-to */
            hypre_BoxLoop2Begin(hypre_StructVectorDim(vector), loop_size,
                                data_box,data_start,data_stride,datai,
                                dval_box,dval_start,dval_stride,dvali);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai,dvali) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop2For(datai, dvali)
            {
               datap[datai] += values[dvali];
            }
            hypre_BoxLoop2End(datai, dvali);
         }
         else if (action > -1)
         {
            /* action == 0: set */
            hypre_BoxLoop2Begin(hypre_StructVectorDim(vector), loop_size,
                                data_box,data_start,data_stride,datai,
                                dval_box,dval_start,dval_stride,dvali);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai,dvali) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop2For(datai, dvali)
            {
               datap[datai] = values[dvali];
            }
            hypre_BoxLoop2End(datai, dvali);
         }
         else /* action < 0 */
         {
            /* get */
            hypre_BoxLoop2Begin(hypre_StructVectorDim(vector), loop_size,
                                data_box,data_start,data_stride,datai,
                                dval_box,dval_start,dval_stride,dvali);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai,dvali) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop2For(datai, dvali)
            {
               values[dvali] = datap[datai];
            }
            hypre_BoxLoop2End(datai, dvali);
         }
      }
   }

   hypre_BoxDestroy(int_box);
   hypre_BoxDestroy(dval_box);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (outside > 0): clear values possibly outside of the grid extents
* (outside = 0): clear values only inside the grid extents
*--------------------------------------------------------------------------*/
/* Zero the single vector entry at 'grid_index' in every selected box that
 * contains that index.  boxnum/outside have the same meaning as in
 * hypre_StructVectorSetBoxValues. */
HYPRE_Int
hypre_StructVectorClearValues( hypre_StructVector *vector,
                               hypre_Index         grid_index,
                               HYPRE_Int           boxnum,
                               HYPRE_Int           outside )
{
   hypre_BoxArray  *grid_boxes;
   hypre_Box       *grid_box;
   double          *vecp;
   HYPRE_Int        i, istart, istop;

   if (outside > 0)
   {
      grid_boxes = hypre_StructVectorDataSpace(vector);
   }
   else
   {
      grid_boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(vector));
   }

   if (boxnum < 0)
   {
      istart = 0;
      istop  = hypre_BoxArraySize(grid_boxes);
   }
   else
   {
      istart = boxnum;
      istop  = istart + 1;
   }

   for (i = istart; i < istop; i++)
   {
      grid_box = hypre_BoxArrayBox(grid_boxes, i);

      /* clear only when grid_index lies inside this box's extents */
      if ((hypre_IndexX(grid_index) >= hypre_BoxIMinX(grid_box)) &&
          (hypre_IndexX(grid_index) <= hypre_BoxIMaxX(grid_box)) &&
          (hypre_IndexY(grid_index) >= hypre_BoxIMinY(grid_box)) &&
          (hypre_IndexY(grid_index) <= hypre_BoxIMaxY(grid_box)) &&
          (hypre_IndexZ(grid_index) >= hypre_BoxIMinZ(grid_box)) &&
          (hypre_IndexZ(grid_index) <= hypre_BoxIMaxZ(grid_box)) )
      {
         vecp = hypre_StructVectorBoxDataValue(vector, i, grid_index);
         *vecp = 0.0;
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* (outside > 0): clear values possibly outside of the grid extents
* (outside = 0): clear values only inside the grid extents
*--------------------------------------------------------------------------*/
/* Zero all vector entries in the intersection of 'clear_box' with each
 * selected box.  boxnum/outside have the same meaning as in
 * hypre_StructVectorSetBoxValues. */
HYPRE_Int
hypre_StructVectorClearBoxValues( hypre_StructVector *vector,
                                  hypre_Box          *clear_box,
                                  HYPRE_Int           boxnum,
                                  HYPRE_Int           outside )
{
   hypre_BoxArray  *grid_boxes;
   hypre_Box       *grid_box;
   hypre_Box       *int_box;
   hypre_BoxArray  *data_space;
   hypre_Box       *data_box;
   hypre_IndexRef   data_start;
   hypre_Index      data_stride;
   HYPRE_Int        datai;
   double          *datap;
   hypre_Index      loop_size;
   HYPRE_Int        i, istart, istop;

   /*-----------------------------------------------------------------------
    * Initialize some things
    *-----------------------------------------------------------------------*/

   if (outside > 0)
   {
      grid_boxes = hypre_StructVectorDataSpace(vector);
   }
   else
   {
      grid_boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(vector));
   }
   data_space = hypre_StructVectorDataSpace(vector);

   if (boxnum < 0)
   {
      istart = 0;
      istop  = hypre_BoxArraySize(grid_boxes);
   }
   else
   {
      istart = boxnum;
      istop  = istart + 1;
   }

   /*-----------------------------------------------------------------------
    * Clear the vector coefficients
    *-----------------------------------------------------------------------*/

   hypre_SetIndex(data_stride, 1, 1, 1);

   int_box = hypre_BoxCreate();

   for (i = istart; i < istop; i++)
   {
      grid_box = hypre_BoxArrayBox(grid_boxes, i);
      data_box = hypre_BoxArrayBox(data_space, i);

      hypre_IntersectBoxes(clear_box, grid_box, int_box);

      /* if there was an intersection */
      if (hypre_BoxVolume(int_box))
      {
         data_start = hypre_BoxIMin(int_box);

         datap = hypre_StructVectorBoxData(vector, i);

         hypre_BoxGetSize(int_box, loop_size);

         hypre_BoxLoop1Begin(hypre_StructVectorDim(vector), loop_size,
                             data_box,data_start,data_stride,datai);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai) HYPRE_SMP_SCHEDULE
#endif
         hypre_BoxLoop1For(datai)
         {
            datap[datai] = 0.0;
         }
         hypre_BoxLoop1End(datai);
      }
   }

   hypre_BoxDestroy(int_box);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Zero the entire raw data array of the vector, ghost entries included. */
HYPRE_Int
hypre_StructVectorClearAllValues( hypre_StructVector *vector )
{
   double    *vdata     = hypre_StructVectorData(vector);
   HYPRE_Int  vdatasize = hypre_StructVectorDataSize(vector);
   HYPRE_Int  k;

#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
   for (k = 0; k < vdatasize; k++)
   {
      vdata[k] = 0.0;
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Copy the 2*ndim ghost-layer widths (lower/upper per dimension) from
 * 'num_ghost' into the vector. */
HYPRE_Int
hypre_StructVectorSetNumGhost( hypre_StructVector *vector,
                               HYPRE_Int          *num_ghost )
{
   HYPRE_Int  k;
   HYPRE_Int  ndim = hypre_StructVectorDim(vector);

   /* entries are stored as [lower0, upper0, lower1, upper1, ...] */
   for (k = 0; k < 2*ndim; k++)
   {
      hypre_StructVectorNumGhost(vector)[k] = num_ghost[k];
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Finalize the vector after setting values.  Currently a no-op kept for
 * interface symmetry with the matrix assemble routine. */
HYPRE_Int
hypre_StructVectorAssemble( hypre_StructVector *vector )
{
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* copies data from x to y
* y has its own data array, so this is a deep copy in that sense.
* The grid and other size information are not copied - they are
* assumed to have already been set up to be consistent.
*--------------------------------------------------------------------------*/
/* Deep-copy the coefficients of x into y over the grid boxes of x.
 * x and y are assumed to have been set up with consistent grids and
 * data spaces; only values (not structure) are copied. */
HYPRE_Int
hypre_StructVectorCopy( hypre_StructVector *x,
                        hypre_StructVector *y )
{
   hypre_Box       *x_data_box;

   HYPRE_Int        vi;
   double          *xp, *yp;

   hypre_BoxArray  *boxes;
   hypre_Box       *box;
   hypre_Index      loop_size;
   hypre_IndexRef   start;
   hypre_Index      unit_stride;

   HYPRE_Int        i;

   /*-----------------------------------------------------------------------
    * Copy the vector coefficients box by box
    *-----------------------------------------------------------------------*/

   hypre_SetIndex(unit_stride, 1, 1, 1);

   boxes = hypre_StructGridBoxes( hypre_StructVectorGrid(x) );
   hypre_ForBoxI(i, boxes)
   {
      box   = hypre_BoxArrayBox(boxes, i);
      start = hypre_BoxIMin(box);

      /* NOTE(review): both x and y are indexed through x's data box; this
         assumes y's data space matches x's — TODO confirm with callers. */
      x_data_box =
         hypre_BoxArrayBox(hypre_StructVectorDataSpace(x), i);
      xp = hypre_StructVectorBoxData(x, i);
      yp = hypre_StructVectorBoxData(y, i);

      hypre_BoxGetSize(box, loop_size);

      hypre_BoxLoop1Begin(hypre_StructVectorDim(x), loop_size,
                          x_data_box, start, unit_stride, vi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,vi ) HYPRE_SMP_SCHEDULE
#endif
      hypre_BoxLoop1For(vi)
      {
         yp[vi] = xp[vi];
      }
      hypre_BoxLoop1End(vi);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Set every coefficient of the vector (over the grid boxes, ghost zones
 * excluded) to the scalar 'values'. */
HYPRE_Int
hypre_StructVectorSetConstantValues( hypre_StructVector *vector,
                                     double              values )
{
   hypre_Box       *v_data_box;

   HYPRE_Int        vi;
   double          *vp;

   hypre_BoxArray  *boxes;
   hypre_Box       *box;
   hypre_Index      loop_size;
   hypre_IndexRef   start;
   hypre_Index      unit_stride;

   HYPRE_Int        i;

   /*-----------------------------------------------------------------------
    * Set the vector coefficients
    *-----------------------------------------------------------------------*/

   hypre_SetIndex(unit_stride, 1, 1, 1);

   boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(vector));
   hypre_ForBoxI(i, boxes)
   {
      box   = hypre_BoxArrayBox(boxes, i);
      start = hypre_BoxIMin(box);

      v_data_box =
         hypre_BoxArrayBox(hypre_StructVectorDataSpace(vector), i);
      vp = hypre_StructVectorBoxData(vector, i);

      hypre_BoxGetSize(box, loop_size);

      hypre_BoxLoop1Begin(hypre_StructVectorDim(vector), loop_size,
                          v_data_box, start, unit_stride, vi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,vi ) HYPRE_SMP_SCHEDULE
#endif
      hypre_BoxLoop1For(vi)
      {
         vp[vi] = values;
      }
      hypre_BoxLoop1End(vi);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Takes a function pointer of the form: double f(i,j,k)
*--------------------------------------------------------------------------*/
/* Fill the vector using a user function of the form: double f(i,j,k).
 * Forced single-block (non-threaded) because i,j,k are carried as loop
 * state; see the RDF comment below. */
HYPRE_Int
hypre_StructVectorSetFunctionValues( hypre_StructVector *vector,
                                     double            (*fcn)() )
{
   hypre_Box       *v_data_box;

   HYPRE_Int        vi;
   double          *vp;

   hypre_BoxArray  *boxes;
   hypre_Box       *box;
   hypre_Index      loop_size;
   hypre_IndexRef   start;
   hypre_Index      unit_stride;

   HYPRE_Int        b, i, j, k;

   /*-----------------------------------------------------------------------
    * Set the vector coefficients
    *-----------------------------------------------------------------------*/

   hypre_SetIndex(unit_stride, 1, 1, 1);

   boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(vector));
   hypre_ForBoxI(b, boxes)
   {
      box   = hypre_BoxArrayBox(boxes, b);
      start = hypre_BoxIMin(box);

      v_data_box =
         hypre_BoxArrayBox(hypre_StructVectorDataSpace(vector), b);
      vp = hypre_StructVectorBoxData(vector, b);

      hypre_BoxGetSize(box, loop_size);

      hypre_BoxLoop1Begin(hypre_StructVectorDim(vector), loop_size,
                          v_data_box, start, unit_stride, vi);
      i = hypre_IndexX(start);
      j = hypre_IndexY(start);
      k = hypre_IndexZ(start);

      /* RDF: This won't work as written with threading on */
#if 0
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,vi ) HYPRE_SMP_SCHEDULE
#endif
#else
      hypre_BoxLoopSetOneBlock();
#endif
      hypre_BoxLoop1For(vi)
      {
         /* NOTE(review): i, j and k are all incremented every iteration,
            so fcn is evaluated along a diagonal rather than being reset
            per row/plane of the box — verify this is the intended
            (i,j,k) sequence for the BoxLoop traversal order. */
         vp[vi] = fcn(i, j, k);
         i++;
         j++;
         k++;
      }
      hypre_BoxLoop1End(vi);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Zero all ghost-zone entries: for each box, subtract the grid box from
 * its data box and clear whatever remains. */
HYPRE_Int
hypre_StructVectorClearGhostValues( hypre_StructVector *vector )
{
   hypre_Box       *v_data_box;

   HYPRE_Int        vi;
   double          *vp;

   hypre_BoxArray  *boxes;
   hypre_Box       *box;
   hypre_BoxArray  *diff_boxes;
   hypre_Box       *diff_box;
   hypre_Index      loop_size;
   hypre_IndexRef   start;
   hypre_Index      unit_stride;

   HYPRE_Int        i, j;

   /*-----------------------------------------------------------------------
    * Clear the ghost coefficients
    *-----------------------------------------------------------------------*/

   hypre_SetIndex(unit_stride, 1, 1, 1);

   boxes = hypre_StructGridBoxes(hypre_StructVectorGrid(vector));
   diff_boxes = hypre_BoxArrayCreate(0);
   hypre_ForBoxI(i, boxes)
   {
      box        = hypre_BoxArrayBox(boxes, i);
      v_data_box = hypre_BoxArrayBox(hypre_StructVectorDataSpace(vector), i);

      /* diff_boxes = data box minus grid box = the ghost region */
      hypre_BoxArraySetSize(diff_boxes, 0);
      hypre_SubtractBoxes(v_data_box, box, diff_boxes);

      vp = hypre_StructVectorBoxData(vector, i);
      hypre_ForBoxI(j, diff_boxes)
      {
         diff_box = hypre_BoxArrayBox(diff_boxes, j);
         start = hypre_BoxIMin(diff_box);

         hypre_BoxGetSize(diff_box, loop_size);

         hypre_BoxLoop1Begin(hypre_StructVectorDim(vector), loop_size,
                             v_data_box, start, unit_stride, vi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,vi ) HYPRE_SMP_SCHEDULE
#endif
         hypre_BoxLoop1For(vi)
         {
            vp[vi] = 0.0;
         }
         hypre_BoxLoop1End(vi);
      }
   }
   hypre_BoxArrayDestroy(diff_boxes);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* clears vector values on the physical boundaries
*--------------------------------------------------------------------------*/
/* Zero ghost values that lie on the physical boundary of the grid.
 * Skipped when the bound-ghost region is already clear, unless 'force'
 * is nonzero. */
HYPRE_Int
hypre_StructVectorClearBoundGhostValues( hypre_StructVector *vector,
                                         HYPRE_Int           force )
{
   HYPRE_Int        vi;
   double          *vp;
   hypre_BoxArray  *boxes;
   hypre_Box       *box;
   hypre_Box       *v_data_box;
   hypre_Index      loop_size;
   hypre_IndexRef   start;
   hypre_Index      stride;
   hypre_Box       *bbox;
   hypre_StructGrid *grid;
   hypre_BoxArray  *boundary_boxes;
   hypre_BoxArray  *array_of_box;
   hypre_BoxArray  *work_boxarray;

   HYPRE_Int        i, i2;

   /*-----------------------------------------------------------------------
    * Clear the boundary ghost coefficients
    *-----------------------------------------------------------------------*/

   /* Only clear if not clear already or if force argument is set */
   if (hypre_StructVectorBGhostNotClear(vector) || force)
   {
      grid  = hypre_StructVectorGrid(vector);
      boxes = hypre_StructGridBoxes(grid);
      hypre_SetIndex(stride, 1, 1, 1);

      hypre_ForBoxI(i, boxes)
      {
         box = hypre_BoxArrayBox(boxes, i);
         boundary_boxes = hypre_BoxArrayCreate( 0 );
         v_data_box =
            hypre_BoxArrayBox(hypre_StructVectorDataSpace(vector), i);
         hypre_BoxBoundaryG( v_data_box, grid, boundary_boxes );
         vp = hypre_StructVectorBoxData(vector, i);

         /* box is a grid box, no ghost zones.
            v_data_box is vector data box, may or may not have ghost zones
            To get only ghost zones, subtract box from boundary_boxes. */
         work_boxarray = hypre_BoxArrayCreate( 0 );
         array_of_box  = hypre_BoxArrayCreate( 1 );
         hypre_BoxArrayBoxes(array_of_box)[0] = *box;
         /* NOTE(review): the subtraction result goes into work_boxarray,
            but the loop below iterates boundary_boxes — confirm whether
            work_boxarray was meant to be looped instead. */
         hypre_SubtractBoxArrays( boundary_boxes, array_of_box, work_boxarray );

         hypre_ForBoxI(i2, boundary_boxes)
         {
            bbox = hypre_BoxArrayBox(boundary_boxes, i2);
            hypre_BoxGetSize(bbox, loop_size);
            start = hypre_BoxIMin(bbox);
            hypre_BoxLoop1Begin(hypre_StructVectorDim(vector), loop_size,
                                v_data_box, start, stride, vi);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,vi ) HYPRE_SMP_SCHEDULE
#endif
            hypre_BoxLoop1For(vi)
            {
               vp[vi] = 0.0;
            }
            hypre_BoxLoop1End(vi);
         }
         hypre_BoxArrayDestroy(boundary_boxes);
         hypre_BoxArrayDestroy(work_boxarray);
         hypre_BoxArrayDestroy(array_of_box);
      }

      /* remember that the boundary ghosts are now clear */
      hypre_StructVectorBGhostNotClear(vector) = 0;
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Multiply every entry of the vector's raw data array by 'factor'.
 * Operates on the full data array (ghost entries included): a synthetic
 * 1-D box of extents (1,1,1)..(data_size,1,1) is looped over, with imin
 * reused as both the start index and the (unit) stride. */
HYPRE_Int
hypre_StructVectorScaleValues( hypre_StructVector *vector, double factor )
{
   HYPRE_Int         datai;
   double           *data;

   hypre_Index       imin;
   hypre_Index       imax;
   hypre_Box        *box;
   hypre_Index       loop_size;

   /*-----------------------------------------------------------------------
    * Scale the vector coefficients
    *-----------------------------------------------------------------------*/

   box = hypre_BoxCreate();
   hypre_SetIndex(imin, 1, 1, 1);
   hypre_SetIndex(imax, hypre_StructVectorDataSize(vector), 1, 1);
   hypre_BoxSetExtents(box, imin, imax);
   data = hypre_StructVectorData(vector);
   hypre_BoxGetSize(box, loop_size);

   hypre_BoxLoop1Begin(hypre_StructVectorDim(vector), loop_size,
                       box, imin, imin, datai);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai) HYPRE_SMP_SCHEDULE
#endif
   hypre_BoxLoop1For(datai)
   {
      data[datai] *= factor;
   }
   hypre_BoxLoop1End(datai);

   hypre_BoxDestroy(box);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Build a communication package describing the data movement needed to
 * migrate values from 'from_vector' to 'to_vector' (possibly distributed
 * differently).  Caller owns the returned package. */
hypre_CommPkg *
hypre_StructVectorGetMigrateCommPkg( hypre_StructVector *from_vector,
                                     hypre_StructVector *to_vector )
{
   hypre_CommInfo  *comm_info;
   hypre_CommPkg   *comm_pkg;

   /*------------------------------------------------------
    * Set up hypre_CommPkg
    *------------------------------------------------------*/

   hypre_CreateCommInfoFromGrids(hypre_StructVectorGrid(from_vector),
                                 hypre_StructVectorGrid(to_vector),
                                 &comm_info);
   hypre_CommPkgCreate(comm_info,
                       hypre_StructVectorDataSpace(from_vector),
                       hypre_StructVectorDataSpace(to_vector), 1, NULL, 0,
                       hypre_StructVectorComm(from_vector), &comm_pkg);
   hypre_CommInfoDestroy(comm_info);
   /* is this correct for periodic? */

   return comm_pkg;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Move vector data from 'from_vector' to 'to_vector' using the comm
 * package built by hypre_StructVectorGetMigrateCommPkg.  Blocks until
 * the exchange completes. */
HYPRE_Int
hypre_StructVectorMigrate( hypre_CommPkg      *comm_pkg,
                           hypre_StructVector *from_vector,
                           hypre_StructVector *to_vector )
{
   hypre_CommHandle *comm_handle;

   /*-----------------------------------------------------------------------
    * Migrate the vector data
    *-----------------------------------------------------------------------*/

   hypre_InitializeCommunication(comm_pkg,
                                 hypre_StructVectorData(from_vector),
                                 hypre_StructVectorData(to_vector), 0, 0,
                                 &comm_handle);
   hypre_FinalizeCommunication(comm_handle);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_StructVectorPrint
*--------------------------------------------------------------------------*/
/* Write the vector to per-rank files "<filename>.<myid>" (grid header
 * followed by box data).  'all' nonzero prints the full data space
 * (ghost zones included); otherwise only the grid boxes.  Exits the
 * process on file-open failure. */
HYPRE_Int
hypre_StructVectorPrint( const char         *filename,
                         hypre_StructVector *vector,
                         HYPRE_Int           all )
{
   FILE              *file;
   char               new_filename[255];

   hypre_StructGrid  *grid;
   hypre_BoxArray    *boxes;

   hypre_BoxArray    *data_space;

   HYPRE_Int          myid;

   /*----------------------------------------
    * Open file
    *----------------------------------------*/

   hypre_MPI_Comm_rank(hypre_StructVectorComm(vector), &myid );
   hypre_sprintf(new_filename, "%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "w")) == NULL)
   {
      hypre_printf("Error: can't open output file %s\n", new_filename);
      exit(1);
   }

   /*----------------------------------------
    * Print header info
    *----------------------------------------*/

   hypre_fprintf(file, "StructVector\n");

   /* print grid info */
   hypre_fprintf(file, "\nGrid:\n");
   grid = hypre_StructVectorGrid(vector);
   hypre_StructGridPrint(file, grid);

   /*----------------------------------------
    * Print data
    *----------------------------------------*/

   data_space = hypre_StructVectorDataSpace(vector);

   if (all)
      boxes = data_space;
   else
      boxes = hypre_StructGridBoxes(grid);

   hypre_fprintf(file, "\nData:\n");
   hypre_PrintBoxArrayData(file, boxes, data_space, 1,
                           hypre_StructVectorData(vector));

   /*----------------------------------------
    * Close file
    *----------------------------------------*/

   fflush(file);
   fclose(file);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/* Read a vector previously written by hypre_StructVectorPrint from the
 * per-rank file "<filename>.<myid>".  'num_ghost' gives the ghost-layer
 * widths for the new vector.  Returns a fully assembled vector; exits
 * the process on file-open failure. */
hypre_StructVector *
hypre_StructVectorRead( MPI_Comm    comm,
                        const char *filename,
                        HYPRE_Int  *num_ghost )
{
   FILE                 *file;
   char                  new_filename[255];

   hypre_StructVector   *vector;

   hypre_StructGrid     *grid;
   hypre_BoxArray       *boxes;

   hypre_BoxArray       *data_space;

   HYPRE_Int             myid;

   /*----------------------------------------
    * Open file
    *----------------------------------------*/

#ifdef HYPRE_USE_PTHREADS
#if hypre_MPI_Comm_rank == hypre_thread_MPI_Comm_rank
#undef hypre_MPI_Comm_rank
#endif
#endif

   hypre_MPI_Comm_rank(comm, &myid );
   hypre_sprintf(new_filename, "%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "r")) == NULL)
   {
      /* NOTE(review): message says "output file" though this opens for
         reading — consider "input file" if this text may change. */
      hypre_printf("Error: can't open output file %s\n", new_filename);
      exit(1);
   }

   /*----------------------------------------
    * Read header info
    *----------------------------------------*/

   hypre_fscanf(file, "StructVector\n");

   /* read grid info */
   hypre_fscanf(file, "\nGrid:\n");
   hypre_StructGridRead(comm,file,&grid);

   /*----------------------------------------
    * Initialize the vector
    *----------------------------------------*/

   vector = hypre_StructVectorCreate(comm, grid);
   hypre_StructVectorSetNumGhost(vector, num_ghost);
   hypre_StructVectorInitialize(vector);

   /*----------------------------------------
    * Read data
    *----------------------------------------*/

   boxes      = hypre_StructGridBoxes(grid);
   data_space = hypre_StructVectorDataSpace(vector);
   hypre_fscanf(file, "\nData:\n");
   hypre_ReadBoxArrayData(file, boxes, data_space, 1,
                          hypre_StructVectorData(vector));

   /*----------------------------------------
    * Assemble the vector
    *----------------------------------------*/

   hypre_StructVectorAssemble(vector);

   /*----------------------------------------
    * Close file
    *----------------------------------------*/

   fclose(file);

   return vector;
}
/*--------------------------------------------------------------------------
* The following is used only as a debugging aid.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_StructVectorMaxValue( hypre_StructVector *vector,
                            double *max_value, HYPRE_Int *max_index,
                            hypre_Index max_xyz_index )
/* Input: vector, and pointers to where to put returned data.
   Return value: error flag, 0 means ok.
   Finds the maximum value in a vector, puts it in max_value.
   The corresponding index is put in max_index.
   A hypre_Index corresponding to max_index is put in max_xyz_index.
   We assume that there is only one box to deal with.
   Debugging aid only — errors out if the vector has more than one box. */
{
   HYPRE_Int         datai;
   double           *data;

   hypre_Index       imin;
   hypre_BoxArray   *boxes;
   hypre_Box        *box;
   hypre_Index       loop_size;
   hypre_Index       unit_stride;

   HYPRE_Int         i;
   double            maxvalue;
   HYPRE_Int         maxindex;

   boxes = hypre_StructVectorDataSpace(vector);
   if ( hypre_BoxArraySize(boxes)!=1 )
   {
      /* if more than one box, the return system max_xyz_index is too simple
         if needed, fix later */
      hypre_error(HYPRE_ERROR_GENERIC);
      return hypre_error_flag;
   }
   hypre_SetIndex(unit_stride, 1, 1, 1);
   hypre_ForBoxI(i, boxes)
   {
      box = hypre_BoxArrayBox(boxes, i);
      /*v_data_box =
        hypre_BoxArrayBox(hypre_StructVectorDataSpace(vector), i);*/
      data = hypre_StructVectorBoxData(vector, i);
      hypre_BoxGetSize(box, loop_size);
      hypre_CopyIndex( hypre_BoxIMin(box), imin );
      hypre_BoxLoop1Begin(hypre_StructVectorDim(vector), loop_size,
                          box, imin, unit_stride, datai);
      /* start the running max at the first entry of the box */
      maxindex = hypre_BoxIndexRank( box, imin );
      maxvalue = data[maxindex];
      hypre_ClearIndex(max_xyz_index);
      /* NOTE(review): maxvalue/maxindex/max_xyz_index are updated inside
         the parallel loop without any reduction or synchronization — this
         is a data race when OpenMP is enabled; acceptable only as a
         debugging aid, but consider a critical section or reduction. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,datai) HYPRE_SMP_SCHEDULE
#endif
      hypre_BoxLoop1For(datai)
      {
         if ( data[datai] > maxvalue )
         {
            maxvalue = data[datai];
            maxindex = datai;
            hypre_BoxLoopGetIndex(max_xyz_index);
         }
      }
      hypre_BoxLoop1End(datai);
      /* convert the loop-relative index to an absolute grid index */
      hypre_AddIndex(max_xyz_index, imin, max_xyz_index);
   }

   *max_value = maxvalue;
   *max_index = maxindex;

   return hypre_error_flag;
}
|
sylcount.c | #include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <stdbool.h>
#include <stdint.h>
#include <math.h>
#include "include/reactor.h"
#include "include/RNACI.h"
#include "include/sylcount.h"
#define BUFLEN 64
#define ITER_PER_CHECK 256
#define CHARPT(x,i) ((char*)CHAR(STRING_ELT(x,i)))
// True when 'c' terminates a sentence ('.', ';', '!' or '?').
static inline bool is_sentend(const char c)
{
  switch (c)
  {
    case '.':
    case ';':
    case '!':
    case '?':
      return true;
    default:
      return false;
  }
}
// True when 'c' terminates a word: whitespace, punctuation, or NUL.
// FIX: cast to unsigned char before the <ctype.h> calls — passing a plain
// char with a negative value (bytes >= 0x80 where char is signed) is
// undefined behavior (CERT STR37-C).
static inline bool is_wordend(const char c)
{
  const unsigned char uc = (unsigned char) c;
  return (isspace(uc) || ispunct(uc) || c == '\0');
}
// -------------------------------------------------------
// Various "readability" score-ers
// -------------------------------------------------------
// Flesch reading ease
// Flesch reading ease: higher scores mean easier text.
static inline double re_score(const uint32_t tot_words, const uint32_t tot_sents, const uint32_t tot_sylls)
{
  const double words_per_sent = (double) tot_words/tot_sents;
  const double sylls_per_word = (double) tot_sylls/tot_words;
  return 206.835 - 1.015*words_per_sent - 84.6*sylls_per_word;
}
// Flesch-Kincaid grade level
// Flesch-Kincaid grade level: maps text difficulty to a US school grade.
static inline double gl_score(const uint32_t tot_words, const uint32_t tot_sents, const uint32_t tot_sylls)
{
  const double words_per_sent = (double) tot_words/tot_sents;
  const double sylls_per_word = (double) tot_sylls/tot_words;
  return 0.39*words_per_sent + 11.8*sylls_per_word - 15.59;
}
// Automated Readability Index
static inline int ari_score(const uint32_t tot_chars, const uint32_t tot_words, const uint32_t tot_sents)
{
return (int) ceil(4.71 * ((double) tot_chars/tot_words) + 0.5 * ((double) tot_words/tot_sents) - 21.43);
}
// Simple Measure of Gobbledygook
static inline double smog_score(const uint32_t tot_polys, const uint32_t tot_sents)
{
return 1.043 * sqrt(30.0 * ((double) tot_polys/tot_sents)) + 3.1291;
}
// Coleman-Liau
// Coleman-Liau index, computed from letters and sentences per 100 words.
static inline double cl_score(const uint32_t tot_chars, const uint32_t tot_words, const uint32_t tot_sents)
{
  const double chars_per_100_words = (double) 100.0 * tot_chars/tot_words;
  const double sents_per_100_words = (double) 100.0 * tot_sents/tot_words;
  return 0.0588*chars_per_100_words - 0.296*sents_per_100_words - 15.8;
}
// Zero all count columns of row 'i' — used for empty or all-blank input
// strings where no counting is performed.
static inline void counts_set_degenerate(SEXP chars, SEXP wordchars, SEXP words, SEXP nw,
  SEXP sents, SEXP sylls, SEXP polys, const int i)
{
  INT(chars, i) = 0;
  INT(wordchars, i) = 0;
  INT(words, i) = 0;
  INT(nw, i) = 0;
  INT(sents, i) = 0;
  INT(sylls, i) = 0;
  INT(polys, i) = 0;
}
// Mark all score columns of row 'i' as missing (NaN / NA) — the scores
// are undefined when the counts are degenerate (division by zero).
static inline void scores_set_degenerate(SEXP re, SEXP gl, SEXP ari, SEXP smog,
  SEXP cl, const int i)
{
  DBL(re, i) = R_NaN;
  DBL(gl, i) = R_NaN;
  INT(ari, i) = NA_INTEGER;
  DBL(smog, i) = R_NaN;
  DBL(cl, i) = R_NaN;
}
// Compute per-document counts (chars, words, sentences, syllables, ...)
// and readability scores (re, gl, ari, smog, cl) for a character vector
// 's_', using up to 'nthreads_' OpenMP threads.  Returns a 12-column
// data.frame with one row per input string.
SEXP R_readability(SEXP s_, SEXP nthreads_)
{
  SEXP ret, ret_names;
  SEXP chars, wordchars, words, nw, sents, sylls, polys;
  SEXP ari, re, gl, smog, cl;
  CHECK_IS_STRINGS(s_);
  CHECK_IS_POSINT(nthreads_, "nthreads");

  const int len = LENGTH(s_);
  int nthreads = asInteger(nthreads_);

  newRvec(chars, len, "int");
  newRvec(wordchars, len, "int");
  newRvec(words, len, "int");
  newRvec(nw, len, "int");
  newRvec(sents, len, "int");
  newRvec(sylls, len, "int");
  newRvec(polys, len, "int");
  newRvec(re, len, "dbl");
  newRvec(gl, len, "dbl");
  newRvec(ari, len, "int");
  newRvec(smog, len, "dbl");
  newRvec(cl, len, "dbl");

#ifdef _OPENMP
#pragma omp parallel num_threads(nthreads)
#endif
  {
    // per-thread scratch buffer for the current word (incl. NUL)
    char buf[BUFLEN];
#ifdef _OPENMP
#pragma omp for
#endif
    for (int i=0; i<len; i++)
    {
      const char *const s = CHARPT(s_, i);
      const int slen = strlen(s);

      // skip leading blanks; empty / all-blank strings get NA-style rows
      int j = 0;
      while (j < slen && s[j] == ' ')
        j++;
      if (slen == 0 || j == slen)
      {
        counts_set_degenerate(chars, wordchars, words, nw, sents, sylls, polys, i);
        scores_set_degenerate(re, gl, ari, smog, cl, i);
        continue;
      }

      uint32_t tot_wordchars = 0;
      uint32_t tot_words = 0;
      uint32_t tot_nonwords = 0;
      uint32_t tot_sents = 0;
      uint32_t tot_sylls = 0;
      uint32_t tot_polys = 0;
      int start = 0;
      int end;

      // j<=slen so the terminating NUL closes the final word
      for (; j<=slen && slen>0; j++)
      {
        // cast to unsigned char: ctype with a negative char is UB
        if (isalnum((unsigned char) s[j]))
          tot_wordchars++;
        else if (is_sentend(s[j]))
          tot_sents++;

        if (is_wordend(s[j]))
        {
          // try to account for acronyms
          while (ispunct((unsigned char) s[j]) && !isspace((unsigned char) s[j+1]))
            j++;
          end = j;
          // FIX: buf needs end-start word bytes PLUS the NUL terminator;
          // the old test (end-start > BUFLEN) allowed a word of exactly
          // BUFLEN chars, so buf[end-start] = '\0' below wrote one byte
          // past the buffer.  This also matches R_corpus_summary().
          if (end-start+1 > BUFLEN)
          {
            tot_nonwords++;
            continue;
          }
          else
            tot_words++;

          memcpy(buf, s+start, end-start);
          buf[end-start] = '\0';

          uint32_t word_sylls = count_syllables(buf, end-start);
          tot_sylls += word_sylls;
          // "polysyllable" = 3+ syllables (used by SMOG)
          if (word_sylls > 2)
            tot_polys++;

          if (is_sentend(s[j]))
            tot_sents++;

          // skip the separator run; the for's j++ then skips the first
          // character of the next word, so count it here
          while (ispunct((unsigned char) s[j]) || isspace((unsigned char) s[j]))
            j++;
          start = j;
          if (isalnum((unsigned char) s[j]))
            tot_wordchars++;
        }
      }

      INT(chars, i) = slen;
      INT(wordchars, i) = tot_wordchars;
      INT(words, i) = tot_words;
      INT(nw, i) = tot_nonwords;
      INT(sents, i) = tot_sents;
      INT(sylls, i) = tot_sylls;
      INT(polys, i) = tot_polys;
      DBL(re, i) = re_score(tot_words, tot_sents, tot_sylls);
      DBL(gl, i) = gl_score(tot_words, tot_sents, tot_sylls);
      INT(ari, i) = ari_score(tot_wordchars, tot_words, tot_sents);
      DBL(smog, i) = smog_score(tot_polys, tot_sents);
      DBL(cl, i) = cl_score(tot_wordchars, tot_words, tot_sents);
    }
  }

  make_list_names(ret_names, 12, "chars", "wordchars", "words", "nonwords", "sents", "sylls", "polys", "re", "gl", "ari", "smog", "cl");
  make_dataframe(ret, RNULL, ret_names, 12, chars, wordchars, words, nw, sents, sylls, polys, re, gl, ari, smog, cl);
  R_END;
  return ret;
}
// -------------------------------------------------------
// Syllable counter
// can not be put into separate file because gperf data isn't guarded correctly
// -------------------------------------------------------
// Count words in buf (length 'len', NUL-terminated): a word ends at
// whitespace, punctuation, or the terminating NUL.  Scans up to and
// including buf[len] so the final word is counted.
static inline int count_words(const int len, const char*const restrict buf)
{
  int count = 0;
  int pos = 0;
  while (pos <= len)
  {
    const char c = buf[pos];
    // word boundary: whitespace, punctuation, or NUL terminator
    if (isspace(c) || ispunct(c) || c == '\0')
    {
      count++;
      // swallow the whole run of separators following the boundary
      while (ispunct(buf[pos]) || isspace(buf[pos]))
        pos++;
    }
    pos++;
  }
  return count;
}
// NOTE: not thread safe because of the R object memory allocations
// For each input string, build a data.frame of (word, syllable-count)
// pairs; words longer than BUFLEN get NA syllables.  Returns a list of
// data.frames, one per input string.
// NOTE: not thread safe because of the R object memory allocations
static SEXP R_sylcount_countsAndWords(SEXP s_)
{
  SEXP ret;
  const int len = LENGTH(s_);
  newRlist(ret, len);
  for (int i=0; i<len; i++)
  {
    SEXP localdf, localdf_names;
    SEXP word, sylls;
    const char *const s = CHARPT(s_, i);
    const int slen = strlen(s);
    // NOTE(review): unlike R_sylcount_countsOnly there is no slen==0
    // shortcut here; an empty string yields one empty "word" — confirm
    // this asymmetry is intended.
    int nwords = count_words(slen, s);
    newRvec(word, nwords, "str");
    newRvec(sylls, nwords, "int");
    make_list_names(localdf_names, 2, "word", "syllables");
    make_dataframe(localdf, RNULL, localdf_names, 2, word, sylls);
    SET_VECTOR_ELT(ret, i, localdf);
    int start = 0;
    int end;
    int words_found = 0;
    // j<=slen so the terminating NUL closes the final word
    for (int j=0; j<=slen; j++)
    {
      if (is_wordend(s[j]))
      {
        end = j;
        const int wordlen = end-start;
        SET_STRING_ELT(word, words_found, mkCharLen(s+start, wordlen));
        // long words are recorded but their syllables are NA; no local
        // buffer is written here, so this threshold is a policy choice
        // (it is off by one from the buffer-bound checks elsewhere)
        if (wordlen > BUFLEN)
          INT(sylls, words_found) = NA_INTEGER;
        else
          INT(sylls, words_found) = count_syllables(CHARPT(word, words_found), wordlen);
        // skip the separator run; the for's j++ moves past the first
        // character of the next word's boundary scan
        while (ispunct(s[j]) || isspace(s[j]))
          j++;
        start = j;
        words_found++;
      }
    }
    // balance the PROTECTs from word, sylls, localdf_names, localdf
    UNPROTECT(4);
  }
  // R_END;
  UNPROTECT(1);
  return ret;
}
// NOTE: not thread safe because of the R object memory allocations
// For each input string return an integer vector of per-word syllable
// counts (NA for words that don't fit the scratch buffer); empty input
// strings map to a scalar NA.  Returns a list, one element per string.
// NOTE: not thread safe because of the R object memory allocations
static SEXP R_sylcount_countsOnly(SEXP s_)
{
  SEXP ret;
  // scratch buffer for the current word, including the NUL terminator
  char buf[BUFLEN];
  const int len = LENGTH(s_);
  newRlist(ret, len);
  for (int i=0; i<len; i++)
  {
    SEXP sylls;
    const char*const s = CHARPT(s_, i);
    const int slen = strlen(s);
    if (slen == 0)
    {
      SET_VECTOR_ELT(ret, i, ScalarInteger(NA_INTEGER));
      continue;
    }
    int nwords = count_words(slen, s);
    newRvec(sylls, nwords, "int");
    SET_VECTOR_ELT(ret, i, sylls);
    int start = 0;
    int end;
    int words_found = 0;
    // j<=slen so the terminating NUL closes the final word
    for (int j=0; j<=slen; j++)
    {
      if (is_wordend(s[j]))
      {
        end = j;
        const int wordlen = end - start;
        // FIX: buf needs wordlen bytes PLUS the NUL terminator; the old
        // test (wordlen > BUFLEN) allowed a word of exactly BUFLEN chars,
        // so buf[wordlen] = '\0' below wrote one byte past the buffer.
        if (wordlen + 1 > BUFLEN)
          INT(sylls, words_found) = NA_INTEGER;
        else
        {
          memcpy(buf, s+start, wordlen);
          buf[wordlen] = '\0';
          INT(sylls, words_found) = count_syllables(buf, wordlen);
        }
        // skip the separator run before scanning the next word
        // (cast to unsigned char: ctype with a negative char is UB)
        while (ispunct((unsigned char) s[j]) || isspace((unsigned char) s[j]))
          j++;
        start = j;
        words_found++;
      }
    }
    UNPROTECT(1);
  }
  // R_END;
  UNPROTECT(1);
  return ret;
}
// Entry point for R's sylcount(): dispatch on the counts.only flag to
// either the counts-only or the counts-and-words implementation.
SEXP R_sylcount(SEXP s, SEXP counts_only)
{
  CHECK_IS_STRINGS(s);
  CHECK_IS_FLAG(counts_only, "counts.only");

  if (INT(counts_only))
    return R_sylcount_countsOnly(s);
  else
    return R_sylcount_countsAndWords(s);
}
// -------------------------------------------------------
// Basic text document count summaries
// -------------------------------------------------------
// Compute per-document counts only (chars, wordchars, words, nonwords,
// sents, sylls, polys) for a character vector 's_', using up to
// 'nthreads_' OpenMP threads.  Same counting pass as R_readability but
// without the score columns; returns a 7-column data.frame.
SEXP R_corpus_summary(SEXP s_, SEXP nthreads_)
{
  SEXP ret, ret_names;
  SEXP chars, wordchars, words, nw, sents, sylls, polys;
  CHECK_IS_STRINGS(s_);
  CHECK_IS_POSINT(nthreads_, "nthreads");

  const int len = LENGTH(s_);
  int nthreads = asInteger(nthreads_);

  newRvec(chars, len, "int");
  newRvec(wordchars, len, "int");
  newRvec(words, len, "int");
  newRvec(nw, len, "int");
  newRvec(sents, len, "int");
  newRvec(sylls, len, "int");
  newRvec(polys, len, "int");

#ifdef _OPENMP
#pragma omp parallel num_threads(nthreads)
#endif
  {
    // per-thread scratch buffer for the current word (incl. NUL)
    char buf[BUFLEN];
#ifdef _OPENMP
#pragma omp for
#endif
    for (int i=0; i<len; i++)
    {
      const char *const s = CHARPT(s_, i);
      const int slen = strlen(s);

      // skip leading blanks; empty / all-blank strings get a zero row
      int j = 0;
      while (j < slen && s[j] == ' ')
        j++;
      if (slen == 0 || j == slen)
      {
        counts_set_degenerate(chars, wordchars, words, nw, sents, sylls, polys, i);
        continue;
      }

      uint32_t tot_wordchars = 0;
      uint32_t tot_words = 0;
      uint32_t tot_nonwords = 0;
      uint32_t tot_sents = 0;
      uint32_t tot_sylls = 0;
      uint32_t tot_polys = 0;
      int start = 0;
      int end;

      // j<=slen so the terminating NUL closes the final word
      for (; j<=slen && slen>0; j++)
      {
        if (isalnum(s[j]))
          tot_wordchars++;
        else if (is_sentend(s[j]))
          tot_sents++;

        if (is_wordend(s[j]))
        {
          // try to account for acronyms
          while (ispunct(s[j]) && !isspace(s[j+1]))
            j++;
          end = j;
          // +1 leaves room for the NUL written into buf below
          if (end-start+1 > BUFLEN)
          {
            tot_nonwords++;
            continue;
          }
          else
            tot_words++;

          memcpy(buf, s+start, end-start);
          buf[end-start] = '\0';

          uint32_t word_sylls = count_syllables(buf, end-start);
          tot_sylls += word_sylls;
          // "polysyllable" = 3+ syllables
          if (word_sylls > 2)
            tot_polys++;

          if (is_sentend(s[j]))
            tot_sents++;

          // skip the separator run; the for's j++ then skips the first
          // character of the next word, so count it here
          while (ispunct(s[j]) || isspace(s[j]))
            j++;
          start = j;
          if (isalnum(s[j]))
            tot_wordchars++;
        }
      }

      INT(chars, i) = slen;
      INT(wordchars, i) = tot_wordchars;
      INT(words, i) = tot_words;
      INT(nw, i) = tot_nonwords;
      INT(sents, i) = tot_sents;
      INT(sylls, i) = tot_sylls;
      INT(polys, i) = tot_polys;
    }
  }

  make_list_names(ret_names, 7, "chars", "wordchars", "words", "nonwords", "sents", "sylls", "polys");
  make_dataframe(ret, RNULL, ret_names, 7, chars, wordchars, words, nw, sents, sylls, polys);
  R_END;
  return ret;
}
|
CMarchingCubesFast.h | ///////////////////////////////////////////////////////////////////////////////
// $Id$
//
// 3DimViewer
// Lightweight 3D DICOM viewer.
//
// Copyright 2008-2016 3Dim Laboratory s.r.o.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef CMarchingCubesFast_H
#define CMarchingCubesFast_H
////////////////////////////////////////////////////////////
// Includes
#include <geometry/base/types.h>
// VPL
#include <VPL/Image/DensityVolume.h>
#include <VPL/Module/Serializable.h>
#include <VPL/Module/Progress.h>
#include <VPL/Image/Vector3.h>
// STL
#include <algorithm>    // std::min
#include <cassert>      // assert
#include <map>
#include <set>
#include <vector>
////////////////////////////////////////////////////////////
//! Volume-sampling interface used by the fast marching cubes algorithm.
//! Implementations return a per-voxel value together with the volume
//! dimensions (in voxels) and the physical voxel size.
class IMarchingCubesFastFunctor
{
public:
    //! Virtual destructor so concrete functors can be destroyed through a
    //! base-class pointer (deleting a polymorphic object without one is UB).
    virtual ~IMarchingCubesFastFunctor() = default;

    //! Sample the volume at voxel (x, y, z).
    virtual unsigned char operator()(int x, int y, int z) const = 0;

    //! Volume dimensions in voxels.
    virtual vpl::img::CSize3i getVolumeDimensions() const = 0;

    //! Physical size of a single voxel.
    virtual vpl::img::CSize3d getVoxelSize() const = 0;
};
////////////////////////////////////////////////////////////
//! Marching cubes worker - MC algorithm implementation class.
//! Processes one sub-volume (volume of interest) of the input volume and
//! appends the resulting vertex coordinates and triangle indices to plain
//! vectors. Doesn't use OpenMesh.
//! NOTE(review): method bodies are defined in a separate .cpp; the comments
//! below describe the declared interface only.
class CMarchingCubesWorkerFast
{
private:
// Physical size of one voxel.
vpl::img::CSize3d m_voxelSize;
// Running index of the next vertex to emit.
int m_index;
//! Pointer to the cube codes of the upper slice.
unsigned char *m_cube_code_matrix;
//! Pointer to the cube codes of the lower slice.
unsigned char *m_down_cube_code_matrix;
//! Pointers to the voxel state matrices, upper and lower slice.
unsigned char *m_state_matrix_up;
unsigned char *m_state_matrix_down;
//! Vertex coordinates of the cube currently being processed.
geometry::Vec3 m_cube_vertices[8];
//! Upper-slice work matrices of edge vertices and their indices
//! (h = horizontal edges, v = vertical edges).
bool *m_node_matrix_up_h;
bool *m_node_matrix_up_v;
int *m_node_matrix_up_h_index;
int *m_node_matrix_up_v_index;
//! Lower-slice work matrices of edge vertices and their indices.
bool *m_node_matrix_down_h;
bool *m_node_matrix_down_v;
int *m_node_matrix_down_h_index;
int *m_node_matrix_down_v_index;
//! Middle (between slices) work matrices of edge vertices and indices.
bool *m_node_matrix_middle;
int *m_node_matrix_middle_index;
//! Dimensions of the helper work matrices.
int m_work_matrices_size_x;
int m_work_matrices_size_y;
//! Bounds of the sub-volume this worker processes (in voxels).
vpl::tSize m_volume_start_x;
vpl::tSize m_volume_start_y;
vpl::tSize m_volume_start_z;
vpl::tSize m_volume_end_x;
vpl::tSize m_volume_end_y;
vpl::tSize m_volume_end_z;
public:
CMarchingCubesWorkerFast();
~CMarchingCubesWorkerFast();
//! Set work area - in voxels.
void setVolumeOfInterest(vpl::tSize startX, vpl::tSize startY, vpl::tSize startZ, vpl::tSize endX, vpl::tSize endY, vpl::tSize endZ);
//! Marching cubes mesh generation over the configured volume of interest.
//! param xCoords Vector of x coordinates of final vertices.
//! param yCoords Vector of y coordinates of final vertices.
//! param zCoords Vector of z coordinates of final vertices.
//! param indicies Vector of indicies marking final triangles.
bool generateMesh(std::vector<double> &xCoords, std::vector<double> &yCoords, std::vector<double> &zCoords, std::vector<int> &indicies, const IMarchingCubesFastFunctor *volume_functor);
protected:
//! (De)Allocation of the helper work matrices.
void allocWorktMem(int size_x, int size_y);
void deallocWorktMem();
//! Emit the triangles of the current cube, given the processed-plane
//! coordinates and the cube code.
//! param xCoords Vector of x coordinates of final vertices.
//! param yCoords Vector of y coordinates of final vertices.
//! param zCoords Vector of z coordinates of final vertices.
//! param indicies Vector of indicies marking final triangles.
void makeTri(std::vector<double> &xCoords, std::vector<double> &yCoords, std::vector<double> &zCoords, std::vector<int> &indicies, int x, int y, unsigned char cube_code);
//! Patch holes between adjacent cubes of differing resolution.
void holeFilling(std::vector<double> &xCoords, std::vector<double> &yCoords, std::vector<double> &zCoords, std::vector<int> &indicies, int x, int y, unsigned char cube_code);
//! Get/Set the vertex of a cube node by coordinates and cube edge index.
bool getCubeEdgeNode(int edge_index, int x, int y, int &index);
void setCubeEdgeNode(int edge_index, int x, int y, bool new_vertex);
//! Set cube node Z/Y/X coordinates.
void setCodeCoordinateZ(double z, double dz);
void setCodeCoordinateY(double y, double dy);
void setCodeCoordinateX(double x, double dx);
//! Get cube code of the down/front/left neighbour cube.
unsigned char getCubeCodeDown(int x, int y);
unsigned char getCubeCodeFront(int x, int y);
unsigned char getCubeCodeLeft(int x, int y);
//! Create the current cube code and store it in the code matrix.
unsigned char makeCubeCode(int x, int y);
//! Calculate number of nodes for cube code.
int cubeCodeNodeNumber(unsigned char cube_code);
};
////////////////////////////////////////////////////////////
//! Fast marching cubes class.
//! Generates mesh using parallel by using CMarchingCubesWorkerFast.
//! Simplified algorithm with no postprocessing.
//! Uses tpContainer for output and fill it with final vertices and indices of the triangles.
//! Output container must have methods addVertex(double, double, double) and addIndex(int).
template<class tpContainer>
class CMarchingCubesFast
{
private:
    //! Physical voxel size of the processed volume.
    //! NOTE(review): never assigned within this class — confirm whether it
    //! is still needed or can be removed.
    vpl::img::CSize3d m_voxelSize;

public:
    //! Marching cubes class constructor.
    CMarchingCubesFast() {}

    //! Marching cubes class destructor.
    ~CMarchingCubesFast() {}

    //! Generates a triangle mesh from the given volume functor.
    //! The volume is partitioned into sub-volumes processed in parallel by
    //! CMarchingCubesWorkerFast instances; partial meshes are then merged
    //! into the output container with the vertex indices shifted so the
    //! final index buffer is globally consistent.
    //! \param output        Container providing addVertex(double,double,double)
    //!                      and addIndex(int).
    //! \param volumeFunctor Volume sampling interface.
    //! \return always true (workers report no failures here).
    bool generateMesh(tpContainer *output, const IMarchingCubesFastFunctor *volumeFunctor)
    {
        // Target number of sub-volumes per axis; each worker handles one.
        const vpl::tSize desiredCubesOnEdge = 8;

        vpl::img::CSize3i volumeSize = volumeFunctor->getVolumeDimensions();

        // Size of one sub-volume, rounded up so the whole volume is covered.
        vpl::img::CSize3i cubeSize;
        cubeSize.x() = (volumeSize.x() + desiredCubesOnEdge - 1) / desiredCubesOnEdge;
        cubeSize.y() = (volumeSize.y() + desiredCubesOnEdge - 1) / desiredCubesOnEdge;
        cubeSize.z() = (volumeSize.z() + desiredCubesOnEdge - 1) / desiredCubesOnEdge;

        // Worker grid dimensions (the +1 may yield empty trailing sub-volumes;
        // their VOIs are clamped below, so they simply produce no geometry).
        vpl::img::CVector3i cubesOnEdge;
        cubesOnEdge.x() = volumeSize.x() / cubeSize.x() + 1;
        cubesOnEdge.y() = volumeSize.y() / cubeSize.y() + 1;
        cubesOnEdge.z() = volumeSize.z() / cubeSize.z() + 1;

        const int workerCount = cubesOnEdge.x() * cubesOnEdge.y() * cubesOnEdge.z();
        std::vector<CMarchingCubesWorkerFast> workers(workerCount);

        // Assign each worker its clamped volume of interest.
        for (vpl::tSize z = 0; z < cubesOnEdge.z(); ++z)
        {
            for (vpl::tSize y = 0; y < cubesOnEdge.y(); ++y)
            {
                for (vpl::tSize x = 0; x < cubesOnEdge.x(); ++x)
                {
                    vpl::tSize index = z * cubesOnEdge.y() * cubesOnEdge.x() + y * cubesOnEdge.x() + x;
                    CMarchingCubesWorkerFast &worker = workers[index];
                    worker.setVolumeOfInterest(
                        std::min(x * cubeSize.x(), volumeSize.x()),
                        std::min(y * cubeSize.y(), volumeSize.y()),
                        std::min(z * cubeSize.z(), volumeSize.z()),
                        std::min((x + 1) * cubeSize.x(), volumeSize.x() + 1),
                        std::min((y + 1) * cubeSize.y(), volumeSize.y() + 1),
                        std::min((z + 1) * cubeSize.z(), volumeSize.z() + 1));
                }
            }
        }

        // Per-worker output buffers.
        std::vector<std::vector<double> > xCoords(workerCount);
        std::vector<std::vector<double> > yCoords(workerCount);
        std::vector<std::vector<double> > zCoords(workerCount);
        std::vector<std::vector<int> > indicies(workerCount);

        // Generate the sub-meshes in parallel.
        #pragma omp parallel for
        for (int i = 0; i < workerCount; ++i)
        {
            workers[i].generateMesh(xCoords[i], yCoords[i], zCoords[i], indicies[i], volumeFunctor);
        }

        // Merge: append vertices, shifting each worker's indices by the
        // number of vertices emitted so far.
        int verticesSum = 0;
        for (int i = 0; i < workerCount; ++i)
        {
            const size_t xCoordsSize = xCoords[i].size();
            const size_t yCoordsSize = yCoords[i].size();
            const size_t zCoordsSize = zCoords[i].size();
            assert(xCoordsSize == yCoordsSize && yCoordsSize == zCoordsSize);
            (void)yCoordsSize;  // referenced only by the assert in release builds
            (void)zCoordsSize;

            const size_t indiciesSize = indicies[i].size();
            for (size_t v = 0; v < xCoordsSize; ++v)
            {
                output->addVertex(xCoords[i][v], yCoords[i][v], zCoords[i][v]);
            }
            for (size_t t = 0; t < indiciesSize; ++t)
            {
                output->addIndex(indicies[i][t] + verticesSum);
            }
            // FIX: make the size_t -> int conversion explicit (the implicit
            // narrowing drew compiler warnings).
            verticesSum += static_cast<int>(xCoordsSize);
        }

        return true;
    }
};
#endif // CMarchingCubesFast_H
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
|
NonUniformScheme.h | /*
* NonUniformScheme.h
* CubismUP_3D
*
* Created by Fabian Wermelinger 05/08/2017
* Copyright 2017 ETH Zurich. All rights reserved.
*
*/
#ifndef CubismUP_3D_NonUniformScheme_h
#define CubismUP_3D_NonUniformScheme_h
#include "../Definitions.h"
#include <Cubism/BlockInfo.h>
#include <Cubism/MeshMap.h>
#include <algorithm>    // std::min, std::max
#include <cassert>
#include <cmath>        // std::pow, std::sqrt
#include <cstdio>
#include <cstdlib>
#include <string>       // std::string parameters of _compute_mesh_stats
#include <vector>
CubismUP_3D_NAMESPACE_BEGIN
template <typename TBlock>
class NonUniformScheme
{
    //! Maximum halo (ghost-cell) width supported by any finite-difference
    //! scheme attached to this object.
    static constexpr size_t StencilMax = 3;

public:
    //! Construct a scheme for the domain [xS,xE] x [yS,yE] x [zS,zE],
    //! discretized by the given number of blocks per direction.
    NonUniformScheme(
            const double xS, const double xE,
            const double yS, const double yE,
            const double zS, const double zE,
            const unsigned int nBlocksX, const unsigned int nBlocksY,
            const unsigned int nBlocksZ) :
        m_h_min{HUGE_VAL,HUGE_VAL,HUGE_VAL}, m_h_max{-1,-1,-1},
        m_initialized(false),
        m_map_x(xS,xE,nBlocksX),
        m_map_y(yS,yE,nBlocksY),
        m_map_z(zS,zE,nBlocksZ)
    {}

    ~NonUniformScheme() {}

    typedef cubism::MeshMap<TBlock> TMeshMap;

    //! Grid-spacing array padded with _S halo cells at the start and _E halo
    //! cells at the end; operator()(i) maps the interior index i (which may
    //! range from -_S to ncells+_E-1) into the padded storage.
    template <int _S, int _E>
    class HaloVector
    {
    public:
        static const int START = _S;
        static const int END = _E;

        inline double operator()(const int i) const { return m_data[i+_S]; }
        inline double& operator()(const int i) { return m_data[i+_S]; }

        //! Release the storage (swap idiom actually frees the capacity).
        inline void clear() { std::vector<double>().swap(m_data); }

        //! Build [start halos | interior spacings | end halos].
        //! halos must hold _S leading followed by _E trailing ghost spacings.
        inline void fill(TMeshMap& mmap, const double* const halos)
        {
            // FIX: the previous resize()+insert(begin,...) combination left
            // the zero-filled resized elements at the tail, doubling the
            // stored size, and grew further on every repeated call. Build
            // the padded array once, appending in order.
            m_data.clear();
            m_data.reserve(mmap.ncells() + _S + _E);
            m_data.insert(m_data.end(), &halos[0], &halos[_S]);
            m_data.insert(m_data.end(), mmap.data_grid_spacing(),
                          mmap.data_grid_spacing() + mmap.ncells());
            m_data.insert(m_data.end(), &halos[_S], &halos[_S+_E]);
        }

    private:
        std::vector<double> m_data;
    };

    typedef HaloVector<StencilMax,StencilMax> TVector;

    //! Initialize the three mesh maps from the given density kernels, build
    //! the halo-extended spacing arrays and record the per-direction
    //! minimum/maximum cell widths. Must be called before any other method.
    void init(const cubism::MeshDensity* const kernel_x,
              const cubism::MeshDensity* const kernel_y,
              const cubism::MeshDensity* const kernel_z)
    {
        // Scratch buffer for the ghost spacings produced by MeshMap::init.
        double ghosts[2*StencilMax];

        m_map_x.init(kernel_x, StencilMax, StencilMax, &ghosts[0]);
        m_all_delta_x.fill(m_map_x, &ghosts[0]);

        m_map_y.init(kernel_y, StencilMax, StencilMax, &ghosts[0]);
        m_all_delta_y.fill(m_map_y, &ghosts[0]);

        m_map_z.init(kernel_z, StencilMax, StencilMax, &ghosts[0]);
        m_all_delta_z.fill(m_map_z, &ghosts[0]);

        // Track the extreme cell widths per direction.
        for (size_t i = 0; i < m_map_x.ncells(); ++i) {
            m_h_max[0] = std::max( m_h_max[0], m_map_x.cell_width(i) );
            m_h_min[0] = std::min( m_h_min[0], m_map_x.cell_width(i) );
        }
        for (size_t i = 0; i < m_map_y.ncells(); ++i) {
            m_h_max[1] = std::max( m_h_max[1], m_map_y.cell_width(i) );
            m_h_min[1] = std::min( m_h_min[1], m_map_y.cell_width(i) );
        }
        for (size_t i = 0; i < m_map_z.ncells(); ++i) {
            m_h_max[2] = std::max( m_h_max[2], m_map_z.cell_width(i) );
            m_h_min[2] = std::min( m_h_min[2], m_map_z.cell_width(i) );
        }
        assert( m_h_max[0]>=m_h_min[0] );
        assert( m_h_max[1]>=m_h_min[1] );
        assert( m_h_max[2]>=m_h_min[2] );

        m_initialized = true;
    }

    //! Compute the coefficients of the finite-difference scheme TFD on the
    //! non-uniform mesh and distribute them into the given blocks.
    //! \param infos   Block metadata; each referenced block must expose
    //!                fd_cx/fd_cy/fd_cz coefficient storage.
    //! \param cleanup If true, release the halo spacing arrays afterwards
    //!                (only set on the last scheme being set up).
    template <typename TFD>
    void setup_coefficients(std::vector<cubism::BlockInfo>& infos,
                            const bool cleanup = false)
    {
        if (!m_initialized)
        {
            fprintf(stderr,"ERROR: NonUniformScheme: Not initialized.\n");
            fflush(0); exit(1);
        }

        // 0. sanity: the scheme's halo must fit into the allocated padding.
        assert(TFD::HALO_S <= StencilMax);
        assert(TFD::HALO_E <= StencilMax);

        // 1. non-uniform finite differences per direction.
        TFD fd_x(m_map_x.ncells());
        TFD fd_y(m_map_y.ncells());
        TFD fd_z(m_map_z.ncells());
        fd_x.setup(&m_all_delta_x(0), m_map_x.ncells());
        fd_y.setup(&m_all_delta_y(0), m_map_y.ncells());
        fd_z.setup(&m_all_delta_z(0), m_map_z.ncells());

        typename TFD::template BlockSetFunctor<CUP_BLOCK_SIZE> set_x;
        typename TFD::template BlockSetFunctor<CUP_BLOCK_SIZE> set_y;
        typename TFD::template BlockSetFunctor<CUP_BLOCK_SIZE> set_z;

        // 2. distribute the coefficients over the blocks.
        #pragma omp parallel for
        for(size_t i=0; i<infos.size(); ++i)
        {
            cubism::BlockInfo info = infos[i];
            TBlock& b = *(TBlock*)info.ptrBlock;
            {
                const int index = info.index[0];
                const unsigned int offset = TBlock::sizeX * index;
                set_x(fd_x, b.fd_cx, offset);
            }
            {
                const int index = info.index[1];
                const unsigned int offset = TBlock::sizeY * index;
                set_y(fd_y, b.fd_cy, offset);
            }
            {
                const int index = info.index[2];
                const unsigned int offset = TBlock::sizeZ * index;
                set_z(fd_z, b.fd_cz, offset);
            }
        }

        // 3. optional cleanup (only safe once no further scheme needs the
        // halo-extended spacing arrays).
        if (cleanup)
        {
            m_all_delta_x.clear();
            m_all_delta_y.clear();
            m_all_delta_z.clear();
        }
    }

    /// @{ -------------------------------------------------- mesh map access
    inline const TMeshMap& get_map_x() const { return m_map_x; }
    inline const TMeshMap& get_map_y() const { return m_map_y; }
    inline const TMeshMap& get_map_z() const { return m_map_z; }
    inline TMeshMap& get_map_x() { return m_map_x; }
    inline TMeshMap& get_map_y() { return m_map_y; }
    inline TMeshMap& get_map_z() { return m_map_z; }
    /// @}

    //! Smallest cell width: of direction i (0..2), or over all directions
    //! when i == -1 (the default). Aborts if init() has not been called.
    inline double minimum_cell_width(const int i=-1) const
    {
        assert(i < 3 && i > -2);
        if (!m_initialized)
        {
            fprintf(stderr, "ERROR: NonUniformScheme.h: minimum_cell_width() "
                    "can not return m_h_min, not initialized.\n");
            fflush(0); exit(1);
        }
        const double all_min = std::min({m_h_min[0], m_h_min[1], m_h_min[2]});
        if (-1 == i) return all_min;
        else return m_h_min[i];
    }

    //! Largest cell width: of direction i (0..2), or over all directions
    //! when i == -1 (the default). Aborts if init() has not been called.
    inline double maximum_cell_width(const int i=-1) const
    {
        assert(i < 3 && i > -2);
        if (!m_initialized)
        {
            fprintf(stderr, "ERROR: NonUniformScheme.h: maximum_cell_width() "
                    "can not return m_h_max, not initialized.\n");
            fflush(0); exit(1);
        }
        const double all_max = std::max({m_h_max[0], m_h_max[1], m_h_max[2]});
        if (-1 == i) return all_max;
        else return m_h_max[i];
    }

    //! Print spacing and growth-factor statistics for all three directions.
    void print_mesh_statistics(const bool verb=true)
    {
        if (verb)
        {
            _compute_mesh_stats("x-direction", m_map_x.kernel_name(),
                    m_map_x.data_grid_spacing(), m_map_x.ncells() );
            _compute_mesh_stats("y-direction", m_map_y.kernel_name(),
                    m_map_y.data_grid_spacing(), m_map_y.ncells() );
            _compute_mesh_stats("z-direction", m_map_z.kernel_name(),
                    m_map_z.data_grid_spacing(), m_map_z.ncells() );
        }
    }

    //! Harmonic mean of the three per-direction harmonic-mean spacings.
    double compute_mean_grid_spacing()
    {
        const double avgHx = _harmonicMean(m_map_x.data_grid_spacing(),
                m_map_x.ncells());
        const double avgHy = _harmonicMean(m_map_y.data_grid_spacing(),
                m_map_y.ncells());
        const double avgHz = _harmonicMean(m_map_z.data_grid_spacing(),
                m_map_z.ncells());
        return 3 / (1/avgHx + 1/avgHy + 1/avgHz);
    }

    //! Harmonic mean of N values; assumes N > 0 and all data[i] != 0.
    double _harmonicMean(const double* const data, const unsigned int N)
    {
        double hmean = 0;
        for (unsigned int i = 0; i < N; ++i) hmean += 1.0 / data[i];
        return N / hmean;
    }

private:
    double m_h_min[3], m_h_max[3];  // per-direction extreme cell widths
    bool m_initialized;             // set by init()
    TMeshMap m_map_x;
    TMeshMap m_map_y;
    TMeshMap m_map_z;
    TVector m_all_delta_x;          // halo-extended spacings per direction
    TVector m_all_delta_y;
    TVector m_all_delta_z;

    //! Write 1/h for one block dimension (kept for possible reuse; currently
    //! has no callers in this header).
    template <int _BSIZE>
    void _set_block_invh(const double* const grid_spacing, Real* const invh)
    {
        for (int i = 0; i < _BSIZE; ++i)
            invh[i] = 1.0/grid_spacing[i];
    }

    //! Print running-moment statistics (mean/std/skew/kurt/min/max) of the
    //! cell spacings and of the spacing growth factor data[i]/data[i-1].
    //! NOTE(review): uses Welford-style incremental moments; assumes N >= 2
    //! (var divides by N-1) — confirm callers never pass a single-cell map.
    void _compute_mesh_stats(const std::string header, const std::string name,
                             const double* const data, const unsigned int N)
    {
        const auto deltaM2 = [](double i, double delta) {
            return std::pow(delta, 2) * i / (i+1.0);
        };
        const auto deltaM3 = [](double i, double delta, double M2) {
            const double norm = i * (i-1.0) / std::pow(i+1.0, 2);
            const double corr = 3*delta * M2 / (i+1.0);
            return std::pow(delta, 3) * norm - corr;
        };
        const auto deltaM4 = [](double i, double delta, double M2, double M3) {
            const double norm = i * (i*i - i + 1.0) / std::pow(i+1.0, 3);
            const double cor1 = 6 * std::pow(delta, 2) * M2 / std::pow(i+1.0, 2);
            const double cor2 = 4 * delta * M3 / (i+1.0);
            return std::pow(delta, 4) * norm + cor1 - cor2;
        };

        printf("%s statistics %s.\n", name.c_str(), header.c_str());

        // statistics of the cell spacings themselves
        {
            double mean = 0, var = 0, skew = 0, kurt = 0;
            double min = HUGE_VAL, max = -HUGE_VAL;
            for (unsigned int i = 0; i < N; ++i) {
                if (data[i] < min) min = data[i];
                if (data[i] > max) max = data[i];
            }
            double M2 = 0, M3 = 0, M4 = 0;
            for (unsigned int i = 0; i < N; ++i) {
                const double delta = data[i] - mean;
                const double dM2 = deltaM2(i, delta);
                const double dM3 = deltaM3(i, delta, M2);
                const double dM4 = deltaM4(i, delta, M2, M3);
                mean += delta / (i+1.0);
                M4 += dM4;
                M3 += dM3;
                M2 += dM2;
            }
            var = M2 / (N - 1);
            skew = std::sqrt(N) * M3 / std::pow(M2 + 2e-16, 1.5);
            kurt = N * M4 / (M2 * M2 + 2e-16) - 3;
            printf("\tMesh spacing: mean=%e; std=%e; skew=%e; kurt=%e; min=%e; max=%e\n",
                    mean, std::sqrt(var), skew, kurt, min, max);
        }

        // statistics of the growth factor between neighbouring cells
        {
            double mean = 0, var = 0, skew = 0, kurt = 0;
            double min = HUGE_VAL, max = -HUGE_VAL;
            for (unsigned int i = 1; i < N; ++i) {
                const double r = data[i]/data[i-1];
                if (r < min) min = r;
                if (r > max) max = r;
            }
            double M2 = 0, M3 = 0, M4 = 0;
            for (unsigned int i = 1; i < N; ++i) {
                const double r = data[i]/data[i-1];
                const double delta = r - mean;
                const double dM2 = deltaM2(i, delta);
                const double dM3 = deltaM3(i, delta, M2);
                const double dM4 = deltaM4(i, delta, M2, M3);
                mean += delta / (i+1.0);
                M4 += dM4;
                M3 += dM3;
                M2 += dM2;
            }
            var = M2 / (N - 1);
            skew = std::sqrt(N) * M3 / std::pow(M2 + 2e-16, 1.5);
            kurt = N * M4 / (M2 * M2 + 2e-16) - 3;
            printf("\tGrowth factor: mean=%e; std=%e; skew=%e; kurt=%e; min=%e; max=%e\n",
                    mean, std::sqrt(var), skew, kurt, min, max);
        }
    }
};
CubismUP_3D_NAMESPACE_END
#endif // NonUniformScheme
|
mask.h | // This program is free software: you can use, modify and/or redistribute it
// under the terms of the simplified BSD License. You should have received a
// copy of this license along this program. If not, see
// <http://www.opensource.org/licenses/bsd-license.html>.
//
// Copyright (C) 2012, Javier Sánchez Pérez <jsanchez@dis.ulpgc.es>
// Copyright (C) 2014, Nelson Monzón López <nmonzon@ctim.es>
// All rights reserved.
#ifndef MASK_H
#define MASK_H
#include <omp.h>
/**
 *
 * Apply a 3x3 convolution mask to a multi-channel (interleaved) image.
 * Border pixels are handled by replicating the edge pixels outward.
 *
 */
void
mask3x3 (const float *input,	//input image
	 float *output,		//output image
	 const int nx,		//image width
	 const int ny,		//image height
	 const int nz,		// number of color channels in the image
	 const float *mask	//3x3 mask to be applied (row-major)
  )
{
  //number of floats per image row (all channels interleaved)
  int nx_multichannel = nx * nz;

  for(int index_multichannel = 0; index_multichannel < nz; index_multichannel++){

    //apply the mask to the center body of the image
    #pragma omp parallel for
    for (int i = 1; i < ny - 1; i++){
      for (int j = 1; j < nx - 1; j++){
	int k = (i * nx + j) * nz + index_multichannel;
	double sum = 0;
	for (int l = 0; l < 3; l++){
	  for (int m = 0; m < 3; m++){
	    int p = ((i + l - 1) * nx + j + m - 1) * nz + index_multichannel;
	    sum += input[p] * mask[l * 3 + m];
	  }
	}
	output[k] = sum;
      }
    }

    //apply the mask to the first and last rows
    #pragma omp parallel for
    for (int j = 1; j < nx - 1; j++) {
      //first row (row -1 replicated onto row 0)
      int index = j * nz + index_multichannel;
      double sum = 0;
      sum += input[index - nz] * (mask[0] + mask[3]);
      sum += input[index] * (mask[1] + mask[4]);
      sum += input[index + nz] * (mask[2] + mask[5]);
      //BUG FIX: the row below must be addressed with the channel stride
      //(was input[nx_multichannel + j ...], correct only for nz == 1)
      int below = nx_multichannel + index;
      sum += input[below - nz] * mask[6];
      sum += input[below] * mask[7];
      sum += input[below + nz] * mask[8];
      //BUG FIX: write the channel-aware position (was output[j])
      output[index] = sum;

      //last row (row ny replicated onto row ny-1)
      index = ((ny - 2) * nx + j) * nz + index_multichannel;
      sum = 0;
      sum += input[index - nz] * mask[0];
      sum += input[index] * mask[1];
      sum += input[index + nz] * mask[2];
      index = ((ny - 1) * nx + j) * nz + index_multichannel;
      sum += input[index - nz] * (mask[6] + mask[3]);
      sum += input[index] * (mask[7] + mask[4]);
      //BUG FIX: one pixel right is nz floats away (was input[index + 1])
      sum += input[index + nz] * (mask[8] + mask[5]);
      output[index] = sum;
    }

    //apply the mask to the first and last columns
    #pragma omp parallel for
    for (int i = 1; i < ny - 1; i++){
      //first column
      int index = i * nx_multichannel + index_multichannel;
      double sum = 0;
      int index_row = (i - 1) * nx_multichannel + index_multichannel;
      sum += input[index_row] * (mask[0] + mask[1]);
      sum += input[index_row + nz] * mask[2];
      sum += input[index] * (mask[3] + mask[4]);
      sum += input[index + nz] * mask[5];
      index_row = (i + 1) * nx_multichannel + index_multichannel;
      sum += input[index_row] * (mask[6] + mask[7]);
      sum += input[index_row + nz] * mask[8];
      output[index] = sum;

      //last column (index - nz is the last pixel of row i-1, and so on)
      sum = 0;
      sum += input[index - 2 * nz] * mask[0];
      sum += input[index - nz] * (mask[1] + mask[2]);
      index_row = (i + 1) * nx_multichannel + index_multichannel;
      sum += input[index_row - 2 * nz] * mask[3];
      sum += input[index_row - nz] * (mask[4] + mask[5]);
      index_row = (i + 2) * nx_multichannel + index_multichannel;
      sum += input[index_row - 2 * nz] * mask[6];
      sum += input[index_row - nz] * (mask[7] + mask[8]);
      output[(i * nx + nx - 1) * nz + index_multichannel] = sum;
    }

    //apply the mask to the four corners
    output[index_multichannel] =
      input[index_multichannel] * (mask[0] + mask[1] + mask[3] + mask[4]) +
      input[index_multichannel + nz] * (mask[2] + mask[5]) +
      input[nx_multichannel + index_multichannel] * (mask[6] + mask[7]) +
      input[nx_multichannel + index_multichannel + nz] * mask[8];
    output[nx_multichannel - nz + index_multichannel] =
      input[(nx - 2) * nz + index_multichannel] * (mask[0] + mask[3]) +
      input[(nx - 1) * nz + index_multichannel] * (mask[1] + mask[2] + mask[4] + mask[5]) +
      input[(2 * nx - 2) * nz + index_multichannel] * mask[6] +
      input[(2 * nx - 1) * nz + index_multichannel] * (mask[7] + mask[8]);
    output[(ny - 1) * nx_multichannel + index_multichannel] =
      input[(ny - 2) * nx_multichannel + index_multichannel] * (mask[0] + mask[1]) +
      input[((ny - 2) * nx + 1) * nz + index_multichannel] * mask[2] +
      input[(ny - 1) * nx_multichannel + index_multichannel] * (mask[3] + mask[4] + mask[6] + mask[7]) +
      input[((ny - 1) * nx + 1) * nz + index_multichannel] * (mask[5] + mask[8]);
    output[(ny * nx - 1) * nz + index_multichannel] =
      input[((ny - 1) * nx - 2) * nz + index_multichannel] * mask[0] +
      input[((ny - 1) * nx - 1) * nz + index_multichannel] * (mask[1] + mask[2]) +
      input[(ny * nx - 2) * nz + index_multichannel] * (mask[3] + mask[6]) +
      input[(ny * nx - 1) * nz + index_multichannel] * (mask[4] + mask[5] + mask[7] + mask[8]);
  } // end loop for channels information
} // end mask3x3
/**
 *
 * Compute the second-order derivative in x (Ixx) of a multi-channel image.
 *
 */
void
Dxx (const float *I,	//input image
     float *Ixx,	//oputput derivative
     const int nx,	//image width
     const int ny,	//image height
     const int nz	//number of color channels in the image
  )
{
  // 1-D [1 -2 1] stencil along x embedded in a 3x3 kernel
  const float kernel[9] = {
    0.0f,  0.0f, 0.0f,
    1.0f, -2.0f, 1.0f,
    0.0f,  0.0f, 0.0f
  };
  mask3x3 (I, Ixx, nx, ny, nz, kernel);
}
/**
 *
 * Compute the second-order derivative in y (Iyy) of a multi-channel image.
 *
 */
void
Dyy (const float *I,	//input image
     float *Iyy,	//oputput derivative
     const int nx,	//image width
     const int ny,	//image height
     const int nz	//number of color channels in the image
  )
{
  // 1-D [1 -2 1] stencil along y embedded in a 3x3 kernel
  const float kernel[9] = {
    0.0f,  1.0f, 0.0f,
    0.0f, -2.0f, 0.0f,
    0.0f,  1.0f, 0.0f
  };
  mask3x3 (I, Iyy, nx, ny, nz, kernel);
}
/**
 *
 * Compute the mixed second-order derivative (Ixy) of a multi-channel image.
 *
 */
void
Dxy (const float *I,	//input image
     float *Ixy,	//oputput derivative
     const int nx,	//image width
     const int ny,	//image height
     const int nz	//number of color channels in the image
  )
{
  // cross-derivative stencil: quarter weights on the diagonal corners
  const float kernel[9] = {
     0.25f, 0.0f, -0.25f,
     0.0f,  0.0f,  0.0f,
    -0.25f, 0.0f,  0.25f
  };
  mask3x3 (I, Ixy, nx, ny, nz, kernel);
}
/**
 *
 * Compute the image gradient: central differences in the interior and
 * one-sided differences (still scaled by 0.5) at the borders.
 *
 */
void
gradient (const float *input,	//input image
	  float *dx,		//computed x derivative
	  float *dy,		//computed y derivative
	  const int nx,		//image width
	  const int ny,		//image height
	  const int nz		//number of color channels in the image
  )
{
  //number of floats per image row (all channels interleaved)
  const int nx_multichannel = nx * nz;

  for(int index_multichannel = 0; index_multichannel < nz; index_multichannel++){

    //gradient in the center body of the image
    #pragma omp parallel for
    for (int i = 1; i < ny - 1; i++){
      for (int j = 1; j < nx - 1; j++){
	const int k = (i * nx + j) * nz + index_multichannel;
	dx[k] = 0.5 * (input[k + nz] - input[k - nz]);
	dy[k] = 0.5 * (input[k + nx_multichannel] - input[k - nx_multichannel]);
      }
    }

    //gradient in the first and last rows
    #pragma omp parallel for
    for (int j = 1; j < nx - 1; j++){
      const int index = j * nz + index_multichannel;
      //BUG FIX: write the channel-aware position (was dx[j] / dy[j],
      //which is only correct for nz == 1)
      dx[index] = 0.5 * (input[index + nz] - input[index - nz]);
      dy[index] = 0.5 * (input[index + nx_multichannel] - input[index]);
      const int k = ((ny - 1) * nx + j) * nz + index_multichannel;
      dx[k] = 0.5 * (input[k + nz] - input[k - nz]);
      dy[k] = 0.5 * (input[k] - input[k - nx_multichannel]);
    }

    //gradient in the first and last columns
    #pragma omp parallel for
    for (int i = 1; i < ny - 1; i++) {
      const int p = (i * nx_multichannel) + index_multichannel;
      dx[p] = 0.5 * (input[p + nz] - input[p]);
      dy[p] = 0.5 * (input[p + nx_multichannel] - input[p - nx_multichannel]);
      const int k = ((i + 1) * nx - 1) * nz + index_multichannel;
      dx[k] = 0.5 * (input[k] - input[k - nz]);
      dy[k] = 0.5 * (input[k + nx_multichannel] - input[k - nx_multichannel]);
    }

    //calculate the gradient in the corners
    dx[index_multichannel] = 0.5 * (input[index_multichannel + nz] - input[index_multichannel]);
    dy[index_multichannel] = 0.5 * (input[nx_multichannel + index_multichannel] - input[index_multichannel]);
    const int corner_up_right = (nx-1) * nz + index_multichannel;
    dx[corner_up_right] = 0.5 * (input[corner_up_right] - input[corner_up_right - nz]);
    dy[corner_up_right] = 0.5 * (input[(2 * nx_multichannel) + index_multichannel - nz] - input[corner_up_right]);
    const int corner_down_left = ((ny - 1) * nx) * nz + index_multichannel;
    dx[corner_down_left] = 0.5 * (input[corner_down_left + nz] - input[corner_down_left]);
    dy[corner_down_left] = 0.5 * (input[corner_down_left] - input[(ny - 2) * nx_multichannel + index_multichannel]) ;
    const int corner_down_right = ny * nx_multichannel - nz + index_multichannel;
    dx[corner_down_right] = 0.5 * (input[corner_down_right] - input[corner_down_right - nz]);
    dy[corner_down_right] = 0.5 * (input[corner_down_right] - input[(ny - 1) * nx_multichannel - nz + index_multichannel]);
  } // end loop for multi-channel
} // end gradient function
#endif
|
reduce_demo.c | //------------------------------------------------------------------------------
// GraphBLAS/Demo/Program/reduce_demo: reduce a matrix to a scalar
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
#include "GraphBLAS.h"
// #define N 65536
#define N 16384
// Demo entry point: build an N-by-N int64 matrix and time GrB_Matrix_reduce
// to a scalar with 1..nthreads_max threads.
int main (void)
{
    #if defined ( _OPENMP )
    double t0 = omp_get_wtime ( ) ;
    #endif

    // start GraphBLAS
    GrB_init (GrB_NONBLOCKING) ;
    int nthreads ;
    GxB_Global_Option_get (GxB_GLOBAL_NTHREADS, &nthreads) ;
    printf ("demo: reduce a matrix to a scalar, nthreads: %d\n", nthreads) ;

    int nthreads_max ;
    GxB_Global_Option_get (GxB_GLOBAL_NTHREADS, &nthreads_max) ;
    printf ("# of threads: %d\n", nthreads_max) ;

    #if defined ( _OPENMP )
    t0 = omp_get_wtime ( ) - t0 ;
    printf ("GPU warmup time: %g\n", t0) ;
    t0 = omp_get_wtime ( ) ;
    #endif

    GrB_Index nrows = N ;
    GrB_Index ncols = N ;
    GrB_Matrix A = NULL ;
    GrB_Matrix_new (&A, GrB_INT64, nrows, ncols) ;

    // build the tuple lists (N*N entries of 8 bytes each); FIX: the three
    // large allocations were previously used without checking for failure
    GrB_Index *I = malloc (nrows * ncols * sizeof (GrB_Index)) ;
    GrB_Index *J = malloc (nrows * ncols * sizeof (GrB_Index)) ;
    int64_t   *X = malloc (nrows * ncols * sizeof (int64_t)) ;
    if (I == NULL || J == NULL || X == NULL)
    {
        fprintf (stderr, "out of memory\n") ;
        free (I) ; free (J) ; free (X) ;
        GrB_Matrix_free (&A) ;
        GrB_finalize ( ) ;
        return (1) ;
    }

    int64_t k ;
    #pragma omp parallel for num_threads(nthreads_max) schedule(static)
    for (k = 0 ; k < N*N ; k++)
    {
        // k = i * N + j, with a deterministic 8-bit value per entry
        int64_t i = k / N ;
        int64_t j = k % N ;
        int x = (int) (k & 0xFF) ;
        I [k] = i ;
        J [k] = j ;
        X [k] = x ;
    }

    GrB_Index nvals = N*N ;
    GrB_Matrix_build_INT64 (A, I, J, X, nvals, GrB_PLUS_INT64) ;

    free (I) ;
    free (J) ;
    free (X) ;

    #if defined ( _OPENMP )
    t0 = omp_get_wtime ( ) - t0 ;
    printf ("time to create matrix: %g\n", t0) ;
    #endif

    // FIX: result is reduced with the INT64 monoid, so use the int64 reduce
    // variant and a matching int64_t scalar (was GrB_Index / _UINT64)
    int64_t result = 0 ;
    double t1 = 0 ;

    printf ("\nreduce to a scalar:\n") ;

    for (int nth = 1 ; nth <= nthreads_max ; nth++)
    {
        GxB_Global_Option_set (GxB_GLOBAL_NTHREADS, nth) ;
        #if defined ( _OPENMP )
        double t = omp_get_wtime ( ) ;
        #endif
        GrB_Matrix_reduce_INT64 (&result, NULL, GrB_PLUS_MONOID_INT64,
            A, NULL) ;
        #if defined ( _OPENMP )
        t = omp_get_wtime ( ) - t ;
        if (nth == 1) t1 = t ;
        printf ("nthreads %3d time: %12.6f speedup %8.2f\n",
            nth, t, t1/t) ;
        #endif
    }

    printf ("result %" PRId64 "\n", result) ;

    // free everything
    GrB_Matrix_free (&A) ;
    GrB_finalize ( ) ;
    return (0) ;
}
|
GraphCut.h | /*
* MIT License
*
* Copyright (c) 2018-2019 Benjamin Köhler
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#pragma once
#ifndef BKTOOLS_GRAPHCUT_H
#define BKTOOLS_GRAPHCUT_H
#if defined(__GNUC__) && defined(_OPENMP)
#include <parallel/algorithm>
#else
#include <algorithm>
#endif
#include <array>
#include <chrono>
#include <cstdint>
#include <iostream>
#include <limits>
#include <numeric>
#include <utility>
#include <tuple>
#include <type_traits>
#include <vector>
#include <bkTools/graphcut/GraphCutBase.h>
#include <bkTools/graphcut/GraphCutBase_Run.h>
#include <bkTools/graphcut/gc_definitions.h>
#ifdef BK_EMIT_PROGRESS
#include <bkTools/progress/GlobalProgressManager.h>
#include <bkTools/progress/Progress.h>
#include <bkTools/localization/GlobalLocalizationManager.h>
#endif
namespace bk
{
/// N-dimensional grid graph cut.
/// Combines the data/bookkeeping layer (GraphCutBase) with the solver loop
/// (GraphCutBase_Run) via CRTP; TDims is the number of grid dimensions.
/// NOTE(review): grow()/augment()/adopt() used in run() appear to implement
/// the Boykov-Kolmogorov max-flow phases; their definitions live in the run
/// base class and are not visible here.
template<int TDims> class GraphCut
        : public gc_details::GraphCutBase<TDims, GraphCut<TDims>>,
          public gc_details::GraphCutBase_Run<TDims, GraphCut<TDims>>
{
    //====================================================================================================
    //===== DEFINITIONS
    //====================================================================================================
    using self_type = GraphCut<TDims>;
    using base_type = gc_details::GraphCutBase<TDims, GraphCut<TDims>>;
    using run_base_type = gc_details::GraphCutBase_Run<TDims, GraphCut<TDims>>;
    using gc = typename base_type::gc;
    using pt2Nd_type = typename base_type::pt2Nd_type;
    template<typename T> using vector_grid_type = typename base_type::template vector_grid_type<T>;

  public:
    using flag_type = typename base_type::flag_type;
    using id_type = typename base_type::id_type;

    // the CRTP base classes call the private is_valid* helpers below
    friend base_type;
    friend gc_details::GraphCutBase_Run<TDims, GraphCut<TDims>>;

    //====================================================================================================
    //===== CONSTRUCTORS & DESTRUCTOR
    //====================================================================================================
  public:
    /// @{ -------------------------------------------------- CTOR
    constexpr GraphCut()
        : base_type(),
          run_base_type(this->_current_timestamp) // run base shares the timestamp counter owned by the data base
    { /* do nothing */ }

    constexpr GraphCut(const self_type&) = default;
    constexpr GraphCut(self_type&&) noexcept = default;
    /// @}

    /// @{ -------------------------------------------------- DTOR
    ~GraphCut() = default;
    /// @}

    //====================================================================================================
    //===== GETTER
    //====================================================================================================
  private:
    /// @{ -------------------------------------------------- IS VALID
    //! true iff the node lies inside the grid in every dimension
    [[nodiscard]] bool is_valid(const id_type& node) const
    {
        // out of bounds?
        for (int dimId = 0; dimId < TDims; ++dimId)
        {
            if (node[dimId] < 0 || node[dimId] >= this->_size[dimId])
            { return false; }
        }

        return true;
    }

    //! extension hook used by the base classes; the plain grid cut imposes no extra constraint
    [[nodiscard]] static constexpr bool is_valid_extra(const id_type& /*node*/) noexcept
    { return true; }

    //! true iff the node has a neighbor at node[dimId] - 1
    [[nodiscard]] static constexpr bool is_valid_lower_bound(int dimId, const id_type& node)
    { return node[dimId] > 0; }

    //! true iff the node has a neighbor at node[dimId] + 1
    [[nodiscard]] bool is_valid_upper_bound(int dimId, const id_type& node) const
    { return node[dimId] < this->_size[dimId] - 1; }
    /// @}

    //====================================================================================================
    //===== SETTER
    //====================================================================================================
  public:
    /// @{ -------------------------------------------------- OPERATOR =
    [[maybe_unused]] self_type& operator=(const self_type&) = delete;
    [[maybe_unused]] self_type& operator=(self_type&&) noexcept = default;
    /// @}

    //====================================================================================================
    //===== FUNCTIONS
    //====================================================================================================
  private:
    /// @{ -------------------------------------------------- HELPER: RESET_IMPL
    //! per-derived-class reset hook invoked by the base; nothing to do here
    static constexpr void reset_impl() noexcept
    { /* do nothing */ }
    /// @}

  public:
    /// @{ -------------------------------------------------- RUN
    //! Solve the cut. No-op when the cached result is up to date; requires a
    //! prior init (non-zero size, non-empty source and sink seed lists).
    void run()
    {
        /*
         * validity check
         */
        if (this->_up2date)
        { return; }

        // the product over all dimensions is 0 iff at least one dimension is 0
        const bool anySizeIsZero = 0 == std::accumulate(this->_size.begin(), this->_size.end(), 1, [](int x, int y)
        { return x * y; });

        if (anySizeIsZero || this->_connected_to_source.empty() || this->_connected_to_sink.empty())
        {
            std::cerr << "init graph cut first" << std::endl;
            return;
        }

        #ifdef BK_EMIT_PROGRESS
        bk::Progress& prog = bk_progress.emplace_task(25, ___("Performing graph cut"));
        #endif

        this->reset();

        #ifdef BK_EMIT_PROGRESS
        prog.increment(1);
        #endif

        this->_current_timestamp = 1;
        constexpr int timestamp_init = 1;

        const std::chrono::system_clock::time_point clock_start = std::chrono::system_clock::now(); // for the runtime report printed below
        unsigned int cnt_iterations = 0;

        // seed the active front with all nodes hard-linked to the source terminal
        for (unsigned int i = 0; i < this->_connected_to_source.size(); ++i)
        {
            const id_type& s = this->_connected_to_source[i];
            this->set_source_as_parent(s);
            this->set_active(s);
            this->distance_to_terminal(s) = 0;
            this->timestamp(s) = timestamp_init;
        }

        #ifdef BK_EMIT_PROGRESS
        prog.increment(1);
        #endif

        // likewise for all nodes hard-linked to the sink terminal
        for (unsigned int i = 0; i < this->_connected_to_sink.size(); ++i)
        {
            const id_type& s = this->_connected_to_sink[i];
            this->set_sink_as_parent(s);
            this->set_active(s);
            this->distance_to_terminal(s) = 0;
            this->timestamp(s) = timestamp_init;
        }

        #ifdef BK_EMIT_PROGRESS
        prog.increment(1);
        #endif

        // main solver loop: grow the search trees; when they touch, push flow
        // along the found path (augment) and re-attach orphaned nodes (adopt)
        while (this->grow())
        {
            if (this->augment())
            { this->adopt(); }

            ++cnt_iterations;
        }

        // re-assert the terminal labels of the seed nodes
        #pragma omp parallel for
        for (unsigned int i = 0; i < this->_connected_to_source.size(); ++i)
        { this->set_source_set(this->_connected_to_source[i]); }

        #ifdef BK_EMIT_PROGRESS
        prog.increment(1);
        #endif

        #pragma omp parallel for
        for (unsigned int i = 0; i < this->_connected_to_sink.size(); ++i)
        { this->set_sink_set(this->_connected_to_sink[i]); }

        #ifdef BK_EMIT_PROGRESS
        prog.set_finished();
        #endif

        const std::chrono::system_clock::time_point clock_stop = std::chrono::system_clock::now();
        const unsigned int time_in_sec = static_cast<unsigned int>(std::chrono::duration_cast<std::chrono::seconds>(clock_stop - clock_start).count());
        const unsigned int time_in_ms = static_cast<unsigned int>(std::chrono::duration_cast<std::chrono::milliseconds>(clock_stop - clock_start).count());
        std::cout << "graph cut finished in " << cnt_iterations << " iterations in " << time_in_sec << " s (" << time_in_ms << " ms)" << std::endl;

        this->_up2date = true;
    }
    /// @}
}; // class GraphCut
} // namespace bk
#endif //BKTOOLS_GRAPHCUT_H
|
equation_batchnorm.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Evangelos Georganas (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <libxsmm_sync.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#include <omp.h>
#define ALIGNDOWN(N, A) ((N) & ~((A)-1))
#define USE_VECTORIZED_PATH 1
/* Widen one bfloat16 value to fp32: the 16 bf16 bits become the high half of
 * the fp32 bit pattern, the low 16 mantissa bits are zero-filled. */
float upconvert_bf16(libxsmm_bfloat16 x) {
  union libxsmm_bfloat16_hp wide;
  wide.i[0] = 0; /* low mantissa half: zero */
  wide.i[1] = x; /* sign/exponent/upper mantissa from the bf16 value */
  return wide.f;
}
/* Forward batch normalization on bf16 activations built from libxsmm TPP kernels.
 *
 * Layout (channel-blocked, C = CP*CB):
 *   pinp, pout    : [N, CP, HW, CB] bf16 input / normalized output
 *   pgamma, pbeta : [CP, CB] bf16 scale / shift
 *   mean, var     : [CP*CB] fp32 per-channel batch statistics (written here)
 *
 * Pre-JITed kernel arguments supplied by the caller:
 *   func10           : equation kernel, y = (s*x + b)*gamma + beta
 *   reduce_HW_kernel : reduces one [HW/num_HW_blocks, CB] tile to [2*CB] (sum X | sum X^2)
 *   all_zero_kernel  : zero-fills a CB-sized fp32 buffer
 *   add_kernel       : elementwise add of two CB-sized fp32 buffers
 *   copy_kernel      : not used in the forward pass (kept for interface
 *                      symmetry with the backward pass)
 *
 * Three phases inside one parallel region: (1) per-(cp,n) partial sums over
 * HW, (2) reduction over N into mean/var, (3) apply the normalization.
 */
void tpp_batchnorm_fwd_bf16(long N, long CP, long HW, long CB, long num_HW_blocks, libxsmm_bfloat16 *pinp, libxsmm_bfloat16 *pgamma, libxsmm_bfloat16 *pbeta, float *mean, float *var,
    libxsmm_bfloat16 *pout, float eps, libxsmm_matrix_eqn_function func10, libxsmm_meltwfunction_unary reduce_HW_kernel, libxsmm_meltwfunction_unary all_zero_kernel, libxsmm_meltwfunction_binary add_kernel, libxsmm_meltwfunction_unary copy_kernel) {
  const float scale = 1.0f / ((float)N*HW); /* 1/(N*HW): converts sums into means */
  LIBXSMM_ALIGNED(float sum_X_X2[2*CP*CB], 64); /* first CP*CB entries: sum(X); second CP*CB: sum(X^2) */
  LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, inp, pinp, CP, HW, CB); /* [N, CP, HW, CB] */
  LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, out, pout, CP, HW, CB); /* [N, CP, HW, CB] */
  LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, gamma, pgamma, CB); /* [CP, CB] */
  LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, beta, pbeta, CB); /* [CP, CB] */
  LIBXSMM_ALIGNED(float sum_N[CP*N*CB], 64); /* per-(cp,n) partial sums, reduced over N in phase 2 */
  LIBXSMM_ALIGNED(float sumsq_N[CP*N*CB], 64);

  #pragma omp parallel
  {
    LIBXSMM_ALIGNED(float s[CB], 64); /* per-thread: s = 1/sqrt(var + eps) */
    LIBXSMM_ALIGNED(float b[CB], 64); /* per-thread: b = -mean * s */
    int n, cp;

    /* phase 1: each (cp, n) pair accumulates its own CB-wide sums over HW */
    #pragma omp for nowait collapse(2)
    for (cp = 0; cp < CP; cp++) {
      for(n = 0; n < N; n++){
        int hwb = 0;
        float *sum_ncp_ptr = &sum_N[cp*N*CB + n*CB];
        float *sumsq_ncp_ptr = &sumsq_N[cp*N*CB + n*CB];
        libxsmm_meltw_unary_param all_zero_param;
        all_zero_param.out.primary = sum_ncp_ptr;
        all_zero_kernel(&all_zero_param);
        all_zero_param.out.primary = sumsq_ncp_ptr;
        all_zero_kernel(&all_zero_param);
        /* scalar equivalent of the two all_zero_kernel calls above:
           for (int cb = 0; cb < CB; cb++) { sum_ncp_ptr[cb] = 0.0f; sumsq_ncp_ptr[cb] = 0.0f; } */

        libxsmm_meltw_binary_param add_param;
        libxsmm_meltw_unary_param reduce_HW_params; /*Private params and tmp array */
        LIBXSMM_ALIGNED(float lcl_sum_X_X2[2*CB], 64);
        reduce_HW_params.out.primary = lcl_sum_X_X2; /* [2*CB] */

        for(hwb=0; hwb < num_HW_blocks; hwb++){
          reduce_HW_params.in.primary = &LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
          reduce_HW_kernel(&reduce_HW_params); /* [HW, CB] -----> [2 * CB] */

          /* accumulate the tile's sums into the (cp,n) slot */
          add_param.in0.primary = sum_ncp_ptr;
          add_param.in1.primary = lcl_sum_X_X2;
          add_param.out.primary = sum_ncp_ptr;
          add_kernel(&add_param);

          add_param.in0.primary = sumsq_ncp_ptr;
          add_param.in1.primary = &lcl_sum_X_X2[CB];
          add_param.out.primary = sumsq_ncp_ptr;
          add_kernel(&add_param);
          /* scalar equivalent:
             for (int cb = 0; cb < CB; cb++) { sum_ncp_ptr[cb] += lcl_sum_X_X2[cb]; sumsq_ncp_ptr[cb] += lcl_sum_X_X2[CB + cb]; } */
        }
      }
    }

    /* phase 2: reduce the per-image partials over N and derive mean/var.
       Implicit barrier of the previous "omp for" was disabled with "nowait",
       hence the explicit barrier here. */
    #pragma omp barrier
    #pragma omp for
    for (cp = 0; cp < CP; cp++) {
      libxsmm_meltw_unary_param all_zero_param;
      all_zero_param.out.primary = &sum_X_X2[cp*CB];
      all_zero_kernel(&all_zero_param);
      all_zero_param.out.primary = &sum_X_X2[CP*CB + cp*CB];
      all_zero_kernel(&all_zero_param);

      int ni, cb;
      libxsmm_meltw_binary_param add_param;
      for(ni = 0; ni < N; ni++){
        add_param.in0.primary = &sum_X_X2[cp*CB];
        add_param.in1.primary = &sum_N[cp*N*CB + ni*CB];
        add_param.out.primary = &sum_X_X2[cp*CB];
        add_kernel(&add_param);

        add_param.in0.primary = &sum_X_X2[CP*CB + cp*CB];
        add_param.in1.primary = &sumsq_N[cp*N*CB + ni*CB];
        add_param.out.primary = &sum_X_X2[CP*CB + cp*CB];
        add_kernel(&add_param);
      }

      for(cb = 0; cb < CB; cb++){
        mean[cp*CB + cb] = sum_X_X2[cp*CB + cb] * scale; /* E[X] */
        var[cp*CB + cb] = (sum_X_X2[CP*CB + cp*CB + cb] * scale) - (mean[cp*CB + cb]*mean[cp*CB + cb]); /* var(X) = E[X^2] - (E[X])^2 */
      }
    }

    /* phase 3: apply y = (s*x + b)*gamma + beta tile by tile */
    #pragma omp for nowait collapse(2)
    for (cp = 0; cp < CP; cp++){
      for(n = 0; n < N; n++){ /* Parallelize over batches and CP */
        libxsmm_matrix_arg arg_array[5]; /* private eqn args and params*/
        libxsmm_matrix_eqn_param eqn_param;
        int hwb, cb;

        for(cb = 0; cb < CB; cb++){
          s[cb] = 1.0f / ((float)sqrt(var[cp*CB + cb] + eps)); /* s = 1/sqrt(var(X) + eps) [CB] */
          b[cb] = -1 * mean[cp*CB + cb] * s[cb]; /* b = -E[X]/sqrt(var(X) + eps) [CB] */
        }

        /* arg indices 0..4 match the equation's operand order expected by func10 */
        arg_array[1].primary = s; /* [CB] */
        arg_array[2].primary = b; /* [CB] */
        arg_array[3].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB); /* [CB] */
        arg_array[4].primary = &LIBXSMM_VLA_ACCESS(2, beta, cp, 0, CB); /* [CB] */

        for(hwb=0; hwb < num_HW_blocks; hwb++){
          arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] */
          eqn_param.inputs = arg_array;
          eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, out, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW,CB] */
          func10(&eqn_param); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */
        }
      }
    }
  }
}
/* Backward batch normalization on bf16 data built from libxsmm TPP kernels.
 *
 * Layout (channel-blocked, C = CP*CB):
 *   pdout, pinp, pdin : [N, CP, HW, CB] bf16 upstream grad / fwd input / input grad
 *   pgamma            : [CP, CB] bf16 scale
 *   mean, var         : [CP*CB] fp32 batch statistics from the forward pass
 *   pdgamma, pdbeta   : [CP*CB] fp32 parameter gradients (written here)
 *
 * Pre-JITed kernels: dgamma_func accumulates (a*inp + b)*dout, dbeta_func
 * accumulates dout, din_func evaluates the input-gradient equation;
 * all_zero/add/copy kernels operate on CB-sized fp32 buffers.
 *
 * Fix: the two copy_kernel call sites read "copy_kernel(©_param);" — an
 * HTML-entity corruption ("&copy;" rendered as the copyright sign) of
 * "copy_kernel(&copy_param);". The address-of operator is restored here;
 * the original text does not compile.
 */
void tpp_batchnorm_bwd_bf16(long N, long CP, long HW, long CB, long num_HW_blocks, libxsmm_bfloat16 *pdout, libxsmm_bfloat16 *pinp, float *mean, float *var, libxsmm_bfloat16 *pgamma, libxsmm_bfloat16 *pdin, float *pdgamma, float *pdbeta,
    libxsmm_matrix_eqn_function dgamma_func, libxsmm_matrix_eqn_function dbeta_func, libxsmm_matrix_eqn_function din_func, float eps,
    libxsmm_meltwfunction_unary all_zero_kernel, libxsmm_meltwfunction_binary add_kernel, libxsmm_meltwfunction_unary copy_kernel) {
  const float scale = 1.0f / ((float)N*HW); /* Scaling parameter*/
  LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, din, pdin, CP, HW, CB); /* [N, CP, HW, CB] */
  LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, inp, pinp, CP, HW, CB); /* [N, CP, HW, CB] */
  LIBXSMM_VLA_DECL(4, libxsmm_bfloat16, dout, pdout, CP, HW, CB); /* [N, CP, HW, CB] */
  LIBXSMM_VLA_DECL(2, libxsmm_bfloat16, gamma, pgamma, CB); /* [CP, CB] */
  LIBXSMM_ALIGNED(float dgamma_N[CP*N*CB], 64); /* per-(cp,n) dgamma partials, reduced over N below */
  LIBXSMM_ALIGNED(float dbeta_N[CP*N*CB], 64);

  #pragma omp parallel
  {
    LIBXSMM_ALIGNED(float a[CB], 64); /* per-thread scratch for the equation coefficients */
    LIBXSMM_ALIGNED(float b[CB], 64);
    LIBXSMM_ALIGNED(float c[CB], 64);
    int n, cp;

    /* phase 1: each (cp, n) pair accumulates its own dgamma/dbeta partials */
    #pragma omp for nowait collapse(2)
    for (cp = 0; cp < CP; cp++) {
      for (n = 0; n < N; n++) {
        int hwb, cb;
        libxsmm_matrix_arg arg_array[10]; /* Private values of args and params */
        libxsmm_matrix_eqn_param eqn_param;
        LIBXSMM_ALIGNED(float lcl_dgamma_ptr[CB], 64);
        LIBXSMM_ALIGNED(float lcl_dbeta_ptr[CB], 64);
        float *dgamma_ncp_ptr = &dgamma_N[cp*N*CB + n*CB];
        float *dbeta_ncp_ptr = &dbeta_N[cp*N*CB + n*CB];

        libxsmm_meltw_unary_param all_zero_param;
        all_zero_param.out.primary = lcl_dgamma_ptr;
        all_zero_kernel(&all_zero_param);
        all_zero_param.out.primary = lcl_dbeta_ptr;
        all_zero_kernel(&all_zero_param);

        for(cb = 0; cb < CB; cb++){
          a[cb] = 1.0f / ((float)sqrt(var[cp*CB + cb] + eps)); /* 1/stddev */
          b[cb] = -a[cb]*mean[cp*CB + cb]; /* so a*x + b standardizes x */
        }

        arg_array[1].primary = a;
        arg_array[2].primary = b;
        arg_array[4].primary = lcl_dgamma_ptr;
        arg_array[5].primary = lcl_dbeta_ptr;
        arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);

        for(hwb=0; hwb < num_HW_blocks; hwb++){
          arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
          arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
          eqn_param.inputs = arg_array;
          eqn_param.output.primary = lcl_dgamma_ptr;
          dgamma_func(&eqn_param); /* dgamma += (a * inp + b) * dout */
          eqn_param.output.primary = lcl_dbeta_ptr;
          dbeta_func(&eqn_param); /* dbeta += dout */
        }

        /* publish the local accumulators into the shared per-(cp,n) slots */
        libxsmm_meltw_unary_param copy_param;
        copy_param.in.primary = lcl_dgamma_ptr;
        copy_param.out.primary = dgamma_ncp_ptr;
        copy_kernel(&copy_param); /* fixed: was "copy_kernel(©_param);" (mangled &copy_param) */
        copy_param.in.primary = lcl_dbeta_ptr;
        copy_param.out.primary = dbeta_ncp_ptr;
        copy_kernel(&copy_param); /* fixed: same entity corruption */
      }
    }

    /* phase 2: reduce the partials over N into the parameter gradients */
    #pragma omp barrier
    #pragma omp for
    for (cp = 0; cp < CP; cp++) {
      libxsmm_meltw_unary_param all_zero_param;
      all_zero_param.out.primary = &pdgamma[cp*CB];
      all_zero_kernel(&all_zero_param);
      all_zero_param.out.primary = &pdbeta[cp*CB];
      all_zero_kernel(&all_zero_param);

      libxsmm_meltw_binary_param add_param;
      int ni;
      for(ni = 0; ni < N; ni++){
        add_param.in0.primary = &pdgamma[cp*CB];
        add_param.in1.primary = &dgamma_N[cp*N*CB + ni*CB];
        add_param.out.primary = &pdgamma[cp*CB];
        add_kernel(&add_param);

        add_param.in0.primary = &pdbeta[cp*CB];
        add_param.in1.primary = &dbeta_N[cp*N*CB + ni*CB];
        add_param.out.primary = &pdbeta[cp*CB];
        add_kernel(&add_param);
      }
    }

    /* phase 3: compute the input gradient tile by tile */
    #pragma omp for nowait collapse(2)
    for (cp = 0; cp < CP; cp++) {
      for(n = 0; n < N; n++){
        libxsmm_matrix_arg arg_array[8]; /* Private eqn args and params */
        libxsmm_matrix_eqn_param eqn_param;
        int hwb, cb;

        for(cb = 0; cb < CB; cb++){
          a[cb] = upconvert_bf16(pgamma[cp*CB + cb]) / ((float)sqrt(var[cp*CB + cb] + eps)); /* a = gamma_ptr[CB] * brstd_ptr[CB] */
          b[cb] = -a[cb] * scale * pdgamma[cp*CB + cb] / ((float)sqrt(var[cp*CB + cb] + eps)); /* b = gamma_ptr[CB] * brstd_ptr[CB] * del_gamma_ptr[v] * brstd_ptr[CB] * recp_nhw */
          c[cb] = -b[cb] * mean[cp*CB + cb] - a[cb] * scale * pdbeta[cp*CB + cb] ; /* c = -b * mean - gamma * brstd * recp_nhw * del_beta */
        }

        arg_array[1].primary = a;
        arg_array[2].primary = b;
        arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
        arg_array[7].primary = c;

        for(hwb=0; hwb < num_HW_blocks; hwb++){
          arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
          arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
          eqn_param.inputs = arg_array;
          eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, din, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
          din_func(&eqn_param); /* din = dout * a * gamma + b * inp + c */
        }
      }
    }
  }
}
/* Forward batch normalization on fp32 activations built from libxsmm TPP
 * kernels. Identical structure to tpp_batchnorm_fwd_bf16, only the activation
 * and gamma/beta element type is float.
 *
 * Layout (channel-blocked, C = CP*CB):
 *   pinp, pout    : [N, CP, HW, CB] fp32 input / normalized output
 *   pgamma, pbeta : [CP, CB] fp32 scale / shift
 *   mean, var     : [CP*CB] fp32 per-channel batch statistics (written here)
 *
 * copy_kernel is accepted for interface symmetry with the backward pass but
 * is not used in this function.
 */
void tpp_batchnorm_fwd_fp32(long N, long CP, long HW, long CB, long num_HW_blocks, float *pinp, float *pgamma, float *pbeta, float *mean, float *var, float *pout, float eps,
    libxsmm_matrix_eqn_function func10, libxsmm_meltwfunction_unary reduce_HW_kernel, libxsmm_meltwfunction_unary all_zero_kernel, libxsmm_meltwfunction_binary add_kernel, libxsmm_meltwfunction_unary copy_kernel) {
  const float scale = 1.0f /((float)N * HW); /* 1/(N*HW): converts sums into means */
  LIBXSMM_ALIGNED(float sum_X_X2[CP*2*CB], 64); /* first CP*CB entries: sum(X); second CP*CB: sum(X^2) */
  LIBXSMM_VLA_DECL(4, float, inp, pinp, CP, HW, CB); /* [N, CP, HW, CB] */
  LIBXSMM_VLA_DECL(4, float, out, pout, CP, HW, CB); /* [N, CP, HW, CB] */
  LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB); /* [CP, CB] */
  LIBXSMM_VLA_DECL(2, float, beta, pbeta, CB); /* [CP, CB] */
  LIBXSMM_ALIGNED(float sum_N[CP*N*CB], 64); /* per-(cp,n) partial sums, reduced over N in phase 2 */
  LIBXSMM_ALIGNED(float sumsq_N[CP*N*CB], 64);

  #pragma omp parallel
  {
    LIBXSMM_ALIGNED(float s[CB], 64); /* per-thread: s = 1/sqrt(var + eps) */
    LIBXSMM_ALIGNED(float b[CB], 64); /* per-thread: b = -mean * s */
    int n, cp;

    /* phase 1: each (cp, n) pair accumulates its own CB-wide sums over HW */
    #pragma omp for nowait collapse(2)
    for (cp = 0; cp < CP; cp++) {
      for(n = 0; n < N; n++){
        int hwb;
        float *sum_ncp_ptr = &sum_N[cp*N*CB + n*CB];
        float *sumsq_ncp_ptr = &sumsq_N[cp*N*CB + n*CB];
        libxsmm_meltw_unary_param all_zero_param;
        all_zero_param.out.primary = sum_ncp_ptr;
        all_zero_kernel(&all_zero_param);
        all_zero_param.out.primary = sumsq_ncp_ptr;
        all_zero_kernel(&all_zero_param);
        /* scalar equivalent:
           for (int cb = 0; cb < CB; cb++) { sum_ncp_ptr[cb] = 0.0f; sumsq_ncp_ptr[cb] = 0.0f; } */

        libxsmm_meltw_binary_param add_param;
        libxsmm_meltw_unary_param reduce_HW_params; /*Private params and tmp array */
        LIBXSMM_ALIGNED(float lcl_sum_X_X2[2*CB], 64);
        reduce_HW_params.out.primary = lcl_sum_X_X2; /* [2*CB] */

        for(hwb=0; hwb < num_HW_blocks; hwb++){
          reduce_HW_params.in.primary = &LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
          reduce_HW_kernel(&reduce_HW_params); /* [HW, CB] -----> [2 * CB] */

          /* accumulate the tile's sums into the (cp,n) slot */
          add_param.in0.primary = sum_ncp_ptr;
          add_param.in1.primary = lcl_sum_X_X2;
          add_param.out.primary = sum_ncp_ptr;
          add_kernel(&add_param);

          add_param.in0.primary = sumsq_ncp_ptr;
          add_param.in1.primary = &lcl_sum_X_X2[CB];
          add_param.out.primary = sumsq_ncp_ptr;
          add_kernel(&add_param);
        }
      }
    }

    /* phase 2: reduce the per-image partials over N and derive mean/var.
       The explicit barrier is needed because phase 1 used "nowait". */
    #pragma omp barrier
    #pragma omp for
    for (cp = 0; cp < CP; cp++) {
      libxsmm_meltw_unary_param all_zero_param;
      all_zero_param.out.primary = &sum_X_X2[cp*CB];
      all_zero_kernel(&all_zero_param);
      all_zero_param.out.primary = &sum_X_X2[CP*CB + cp*CB];
      all_zero_kernel(&all_zero_param);

      libxsmm_meltw_binary_param add_param;
      int cb, ni;
      for(ni = 0; ni < N; ni++){
        add_param.in0.primary = &sum_X_X2[cp*CB];
        add_param.in1.primary = &sum_N[cp*N*CB + ni*CB];
        add_param.out.primary = &sum_X_X2[cp*CB];
        add_kernel(&add_param);

        add_param.in0.primary = &sum_X_X2[CP*CB + cp*CB];
        add_param.in1.primary = &sumsq_N[cp*N*CB + ni*CB];
        add_param.out.primary = &sum_X_X2[CP*CB + cp*CB];
        add_kernel(&add_param);
      }

      for(cb = 0; cb < CB; cb++){
        mean[cp*CB + cb] = sum_X_X2[cp*CB + cb] * scale; /* E[X] */
        var[cp*CB + cb] = (sum_X_X2[CP*CB + cp*CB + cb] * scale) - (mean[cp*CB + cb]*mean[cp*CB + cb]); /* var(X) = E[X^2] - (E[X])^2 */
      }
    }

    /* phase 3: apply y = (s*x + b)*gamma + beta tile by tile */
    #pragma omp for nowait collapse(2)
    for (cp = 0; cp < CP; cp++){
      for(n = 0; n < N; n++){ /* Parallelize over batches and CP*/
        libxsmm_matrix_arg arg_array[5]; /* private eqn args and params*/
        libxsmm_matrix_eqn_param eqn_param;
        int hwb, cb;

        for(cb = 0; cb < CB; cb++){
          s[cb] = 1.0f / ((float)sqrt(var[cp*CB + cb] + eps)); /* s = 1/sqrt(var(X) + eps) [CB] */
          b[cb] = -1 * mean[cp*CB + cb] * s[cb]; /* b = -E[X]/sqrt(var(X) + eps) [CB] */
        }

        /* arg indices 0..4 match the equation's operand order expected by func10 */
        arg_array[1].primary = s; /* [CB] */
        arg_array[2].primary = b; /* [CB] */
        arg_array[3].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB); /* [CB] */
        arg_array[4].primary = &LIBXSMM_VLA_ACCESS(2, beta, cp, 0, CB); /* [CB] */

        for(hwb=0; hwb < num_HW_blocks; hwb++){
          arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW, CB] */
          eqn_param.inputs = arg_array;
          eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, out, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB); /* [HW,CB] */
          func10(&eqn_param); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */
        }
      }
    }
  }
}
/* Backward batch normalization on fp32 data built from libxsmm TPP kernels.
 * Identical structure to tpp_batchnorm_bwd_bf16; the only differences are the
 * float element type and that gamma is read directly (no bf16 upconvert).
 *
 * Layout (channel-blocked, C = CP*CB):
 *   pdout, pinp, pdin : [N, CP, HW, CB] upstream grad / fwd input / input grad
 *   pgamma            : [CP, CB] scale
 *   mean, var         : [CP*CB] batch statistics from the forward pass
 *   pdgamma, pdbeta   : [CP*CB] parameter gradients (written here)
 *
 * Fix: the two copy_kernel call sites read "copy_kernel(©_param);" — an
 * HTML-entity corruption ("&copy;" rendered as the copyright sign) of
 * "copy_kernel(&copy_param);". The address-of operator is restored here;
 * the original text does not compile.
 */
void tpp_batchnorm_bwd_fp32(long N, long CP, long HW, long CB, long num_HW_blocks, float *pdout, float *pinp, float *mean, float *var, float *pgamma, float *pdin, float *pdgamma, float *pdbeta,
    libxsmm_matrix_eqn_function dgamma_func, libxsmm_matrix_eqn_function dbeta_func, libxsmm_matrix_eqn_function din_func, float eps,
    libxsmm_meltwfunction_unary all_zero_kernel, libxsmm_meltwfunction_binary add_kernel, libxsmm_meltwfunction_unary copy_kernel) {
  const float scale = 1.0f / ((float)N*HW); /* Scaling parameter*/
  LIBXSMM_VLA_DECL(4, float, din, pdin, CP, HW, CB); /* [N, CP, HW, CB] */
  LIBXSMM_VLA_DECL(4, float, inp, pinp, CP, HW, CB); /* [N, CP, HW, CB] */
  LIBXSMM_VLA_DECL(4, float, dout, pdout, CP, HW, CB); /* [N, CP, HW, CB] */
  LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB); /* [CP, CB] */
  LIBXSMM_ALIGNED(float dgamma_N[CP*N*CB], 64); /* per-(cp,n) dgamma partials, reduced over N below */
  LIBXSMM_ALIGNED(float dbeta_N[CP*N*CB], 64);

  #pragma omp parallel
  {
    LIBXSMM_ALIGNED(float a[CB], 64); /* per-thread scratch for the equation coefficients */
    LIBXSMM_ALIGNED(float b[CB], 64);
    LIBXSMM_ALIGNED(float c[CB], 64);
    int n, cp;

    /* phase 1: each (cp, n) pair accumulates its own dgamma/dbeta partials */
    #pragma omp for nowait collapse(2)
    for (cp = 0; cp < CP; cp++) {
      for (n = 0; n < N; n++) {
        int hwb, cb;
        libxsmm_matrix_arg arg_array[10]; /* Private values of args and params */
        libxsmm_matrix_eqn_param eqn_param;
        LIBXSMM_ALIGNED(float lcl_dgamma_ptr[CB], 64);
        LIBXSMM_ALIGNED(float lcl_dbeta_ptr[CB], 64);
        float *dgamma_ncp_ptr = &dgamma_N[cp*N*CB + n*CB];
        float *dbeta_ncp_ptr = &dbeta_N[cp*N*CB + n*CB];

        libxsmm_meltw_unary_param all_zero_param;
        all_zero_param.out.primary = lcl_dgamma_ptr;
        all_zero_kernel(&all_zero_param);
        all_zero_param.out.primary = lcl_dbeta_ptr;
        all_zero_kernel(&all_zero_param);

        for(cb = 0; cb < CB; cb++){
          a[cb] = 1.0f / ((float)sqrt(var[cp*CB + cb] + eps)); /* 1/stddev */
          b[cb] = -a[cb]*mean[cp*CB + cb]; /* so a*x + b standardizes x */
        }

        arg_array[1].primary = a;
        arg_array[2].primary = b;
        arg_array[4].primary = lcl_dgamma_ptr;
        arg_array[5].primary = lcl_dbeta_ptr;
        arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);

        for(hwb=0; hwb < num_HW_blocks; hwb++){
          arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
          arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
          eqn_param.inputs = arg_array;
          eqn_param.output.primary = lcl_dgamma_ptr;
          dgamma_func(&eqn_param); /* dgamma += (a * inp + b) * dout */
          eqn_param.output.primary = lcl_dbeta_ptr;
          dbeta_func(&eqn_param); /* dbeta += dout */
        }

        /* publish the local accumulators into the shared per-(cp,n) slots */
        libxsmm_meltw_unary_param copy_param;
        copy_param.in.primary = lcl_dgamma_ptr;
        copy_param.out.primary = dgamma_ncp_ptr;
        copy_kernel(&copy_param); /* fixed: was "copy_kernel(©_param);" (mangled &copy_param) */
        copy_param.in.primary = lcl_dbeta_ptr;
        copy_param.out.primary = dbeta_ncp_ptr;
        copy_kernel(&copy_param); /* fixed: same entity corruption */
      }
    }

    /* phase 2: reduce the partials over N into the parameter gradients */
    #pragma omp barrier
    #pragma omp for
    for (cp = 0; cp < CP; cp++) {
      libxsmm_meltw_unary_param all_zero_param;
      all_zero_param.out.primary = &pdgamma[cp*CB];
      all_zero_kernel(&all_zero_param);
      all_zero_param.out.primary = &pdbeta[cp*CB];
      all_zero_kernel(&all_zero_param);

      libxsmm_meltw_binary_param add_param;
      int ni;
      for(ni = 0; ni < N; ni++){
        add_param.in0.primary = &pdgamma[cp*CB];
        add_param.in1.primary = &dgamma_N[cp*N*CB + ni*CB];
        add_param.out.primary = &pdgamma[cp*CB];
        add_kernel(&add_param);

        add_param.in0.primary = &pdbeta[cp*CB];
        add_param.in1.primary = &dbeta_N[cp*N*CB + ni*CB];
        add_param.out.primary = &pdbeta[cp*CB];
        add_kernel(&add_param);
      }
    }

    /* phase 3: compute the input gradient tile by tile */
    #pragma omp for nowait collapse(2) /* Parallelize over batches and CP*/
    for (cp = 0; cp < CP; cp++) {
      for(n = 0; n < N; n++){
        libxsmm_matrix_arg arg_array[8]; /* Private eqn args and params */
        libxsmm_matrix_eqn_param eqn_param;
        int hwb, cb;

        for(cb = 0; cb < CB; cb++){
          a[cb] = pgamma[cp*CB + cb] / ((float)sqrt(var[cp*CB + cb] + eps)); /* a = gamma_ptr[CB] * brstd_ptr[CB] */
          b[cb] = -a[cb] * scale * pdgamma[cp*CB + cb] / ((float)sqrt(var[cp*CB + cb] + eps)); /* b = gamma_ptr[CB] * brstd_ptr[CB] * del_gamma_ptr[v] * brstd_ptr[CB] * recp_nhw */
          c[cb] = -b[cb] * mean[cp*CB + cb] - a[cb] * scale * pdbeta[cp*CB + cb] ; /* c = -b * mean - gamma * brstd * recp_nhw * del_beta */
        }

        arg_array[1].primary = a;
        arg_array[2].primary = b;
        arg_array[6].primary = &LIBXSMM_VLA_ACCESS(2, gamma, cp, 0, CB);
        arg_array[7].primary = c;

        for(hwb=0; hwb < num_HW_blocks; hwb++){
          arg_array[0].primary = &LIBXSMM_VLA_ACCESS(4, inp, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
          arg_array[3].primary = &LIBXSMM_VLA_ACCESS(4, dout, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
          eqn_param.inputs = arg_array;
          eqn_param.output.primary = &LIBXSMM_VLA_ACCESS(4, din, n, cp, hwb*(HW/num_HW_blocks), 0, CP, HW, CB);
          din_func(&eqn_param); /* din = dout * a + b * inp + c */
        }
      }
    }
  }
}
void scaler_batchnorm_fwd_fp32(long N, long CP, long HW, long CB, float *pinp, float *pgamma, float *pbeta, float *mean, float *var, float *pout, float eps){
LIBXSMM_ALIGNED(float sum_X[CP*CB], 64);
LIBXSMM_ALIGNED(float sum_X2[CP*CB], 64);
LIBXSMM_ALIGNED(float s[CP*CB], 64);
LIBXSMM_ALIGNED(float b[CP*CB], 64);
LIBXSMM_VLA_DECL(4, float, inp, pinp, CP, HW, CB); /* [N, CP, HW, CB] */
LIBXSMM_VLA_DECL(4, float, out, pout, CP, HW, CB); /* [N, CP, HW, CB] */
LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB);
LIBXSMM_VLA_DECL(2, float, beta, pbeta, CB);
LIBXSMM_ALIGNED(float sum_N[CP*N*CB], 64);
LIBXSMM_ALIGNED(float sumsq_N[CP*N*CB], 64);
/* #pragma omp parallel for collapse(2) reduction(+: sum_X[:2*CP*CB]) reduction(+: sum_X2[:2*CP*CB]) */
/* for(int n = 0; n < N; n++){ */
/* for(int cp = 0; cp < CP; cp++){ */
/* for(int hw = 0; hw < HW; hw++){ */
/* for(int cb = 0; cb < CB; cb++){ */
/* sum_X[cp*CB + cb] += LIBXSMM_VLA_ACCESS(4, inp, n, cp, hw, cb, CP, HW, CB); */
/* sum_X2[cp*CB + cb] += (LIBXSMM_VLA_ACCESS(4, inp, n, cp, hw, cb, CP, HW, CB)*LIBXSMM_VLA_ACCESS(4, inp, n, cp, hw, cb, CP, HW, CB)); */
/* } */
/* } */
/* } */
/* } */
int n, cp, j;
#pragma omp parallel
{
#pragma omp for collapse(2)
for(n = 0; n < N; n++){
for (cp = 0; cp < CP; cp++) {
int hw, cb;
LIBXSMM_ALIGNED(float lcl_sum_ptr[CB], 64);
LIBXSMM_ALIGNED(float lcl_sumsq_ptr[CB], 64);
float *sum_ncp_ptr = &sum_N[cp*N*CB + n*CB];
float *sumsq_ncp_ptr = &sumsq_N[cp*N*CB + n*CB];
for (cb = 0; cb < CB; cb++) {
lcl_sum_ptr[cb] = 0.0f;
lcl_sumsq_ptr[cb] = 0.0f;
}
for(hw = 0; hw < HW; hw++){
for(cb = 0; cb < CB; cb++){
lcl_sum_ptr[cb] += LIBXSMM_VLA_ACCESS(4, inp, n, cp, hw, cb, CP, HW, CB);
lcl_sumsq_ptr[cb] += (LIBXSMM_VLA_ACCESS(4, inp, n, cp, hw, cb, CP, HW, CB)*LIBXSMM_VLA_ACCESS(4, inp, n, cp, hw, cb, CP, HW, CB));
}
}
for (cb = 0; cb < CB; cb++) {
sum_ncp_ptr[cb] = lcl_sum_ptr[cb];
sumsq_ncp_ptr[cb] = lcl_sumsq_ptr[cb];
}
}
}
#pragma omp barrier
#pragma omp for
for (cp = 0; cp < CP; cp++) {
int ni, cb;
for (cb = 0; cb < CB; cb++) {
sum_X[cp*CB + cb] = 0.0f;
sum_X2[cp*CB + cb] = 0.0f;
}
for(ni = 0; ni < N; ni++){
for (cb = 0; cb < CB; cb++) {
sum_X[cp*CB + cb] += sum_N[cp*N*CB + ni*CB + cb];
sum_X2[cp*CB + cb] += sumsq_N[cp*N*CB + ni*CB + cb];
}
}
}
}
for(j = 0; j < CP*CB; j++){
mean[j] = sum_X[j] / ((float)N * HW); /* E[X] */
var[j] = (sum_X2[j] / ((float)N * HW)) - (mean[j]*mean[j]); /* var(X) = E[X^2] - (E[X])^2 */
s[j] = 1.0f / ((float)sqrt(var[j] + eps)); /* s = 1/sqrt(var(X) + eps) [CP, CB] */
b[j] = -1 * mean[j] * s[j]; /* b = -E[X]/sqrt(var(X) + eps) [CP, CB] */
}
#pragma omp parallel for collapse(2)
for(n = 0; n < N; n++){ /* Data movement 2*N*CP*HW*CB */
for(cp = 0; cp < CP; cp++){
int cb, hw;
for(hw = 0; hw < HW; hw++){
for(cb = 0; cb < CB; cb++){
LIBXSMM_VLA_ACCESS(4, out, n, cp, hw, cb, CP, HW, CB) = ((LIBXSMM_VLA_ACCESS(4, inp, n, cp, hw, cb, CP, HW, CB) * s[cp*CB + cb]) + b[cp*CB + cb]) * LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB) + LIBXSMM_VLA_ACCESS(2, beta, cp, cb, CB); /* Normalization equation -> y = ((s*x + b)*gamma + beta) */
}
}
}
}
}
/* Scalar (plain-loop, non-TPP) backward batch normalization, fp32.
 *
 * dout/inp/din: [N, CP, HW, CB]; gamma: [CP, CB]; mean/var: [CP*CB] from the
 * forward pass; pdgamma/pdbeta: [CP*CB] parameter gradients (written here).
 *
 * Four per-channel reductions are carried per (image, block) pair and then
 * folded over the batch:
 *   dgamma += (a*inp + b) * dout, dbeta += dout,
 *   ds += dout * gamma * inp,     db += dout * gamma,
 * after which the input gradient is din = dout*a*gamma + b*inp + c.
 */
void scaler_batchnorm_bwd_fp32(long N, long CP, long HW, long CB, float *pdout, float *pinp, float *mean, float *var, float *pgamma, float *pdin, float *pdgamma, float *pdbeta, float eps) {
  const float scale = 1.0f / ((float)N*HW); /* 1/(N*HW) */
  LIBXSMM_ALIGNED(float a[CP*CB], 64);  /* a = 1/sqrt(var + eps); later re-used as gamma-scaled coefficient */
  LIBXSMM_ALIGNED(float b[CP*CB], 64);  /* standardization shift; later the inp-coefficient of din */
  LIBXSMM_ALIGNED(float c[CP*CB], 64);  /* constant term of din */
  LIBXSMM_ALIGNED(float ds[CP*CB], 64); /* sum of dout * gamma * inp */
  LIBXSMM_ALIGNED(float db[CP*CB], 64); /* sum of dout * gamma */
  LIBXSMM_VLA_DECL(4, float, din, pdin, CP, HW, CB);
  LIBXSMM_VLA_DECL(4, float, inp, pinp, CP, HW, CB);
  LIBXSMM_VLA_DECL(4, float, dout, pdout, CP, HW, CB);
  LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB);
  LIBXSMM_ALIGNED(float dgamma_N[CP*N*CB], 64); /* per-(cp,n) partials, reduced over N below */
  LIBXSMM_ALIGNED(float dbeta_N[CP*N*CB], 64);
  LIBXSMM_ALIGNED(float ds_N[CP*N*CB], 64);
  LIBXSMM_ALIGNED(float db_N[CP*N*CB], 64);
  int n, cp, j;

  for(j = 0; j < CP*CB; j++){ /* Initialize the arrays */
    a[j] = 1.0f / ((float)sqrt(var[j] + eps));
    b[j] = -a[j]*mean[j];
  }

  #pragma omp parallel
  {
    /* phase 1: each (n, cp) pair accumulates its own four CB-wide partials */
    #pragma omp for collapse(2)
    for(n = 0; n < N; n++){
      for (cp = 0; cp < CP; cp++) { /* dgamma += (a * inp + b) * dout , dbeta += dout, ds += dout * gamma * inp, db += dout * gamma */
        int cb, hw;
        LIBXSMM_ALIGNED(float lcl_dgamma_ptr[CB], 64);
        LIBXSMM_ALIGNED(float lcl_dbeta_ptr[CB], 64);
        LIBXSMM_ALIGNED(float lcl_ds_ptr[CB], 64);
        LIBXSMM_ALIGNED(float lcl_db_ptr[CB], 64);
        float *dgamma_ncp_ptr = &dgamma_N[cp*N*CB + n*CB];
        float *dbeta_ncp_ptr = &dbeta_N[cp*N*CB + n*CB];
        float *ds_ncp_ptr = &ds_N[cp*N*CB + n*CB];
        float *db_ncp_ptr = &db_N[cp*N*CB + n*CB];
        for (cb = 0; cb < CB; cb++) {
          lcl_dgamma_ptr[cb] = 0.0f;
          lcl_dbeta_ptr[cb] = 0.0f;
          lcl_ds_ptr[cb] = 0.0f;
          lcl_db_ptr[cb] = 0.0f;
        }
        for (hw = 0; hw < HW; hw++){
          for (cb = 0; cb < CB; cb++) {
            lcl_dgamma_ptr[cb] += (a[cp*CB + cb] * LIBXSMM_VLA_ACCESS(4, inp, n, cp, hw, cb, CP, HW, CB) + b[cp*CB + cb]) * LIBXSMM_VLA_ACCESS(4, dout, n, cp, hw, cb, CP, HW, CB);
            lcl_dbeta_ptr[cb] += LIBXSMM_VLA_ACCESS(4, dout, n, cp, hw, cb, CP, HW, CB);
            lcl_ds_ptr[cb] += LIBXSMM_VLA_ACCESS(4, dout, n, cp, hw, cb, CP, HW, CB) * LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB) * LIBXSMM_VLA_ACCESS(4, inp, n, cp, hw, cb, CP, HW, CB);
            lcl_db_ptr[cb] += LIBXSMM_VLA_ACCESS(4, dout, n, cp, hw, cb, CP, HW, CB) * LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB);
          }
        }
        /* publish the local accumulators into the shared per-(cp,n) slots */
        for (cb = 0; cb < CB; cb++) {
          dgamma_ncp_ptr[cb] = lcl_dgamma_ptr[cb];
          dbeta_ncp_ptr[cb] = lcl_dbeta_ptr[cb];
          ds_ncp_ptr[cb] = lcl_ds_ptr[cb];
          db_ncp_ptr[cb] = lcl_db_ptr[cb];
        }
      }
    }
    #pragma omp barrier
    /* phase 2: fold the per-image partials over N */
    #pragma omp for
    for (cp = 0; cp < CP; cp++) {
      int cb, ni;
      for (cb = 0; cb < CB; cb++) {
        pdgamma[cp*CB + cb] = 0.0f;
        pdbeta[cp*CB + cb] = 0.0f;
        ds[cp*CB + cb] = 0.0f;
        db[cp*CB + cb] = 0.0f;
      }
      for(ni = 0; ni < N; ni++){
        for (cb = 0; cb < CB; cb++) {
          pdgamma[cp*CB + cb] += dgamma_N[cp*N*CB + ni*CB + cb];
          pdbeta[cp*CB + cb] += dbeta_N[cp*N*CB + ni*CB + cb];
          ds[cp*CB + cb] += ds_N[cp*N*CB + ni*CB + cb];
          db[cp*CB + cb] += db_N[cp*N*CB + ni*CB + cb];
        }
      }
    }
  }

  /* phase 3: derive the din coefficients (a is re-used from the init loop):
     b = (db * mean[nb] - ds) * a * a * a * scale;
     c = -b * mean[nb] - db * a * scale; */
  for(j = 0; j < CP*CB; j++){
    b[j] = (db[j] * mean[j] - ds[j]) * a[j] * a[j] * a[j] * scale;
    c[j] = -b[j] * mean[j] - db[j] * a[j] * scale;
  }

  /* phase 4: din = dout * a * gamma + b * inp + c; data movement 3*N*CP*HW*CB */
  #pragma omp parallel for collapse(2)
  for(n = 0; n < N; n++){ /* Parallelize over batches, Data movement 3*N*CP*HW*CB */
    for (cp = 0; cp < CP; cp++) { /* din = dout * a * gamma + b * inp + c */
      int cb, hw;
      for (hw = 0; hw < HW; hw++){
        for (cb = 0; cb < CB; cb++) {
          LIBXSMM_VLA_ACCESS(4, din, n, cp, hw, cb, CP, HW, CB) = LIBXSMM_VLA_ACCESS(4, dout, n, cp, hw, cb, CP, HW, CB) * a[cp*CB + cb] * LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB) + b[cp*CB + cb] * LIBXSMM_VLA_ACCESS(4, inp, n, cp, hw, cb, CP, HW, CB) + c[cp*CB + cb];
          /* variant without the explicit gamma factor (a already gamma-scaled):
             LIBXSMM_VLA_ACCESS(4, din, n, cp, hw, cb, CP, HW, CB) = LIBXSMM_VLA_ACCESS(4, dout, n, cp, hw, cb, CP, HW, CB) * a[cp*CB + cb] + b[cp*CB + cb] * LIBXSMM_VLA_ACCESS(4, inp, n, cp, hw, cb, CP, HW, CB) + c[cp*CB + cb]; */
        }
      }
    }
  }
}
void reference_batchnorm_fwd_fp32(long N, long CP, long HW, long CB, float *pinp, float *pgamma, float *pbeta, float *mean, float *var, float *pout, float eps){
  /* Scalar reference forward batchnorm over an [N, CP, HW, CB] tensor.
   * Written for the un-blocked channel case (cb stays 0, i.e. CB == 1).
   * NOTE(review): the mean/var output parameters are not written here —
   * only the locally computed statistics are used; confirm against callers. */
  const float inv_nhw = 1.0f/((float)N*HW);
  LIBXSMM_ALIGNED(float expectval_ptr[CP*CB], 64);
  LIBXSMM_ALIGNED(float rcpstddev_ptr[CP*CB], 64);
  LIBXSMM_VLA_DECL(4, float, inp, pinp, CP, HW, CB); /* [N, CP, HW, CB] */
  LIBXSMM_VLA_DECL(4, float, out, pout, CP, HW, CB); /* [N, CP, HW, CB] */
  LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB);
  LIBXSMM_VLA_DECL(2, float, beta, pbeta, CB);
  int n, cp, hw, cb = 0; /* cb fixed at 0: reference path has no channel blocking */

  /* Pass 1: per-channel mean and reciprocal standard deviation. */
  for (cp = 0; cp < CP; cp++) {
    float accum_x = 0.0f;
    float accum_x2 = 0.0f;
    float mu, sigma2;
    for (n = 0; n < N; n++) {
      for (hw = 0; hw < HW; hw++) {
        const float v = LIBXSMM_VLA_ACCESS(4, inp, n, cp, hw, cb, CP, HW, CB);
        accum_x += v;
        accum_x2 += (v * v);
      }
    }
    mu = inv_nhw * accum_x;
    /* Var = E[x^2] - (E[x])^2, same operation order as the blocked kernels. */
    sigma2 = inv_nhw * accum_x2 - mu * mu;
    expectval_ptr[cp] = mu;
    rcpstddev_ptr[cp] = (float)(1.0/sqrt(sigma2 + eps));
  }

  /* Pass 2: normalize and apply the affine transform (gamma, beta). */
  for (n = 0; n < N; n++) {
    for (cp = 0; cp < CP; cp++) {
      for (hw = 0; hw < HW; hw++) {
        const float v = LIBXSMM_VLA_ACCESS(4, inp, n, cp, hw, cb, CP, HW, CB);
        LIBXSMM_VLA_ACCESS(4, out, n, cp, hw, cb, CP, HW, CB) = LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB)*(v - expectval_ptr[cp])*rcpstddev_ptr[cp] + LIBXSMM_VLA_ACCESS(2, beta, cp, cb, CB);
      }
    }
  }
}
void reference_batchnorm_bwd_fp32(long N, long CP, long HW, long CB, float *pdout, float *pinp, float *mean, float *var, float *pgamma, float *pdin, float *pdgamma, float *pdbeta, float eps){
  /* Scalar reference backward batchnorm over an [N, CP, HW, CB] tensor.
   * Written for the un-blocked channel case (cb stays 0, i.e. CB == 1).
   * Computes dgamma/dbeta (pass 1) and din (pass 2) from the saved
   * per-channel mean and variance. */
  const float nhw = (float)N * HW;
  const float inv_nhw = 1.0f/((float)N*HW);
  LIBXSMM_ALIGNED(float expectval_ptr[CP*CB], 64);
  LIBXSMM_ALIGNED(float rcpstddev_ptr[CP*CB], 64);
  LIBXSMM_VLA_DECL(4, float, din, pdin, CP, HW, CB);
  LIBXSMM_VLA_DECL(4, float, inp, pinp, CP, HW, CB);
  LIBXSMM_VLA_DECL(4, float, dout, pdout, CP, HW, CB);
  LIBXSMM_VLA_DECL(2, float, gamma, pgamma, CB);
  LIBXSMM_VLA_DECL(2, float, dgamma, pdgamma, CB);
  LIBXSMM_VLA_DECL(2, float, dbeta, pdbeta, CB);
  int n, cp, hw, cb = 0; /* cb fixed at 0: reference path has no channel blocking */
  printf("\n Using reference implementation \n");

  /* Pass 1: per-channel gradient of gamma and beta. */
  for (cp = 0; cp < CP; cp++ ) {
    LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) = 0.0f;
    LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) = 0.0f;
    expectval_ptr[cp] = mean[cp];
    rcpstddev_ptr[cp] = (float)(1.0 / (sqrt(var[cp] + eps)));
    for (n = 0; n < N; n++ ) {
      for (hw = 0; hw < HW; hw++){
        const float x = LIBXSMM_VLA_ACCESS(4, inp, n, cp, hw, cb, CP, HW, CB);
        const float dy = LIBXSMM_VLA_ACCESS(4, dout, n, cp, hw, cb, CP, HW, CB);
        LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) += (x - expectval_ptr[cp]) * dy * rcpstddev_ptr[cp];
        LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) += dy;
      }
    }
  }

  /* Pass 2: gradient w.r.t. the input. */
  for (n = 0; n < N; n++ ) {
    for (cp = 0; cp < CP; cp++ ) {
      for (hw = 0; hw < HW; hw++){
        const float x = LIBXSMM_VLA_ACCESS(4, inp, n, cp, hw, cb, CP, HW, CB);
        const float dy = LIBXSMM_VLA_ACCESS(4, dout, n, cp, hw, cb, CP, HW, CB);
        LIBXSMM_VLA_ACCESS(4, din, n, cp, hw, cb, CP, HW, CB) = LIBXSMM_VLA_ACCESS(2, gamma, cp, cb, CB) * rcpstddev_ptr[cp] * inv_nhw * (nhw * dy -
          (LIBXSMM_VLA_ACCESS(2, dbeta, cp, cb, CB) + (x - expectval_ptr[cp]) * LIBXSMM_VLA_ACCESS(2, dgamma, cp, cb, CB) * rcpstddev_ptr[cp]));
      }
    }
  }
}
int main( int argc, char* argv[] ) {
  /* Batchnorm forward/backward driver: checks the TPP / matrix-equation
   * implementation against scalar reference code and benchmarks both,
   * in FP32 (datatype_mode 0) or BF16 (datatype_mode 1).
   * Usage: <exe> [N] [CP] [HW] [CB] [num_HW_blocks] [datatype_mode] [iters] */
  libxsmm_blasint my_eqn10, my_eqn11, my_eqn12, my_eqn16;
  libxsmm_matrix_eqn_function func10, func11, func12, func16;
  libxsmm_meltw_unary_flags jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_NONE;
  libxsmm_meltw_unary_type unary_type;
  libxsmm_meltwfunction_unary reduce_HW_kernel;
  const float eps = FLT_EPSILON;
  libxsmm_blasint i, it, ld, tmp_ld, tmp_ld2;
  unsigned long long l_start, l_end;
  double l_total = 0, l_total2 = 0;
  double t_vec = 0, t_tpp = 0;
  libxsmm_matdiff_info norms_out;
  float *inp, *out, *dinp, *dout, *eqn_dinp, *eqn_dout, *dbeta, *eqn_dbeta, *dgamma, *eqn_dgamma, *eqn_out, *gamma, *beta, *cache_fl, *mean, *var, sum = 0.0;
  libxsmm_bfloat16 *bf16_inp, *bf16_out, *bf16_dinp, *bf16_dout, *bf16_eqn_dinp, *bf16_eqn_dout, *bf16_gamma, *bf16_beta, *bf16_eqn_out;
  /* Default problem size; tensors use the [N, CP, HW, CB] layout. */
  long N = 28;
  long CP = 2;
  long HW = 784;
  long CB = 64;
  long num_HW_blocks = 16;
  int iters = 100;
  int datatype_mode = 0;
  libxsmm_datatype in_dt = LIBXSMM_DATATYPE_F32;
  libxsmm_datatype out_dt = LIBXSMM_DATATYPE_F32;

  /* Optional command-line overrides (positional). */
  if ( argc > 1 ) N = atoi(argv[1]);
  if ( argc > 2 ) CP = atoi(argv[2]);
  if ( argc > 3 ) HW = atoi(argv[3]);
  if ( argc > 4 ) CB = atoi(argv[4]);
  if ( argc > 5 ) num_HW_blocks = atoi(argv[5]);
  if ( argc > 6 ) datatype_mode = atoi(argv[6]);
  if ( argc > 7 ) iters = atoi(argv[7]);

  if (datatype_mode == 0) {
    in_dt = LIBXSMM_DATATYPE_F32;
    out_dt = LIBXSMM_DATATYPE_F32;
  } else if (datatype_mode == 1) {
    in_dt = LIBXSMM_DATATYPE_BF16;
    out_dt = LIBXSMM_DATATYPE_BF16;
  } else {
    printf("ERROR: Supporting only FP32 and BF16 precisions...\n");
    /* FIX: bail out instead of falling through — every correctness and
     * benchmark branch below only handles modes 0 and 1, so continuing
     * would compare meaningless data. */
    return -1;
  }

  /* Allocate FP32 working buffers (2 MiB aligned). */
  inp = (float*) libxsmm_aligned_malloc( sizeof(float)*N*CP*HW*CB, 2097152);
  out = (float*) libxsmm_aligned_malloc( sizeof(float)*N*CP*HW*CB, 2097152);
  dinp = (float*) libxsmm_aligned_malloc( sizeof(float)*N*CP*HW*CB, 2097152);
  dout = (float*) libxsmm_aligned_malloc( sizeof(float)*N*CP*HW*CB, 2097152);
  dgamma = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152);
  dbeta = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152);
  eqn_dinp = (float*) libxsmm_aligned_malloc( sizeof(float)*N*CP*HW*CB, 2097152);
  eqn_dout = (float*) libxsmm_aligned_malloc( sizeof(float)*N*CP*HW*CB, 2097152);
  eqn_dgamma = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152);
  eqn_dbeta = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152);
  gamma = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152);
  beta = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152);
  mean = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152);
  var = (float*) libxsmm_aligned_malloc( sizeof(float)*CP*CB, 2097152);
  eqn_out = (float*) libxsmm_aligned_malloc( sizeof(float)*N*CP*HW*CB, 2097152);
  /* cache_fl is summed between timing runs to flush the last-level cache. */
  cache_fl = (float*) libxsmm_aligned_malloc( sizeof(float)*1024*1024, 2097152);
  /* BF16 shadow buffers for datatype_mode 1. */
  bf16_inp = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*N*CP*HW*CB, 2097152);
  bf16_out = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*N*CP*HW*CB, 2097152);
  bf16_dinp = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*N*CP*HW*CB, 2097152);
  bf16_dout = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*N*CP*HW*CB, 2097152);
  bf16_eqn_dinp = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*N*CP*HW*CB, 2097152);
  bf16_eqn_dout = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*N*CP*HW*CB, 2097152);
  bf16_gamma = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*CP*CB, 2097152);
  bf16_beta = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*CP*CB, 2097152);
  bf16_eqn_out = (libxsmm_bfloat16*) libxsmm_aligned_malloc( sizeof(libxsmm_bfloat16)*N*CP*HW*CB, 2097152);

  libxsmm_init();
  libxsmm_matdiff_clear(&norms_out);

  /* Initializing arrays with random data; BF16 copies are round-to-nearest-even
   * downconversions of the FP32 values so both paths see the same inputs. */
  for ( i = 0; i < N*CP*HW*CB; ++i ) {
    inp[i] = (float)libxsmm_rng_f64();
    out[i] = (float)libxsmm_rng_f64();
    eqn_out[i] = out[i];
    dinp[i] = (float)libxsmm_rng_f64();
    dout[i] = (float)libxsmm_rng_f64();
    eqn_dinp[i] = dinp[i];
    eqn_dout[i] = dout[i];
    libxsmm_rne_convert_fp32_bf16( &inp[i], &bf16_inp[i], 1 );
    libxsmm_rne_convert_fp32_bf16( &out[i], &bf16_out[i], 1 );
    libxsmm_rne_convert_fp32_bf16( &eqn_out[i], &bf16_eqn_out[i], 1 );
    libxsmm_rne_convert_fp32_bf16( &dout[i], &bf16_dout[i], 1 );
    libxsmm_rne_convert_fp32_bf16( &eqn_dout[i], &bf16_eqn_dout[i], 1 );
    libxsmm_rne_convert_fp32_bf16( &dinp[i], &bf16_dinp[i], 1 );
    libxsmm_rne_convert_fp32_bf16( &eqn_dinp[i], &bf16_eqn_dinp[i], 1 );
  }
  for ( i = 0; i < CP*CB; ++i ) {
    gamma[i] = (float)libxsmm_rng_f64();
    beta[i] = (float)libxsmm_rng_f64();
    dbeta[i] = (float)libxsmm_rng_f64();
    dgamma[i] = (float)libxsmm_rng_f64();
    eqn_dbeta[i] = dbeta[i];
    eqn_dgamma[i] = dgamma[i];
    libxsmm_rne_convert_fp32_bf16( &gamma[i], &bf16_gamma[i], 1 );
    libxsmm_rne_convert_fp32_bf16( &beta[i], &bf16_beta[i], 1 );
  }
  for (i = 0; i < 1024 * 1024; i++ ) {
    cache_fl[i] = (float)libxsmm_rng_f64();
  }

  /* JIT helper TPP kernels shared by the fwd and bwd TPP implementations. */
  libxsmm_blasint ldo = CB;
  libxsmm_meltwfunction_unary all_zero_kernel = libxsmm_dispatch_meltw_unary(CB, 1, NULL, &ldo, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_XOR);
  if ( all_zero_kernel == NULL) {
    fprintf( stderr, "JIT for initialization by unary all zero copy kernel failed. Bailing...!\n");
    exit(-1);
  }
  libxsmm_meltwfunction_binary add_kernel = libxsmm_dispatch_meltw_binary(CB, 1, &ldo, &ldo, &ldo, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_MELTW_TYPE_BINARY_ADD);
  if ( add_kernel == NULL) {
    fprintf( stderr, "JIT for initialization of add kernel failed. Bailing...!\n");
    exit(-1);
  }
  libxsmm_meltwfunction_unary copy_kernel = libxsmm_dispatch_meltw_unary(CB, 1, &ldo, &ldo, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_MELTW_FLAG_UNARY_NONE, LIBXSMM_MELTW_TYPE_UNARY_IDENTITY);
  if ( copy_kernel == NULL) {
    fprintf( stderr, "JIT for initialization by copy kernel failed. Bailing...!\n");
    exit(-1);
  }

  /* TPPs for reducing X and X2 in HW*/
  ld = CB;
  tmp_ld = CB;
  unary_type = LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_X2_OP_ADD;
  jit_reduce_flags = LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS;
  reduce_HW_kernel = libxsmm_dispatch_meltw_unary(CB, HW/num_HW_blocks, &ld, &tmp_ld, in_dt, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, jit_reduce_flags, unary_type);
  /* FIX: check the dispatch result like the three helper kernels above. */
  if ( reduce_HW_kernel == NULL) {
    fprintf( stderr, "JIT for HW reduce kernel failed. Bailing...!\n");
    exit(-1);
  }

  /* TPP for scaling */
  ld = CB;
  tmp_ld = 1;
  tmp_ld2 = 1;
  my_eqn10 = libxsmm_matrix_eqn_create(); /* y = (s*x + b)*gamma + beta */
  libxsmm_matrix_eqn_push_back_ternary_op( my_eqn10, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32 );
  libxsmm_matrix_eqn_push_back_ternary_op( my_eqn10, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32 );
  libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, HW/num_HW_blocks, ld, 0, 0, in_dt ); /* x = [HW, CB] */
  libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, 1, tmp_ld, 1, 0, LIBXSMM_DATATYPE_F32 ); /* s = [CB] */
  libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, 1, tmp_ld, 2, 0, LIBXSMM_DATATYPE_F32 ); /* b = [CB] */
  libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, 1, tmp_ld2, 3, 0, in_dt ); /* gamma = [CB] */
  libxsmm_matrix_eqn_push_back_arg( my_eqn10, CB, 1, tmp_ld2, 4, 0, in_dt ); /* beta = [CB] */
  /* libxsmm_matrix_eqn_tree_print( my_eqn10 ); */
  /* libxsmm_matrix_eqn_rpn_print( my_eqn10 ); */
  func10 = libxsmm_dispatch_matrix_eqn( CB, HW/num_HW_blocks, &ld, out_dt, my_eqn10 ); /* y = [HW, CB] */

  /* Check FWD correctness: TPP result in eqn_out vs. scalar result in out.
   * The pure reference kernel only supports CB == 1. */
  if (datatype_mode == 0) {
    tpp_batchnorm_fwd_fp32(N, CP, HW, CB, num_HW_blocks, inp, gamma, beta, mean, var, eqn_out, eps, func10, reduce_HW_kernel, all_zero_kernel, add_kernel, copy_kernel);
    if(CB == 1)
      reference_batchnorm_fwd_fp32(N, CP, HW, CB, inp, gamma, beta, mean, var, out, eps);
    else
      scaler_batchnorm_fwd_fp32(N, CP, HW, CB, inp, gamma, beta, mean, var, out, eps);
  } else if (datatype_mode == 1) {
    tpp_batchnorm_fwd_bf16(N, CP, HW, CB, num_HW_blocks, bf16_inp, bf16_gamma, bf16_beta, mean, var, bf16_eqn_out, eps, func10, reduce_HW_kernel, all_zero_kernel, add_kernel, copy_kernel);
    scaler_batchnorm_fwd_fp32(N, CP, HW, CB, inp, gamma, beta, mean, var, out, eps);
    for ( i = 0; i < N*CP*HW*CB; ++i ) {
      /* out[i] = upconvert_bf16(bf16_out[i]); */
      eqn_out[i] = upconvert_bf16(bf16_eqn_out[i]);
    }
  }

  /* compare */
  printf("############################################\n");
  if (datatype_mode == 0) {
    printf("# Correctness FP32 FWD Batchnorm - Output #\n");
  } else {
    printf("# Correctness BF16 FWD Batchnorm - Output #\n");
  }
  printf("############################################\n");
  libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, N*CP*HW*CB, 1, out, eqn_out, 0, 0);
  printf("L1 reference : %.25g\n", norms_out.l1_ref);
  printf("L1 test : %.25g\n", norms_out.l1_tst);
  printf("L2 abs.error : %.24f\n", norms_out.l2_abs);
  printf("L2 rel.error : %.24f\n", norms_out.l2_rel);
  printf("Linf abs.error: %.24f\n", norms_out.linf_abs);
  printf("Linf rel.error: %.24f\n", norms_out.linf_rel);
  printf("Check-norm : %.24f\n\n", norms_out.normf_rel);

  /* FWD benchmark: cache-flush loop, then time iters runs of each variant. */
  if (datatype_mode == 0) {
    for (i = 0; i < 1024 * 1024; i++ ) {
      sum += cache_fl[i];
    }
    scaler_batchnorm_fwd_fp32(N, CP, HW, CB, inp, gamma, beta, mean, var, out, eps);
    l_start = libxsmm_timer_tick();
    for (it = 0; it < iters; it++) {
      scaler_batchnorm_fwd_fp32(N, CP, HW, CB, inp, gamma, beta, mean, var, out, eps);
    }
    l_end = libxsmm_timer_tick();
    l_total = libxsmm_timer_duration(l_start, l_end);
    printf("Scaler batchnorm time FWD = %.5g\n", ((double)(l_total)));
    for (i = 0; i < 1024 * 1024; i++ ) {
      sum += cache_fl[i] + (float)l_total;
    }
    tpp_batchnorm_fwd_fp32(N, CP, HW, CB, num_HW_blocks, inp, gamma, beta, mean, var, eqn_out, eps, func10, reduce_HW_kernel, all_zero_kernel, add_kernel, copy_kernel);
    l_start = libxsmm_timer_tick();
    for (it = 0; it < iters; it++) {
      tpp_batchnorm_fwd_fp32(N, CP, HW, CB, num_HW_blocks, inp, gamma, beta, mean, var, eqn_out, eps, func10, reduce_HW_kernel, all_zero_kernel, add_kernel, copy_kernel);
    }
    l_end = libxsmm_timer_tick();
    l_total2 = libxsmm_timer_duration(l_start, l_end);
    printf("TPP batchnorm time FWD = %.5g\n", ((double)(l_total2)));
    printf("Speedup FWD is %.5g\n", l_total/l_total2);
  } else if (datatype_mode == 1) {
    for (i = 0; i < 1024 * 1024; i++ ) {
      sum += cache_fl[i];
    }
    scaler_batchnorm_fwd_fp32(N, CP, HW, CB, inp, gamma, beta, mean, var, out, eps);
    l_start = libxsmm_timer_tick();
    for (it = 0; it < iters; it++) {
      scaler_batchnorm_fwd_fp32(N, CP, HW, CB, inp, gamma, beta, mean, var, out, eps);
    }
    l_end = libxsmm_timer_tick();
    l_total = libxsmm_timer_duration(l_start, l_end);
    printf("Scaler batchnorm (FP32) time FWD = %.5g\n", ((double)(l_total)));
    for (i = 0; i < 1024 * 1024; i++ ) {
      sum += cache_fl[i] + (float)l_total;
    }
    tpp_batchnorm_fwd_bf16(N, CP, HW, CB, num_HW_blocks, bf16_inp, bf16_gamma, bf16_beta, mean, var, bf16_eqn_out, eps, func10, reduce_HW_kernel, all_zero_kernel, add_kernel, copy_kernel);
    l_start = libxsmm_timer_tick();
    for (it = 0; it < iters; it++) {
      tpp_batchnorm_fwd_bf16(N, CP, HW, CB, num_HW_blocks, bf16_inp, bf16_gamma, bf16_beta, mean, var, bf16_eqn_out, eps, func10, reduce_HW_kernel, all_zero_kernel, add_kernel, copy_kernel);
    }
    l_end = libxsmm_timer_tick();
    l_total2 = libxsmm_timer_duration(l_start, l_end);
    printf("TPP batchnorm (BF16) time FWD = %.5g\n", ((double)(l_total2)));
    printf("Speedup FWD is %.5g\n", l_total/l_total2);
  }

  /* Create MatEq for bwd layernorm */
  ld = CB;
  tmp_ld2 = 1;
  /* dgamma function */
  my_eqn11 = libxsmm_matrix_eqn_create(); /* dgamma = ((inp *a + b) * dout) + dgamma */
  libxsmm_matrix_eqn_push_back_binary_op(my_eqn11, LIBXSMM_MELTW_TYPE_BINARY_ADD, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32); /* dgamma = ((inp *a + b) * dout) + dgamma */
  libxsmm_matrix_eqn_push_back_unary_op(my_eqn11, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_DATATYPE_F32); /* [HW, CB] -> [CB] */
  libxsmm_matrix_eqn_push_back_binary_op(my_eqn11, LIBXSMM_MELTW_TYPE_BINARY_MUL, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32); /* ((inp *a + b) * dout) */
  libxsmm_matrix_eqn_push_back_ternary_op( my_eqn11, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32 );
  libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, HW/num_HW_blocks, ld, 0, 0, in_dt ); /* inp [HW, CB] */
  libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, 1, 1, 1, 0, LIBXSMM_DATATYPE_F32 ); /* a [CB] */
  libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, 1, 1, 2, 0, LIBXSMM_DATATYPE_F32 ); /* b [CB] */
  libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */
  libxsmm_matrix_eqn_push_back_arg( my_eqn11, CB, 1, 1, 4, 0, LIBXSMM_DATATYPE_F32 ); /* dgamma [CB] */
  /* libxsmm_matrix_eqn_tree_print( my_eqn11 ); */
  /* libxsmm_matrix_eqn_rpn_print( my_eqn11 ); */
  func11 = libxsmm_dispatch_matrix_eqn( CB, 1, &tmp_ld2, LIBXSMM_DATATYPE_F32, my_eqn11 ); /* dgamma [CB] */

  /* dbeta function */
  my_eqn12 = libxsmm_matrix_eqn_create(); /* dbeta [CB] = dout [HW, CB] + dbeta [CB] */
  libxsmm_matrix_eqn_push_back_binary_op( my_eqn12, LIBXSMM_MELTW_TYPE_BINARY_ADD, LIBXSMM_MELTW_FLAG_BINARY_NONE, LIBXSMM_DATATYPE_F32 ); /* dbeta_tmp [HW, CB] */
  libxsmm_matrix_eqn_push_back_unary_op(my_eqn12, LIBXSMM_MELTW_TYPE_UNARY_REDUCE_X_OP_ADD, LIBXSMM_MELTW_FLAG_UNARY_REDUCE_COLS, LIBXSMM_DATATYPE_F32); /* [HW, CB] -> [CB] */
  libxsmm_matrix_eqn_push_back_arg( my_eqn12, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */
  libxsmm_matrix_eqn_push_back_arg( my_eqn12, CB, 1, 1, 5, 0, LIBXSMM_DATATYPE_F32 ); /* dbeta [CB] */
  /* libxsmm_matrix_eqn_tree_print( my_eqn12 ); */
  /* libxsmm_matrix_eqn_rpn_print( my_eqn12 ); */
  func12 = libxsmm_dispatch_matrix_eqn( CB, 1, &tmp_ld2, LIBXSMM_DATATYPE_F32, my_eqn12 ); /* dbeta [CB] */

  /* din = gamma_ptr[v] * brstd_ptr[v] * recp_nhw * (nhw*del_output_ptr[v] - (del_beta_ptr[v] + (input_ptr[v] - bmean_ptr[v]) * del_gamma_ptr[v] * brstd_ptr[v])) */
  /* din = gamma_ptr[v] * brstd_ptr[v] *del_output_ptr[v] - gamma_ptr[v] * brstd_ptr[v] * recp_nhw * (del_beta_ptr[v] + (input_ptr[v] - bmean_ptr[v]) * del_gamma_ptr[v] * brstd_ptr[v])) */
  /* din = gamma_ptr[v] * brstd_ptr[v] *del_output_ptr[v] - gamma_ptr[v] * brstd_ptr[v] * recp_nhw * del_beta_ptr[v] + gamma_ptr[v] * brstd_ptr[v] * recp_nhw * (input_ptr[v] - bmean_ptr[v]) * del_gamma_ptr[v] * brstd_ptr[v]) */
  /* din = a * del_output_ptr[v] + b * input_ptr[v] + c */
  /* a = gamma_ptr[CB] * brstd_ptr[CB] */
  /* b = gamma_ptr[CB] * del_gamma_ptr[v] * brstd_ptr[CB] * brstd_ptr[CB] * recp_nhw */
  /* c = -gamma_ptr[CB] * brstd_ptr[CB] * recp_nhw * del_beta_ptr[CB] + gamma_ptr[CB] * brstd_ptr[CB] * recp_nhw * bmean_ptr[CB] * del_gamma_ptr[CB] * brstd_ptr[CB]) */
  /* din long equation */
  my_eqn16 = libxsmm_matrix_eqn_create(); /* din = a * dout + (b * inp + c) */
  libxsmm_matrix_eqn_push_back_ternary_op( my_eqn16, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_0 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32 );
  libxsmm_matrix_eqn_push_back_arg( my_eqn16, CB, 1, 1, 1, 0, LIBXSMM_DATATYPE_F32 ); /* a [CB] */
  libxsmm_matrix_eqn_push_back_arg( my_eqn16, CB, HW/num_HW_blocks, ld, 3, 0, in_dt ); /* dout [HW, CB] */
  libxsmm_matrix_eqn_push_back_ternary_op( my_eqn16, LIBXSMM_MELTW_TYPE_TERNARY_MULADD, LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_1 | LIBXSMM_MELTW_FLAG_TERNARY_BCAST_COL_IN_2 | LIBXSMM_MELTW_FLAG_TERNARY_REUSE_IN_2_AS_OUT, LIBXSMM_DATATYPE_F32 );
  libxsmm_matrix_eqn_push_back_arg( my_eqn16, CB, HW/num_HW_blocks, ld, 0, 0, in_dt ); /* inp [HW, CB] */
  libxsmm_matrix_eqn_push_back_arg( my_eqn16, CB, 1, 1, 2, 0, LIBXSMM_DATATYPE_F32 ); /* b [CB] */
  libxsmm_matrix_eqn_push_back_arg( my_eqn16, CB, 1, 1, 7, 0, LIBXSMM_DATATYPE_F32 ); /* c [CB] */
  /* libxsmm_matrix_eqn_tree_print( my_eqn16 ); */
  /* libxsmm_matrix_eqn_rpn_print( my_eqn16 ); */
  func16 = libxsmm_dispatch_matrix_eqn( CB, HW/num_HW_blocks, &ld, in_dt, my_eqn16 ); /* din [HW, CB] */

  /* Check BWD correctness: TPP results vs. scalar results. */
  if (datatype_mode == 0) {
    tpp_batchnorm_bwd_fp32(N, CP, HW, CB, num_HW_blocks, eqn_dout, inp, mean, var, gamma, eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func16, eps, all_zero_kernel, add_kernel, copy_kernel);
    if (CB == 1)
      reference_batchnorm_bwd_fp32(N, CP, HW, CB, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps);
    else
      scaler_batchnorm_bwd_fp32(N, CP, HW, CB, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps);
  } else if (datatype_mode == 1) {
    tpp_batchnorm_bwd_bf16(N, CP, HW, CB, num_HW_blocks, bf16_eqn_dout, bf16_inp, mean, var, bf16_gamma, bf16_eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func16, eps, all_zero_kernel, add_kernel, copy_kernel);
    scaler_batchnorm_bwd_fp32(N, CP, HW, CB, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps);
    for ( i = 0; i < N*CP*HW*CB; ++i ) {
      /* dinp[i] = upconvert_bf16(bf16_dinp[i]); */
      eqn_dinp[i] = upconvert_bf16(bf16_eqn_dinp[i]);
    }
  }

  /* compare */
  printf("############################################\n");
  if (datatype_mode == 0) {
    printf("# Correctness FP32 BWD Batchnorm - Dinput #\n");
  } else {
    printf("# Correctness BF16 BWD Batchnorm - Dinput #\n");
  }
  printf("############################################\n");
  libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, N*CP*HW*CB, 1, dinp, eqn_dinp, 0, 0);
  printf("L1 reference : %.25g\n", norms_out.l1_ref);
  printf("L1 test : %.25g\n", norms_out.l1_tst);
  printf("L2 abs.error : %.24f\n", norms_out.l2_abs);
  printf("L2 rel.error : %.24f\n", norms_out.l2_rel);
  printf("Linf abs.error: %.24f\n", norms_out.linf_abs);
  printf("Linf rel.error: %.24f\n", norms_out.linf_rel);
  printf("Check-norm : %.24f\n\n", norms_out.normf_rel);

  printf("###########################################\n");
  if (datatype_mode == 0) {
    printf("# Correctness FP32 BWD Batchnorm - Dbeta #\n");
  } else {
    printf("# Correctness BF16 BWD Batchnorm - Dbeta #\n");
  }
  printf("###########################################\n");
  libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, CP*CB, 1, dbeta, eqn_dbeta, 0, 0);
  printf("L1 reference : %.25g\n", norms_out.l1_ref);
  printf("L1 test : %.25g\n", norms_out.l1_tst);
  printf("L2 abs.error : %.24f\n", norms_out.l2_abs);
  printf("L2 rel.error : %.24f\n", norms_out.l2_rel);
  printf("Linf abs.error: %.24f\n", norms_out.linf_abs);
  printf("Linf rel.error: %.24f\n", norms_out.linf_rel);
  printf("Check-norm : %.24f\n\n", norms_out.normf_rel);

  printf("############################################\n");
  if (datatype_mode == 0) {
    printf("# Correctness FP32 BWD Batchnorm - Dgamma #\n");
  } else {
    printf("# Correctness BF16 BWD Batchnorm - Dgamma #\n");
  }
  printf("############################################\n");
  libxsmm_matdiff(&norms_out, LIBXSMM_DATATYPE_F32, CP*CB, 1, dgamma, eqn_dgamma, 0, 0);
  printf("L1 reference : %.25g\n", norms_out.l1_ref);
  printf("L1 test : %.25g\n", norms_out.l1_tst);
  printf("L2 abs.error : %.24f\n", norms_out.l2_abs);
  printf("L2 rel.error : %.24f\n", norms_out.l2_rel);
  printf("Linf abs.error: %.24f\n", norms_out.linf_abs);
  printf("Linf rel.error: %.24f\n", norms_out.linf_rel);
  printf("Check-norm : %.24f\n\n", norms_out.normf_rel);

  /* BWD benchmark: same cache-flush / timing structure as the FWD pass. */
  if (datatype_mode == 0) {
    for (i = 0; i < 1024 * 1024; i++ ) {
      sum += cache_fl[i];
    }
    scaler_batchnorm_bwd_fp32(N, CP, HW, CB, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps);
    l_start = libxsmm_timer_tick();
    for (it = 0; it < iters; it++) {
      scaler_batchnorm_bwd_fp32(N, CP, HW, CB, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps);
    }
    l_end = libxsmm_timer_tick();
    l_total = libxsmm_timer_duration(l_start, l_end);
    printf("Scaler batchnorm time BWD = %.5g\n", ((double)(l_total)));
    for (i = 0; i < 1024 * 1024; i++ ) {
      sum += cache_fl[i] + (float)l_total;
    }
    tpp_batchnorm_bwd_fp32(N, CP, HW, CB, num_HW_blocks, eqn_dout, inp, mean, var, gamma, eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func16, eps, all_zero_kernel, add_kernel, copy_kernel);
    l_start = libxsmm_timer_tick();
    for (it = 0; it < iters; it++) {
      tpp_batchnorm_bwd_fp32(N, CP, HW, CB, num_HW_blocks, eqn_dout, inp, mean, var, gamma, eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func16, eps, all_zero_kernel, add_kernel, copy_kernel);
    }
    l_end = libxsmm_timer_tick();
    l_total2 = libxsmm_timer_duration(l_start, l_end);
    printf("TPP batchnorm time BWD = %.5g\n", ((double)(l_total2)));
    printf("Speedup BWD is %.5g\n", l_total/l_total2);
  } else if (datatype_mode == 1) {
    for (i = 0; i < 1024 * 1024; i++ ) {
      sum += cache_fl[i];
    }
    scaler_batchnorm_bwd_fp32(N, CP, HW, CB, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps);
    l_start = libxsmm_timer_tick();
    for (it = 0; it < iters; it++) {
      scaler_batchnorm_bwd_fp32(N, CP, HW, CB, dout, inp, mean, var, gamma, dinp, dgamma, dbeta, eps);
    }
    l_end = libxsmm_timer_tick();
    l_total = libxsmm_timer_duration(l_start, l_end);
    printf("Scaler batchnorm (FP32) time BWD = %.5g\n", ((double)(l_total)));
    for (i = 0; i < 1024 * 1024; i++ ) {
      sum += cache_fl[i] + (float)l_total;
    }
    tpp_batchnorm_bwd_bf16(N, CP, HW, CB, num_HW_blocks, bf16_eqn_dout, bf16_inp, mean, var, bf16_gamma, bf16_eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func16, eps, all_zero_kernel, add_kernel, copy_kernel);
    l_start = libxsmm_timer_tick();
    for (it = 0; it < iters; it++) {
      tpp_batchnorm_bwd_bf16(N, CP, HW, CB, num_HW_blocks, bf16_eqn_dout, bf16_inp, mean, var, bf16_gamma, bf16_eqn_dinp, eqn_dgamma, eqn_dbeta, func11, func12, func16, eps, all_zero_kernel, add_kernel, copy_kernel);
    }
    l_end = libxsmm_timer_tick();
    l_total2 = libxsmm_timer_duration(l_start, l_end);
    printf("TPP batchnorm (BF16) time BWD = %.5g\n", ((double)(l_total2)));
    printf("Speedup BWD is %.5g\n", l_total/l_total2);
  }

  /* printf("Running sum is %.5f\n", sum); */
  t_tpp += l_total2;
  t_vec += l_total;

  printf("\n\n=================================\n");
  printf("Total Speedup via TPP Matrix equation is %.5g\n", t_vec/t_tpp);
  printf("=================================\n");

  /* Release all buffers. */
  libxsmm_free(inp);
  libxsmm_free(out);
  libxsmm_free(dinp);
  libxsmm_free(dout);
  libxsmm_free(eqn_dinp);
  libxsmm_free(eqn_dout);
  libxsmm_free(bf16_dinp);
  libxsmm_free(bf16_dout);
  libxsmm_free(bf16_eqn_dinp);
  libxsmm_free(bf16_eqn_dout);
  libxsmm_free(dgamma);
  libxsmm_free(dbeta);
  libxsmm_free(eqn_dgamma);
  libxsmm_free(eqn_dbeta);
  libxsmm_free(mean);
  libxsmm_free(var);
  libxsmm_free(gamma);
  libxsmm_free(beta);
  libxsmm_free(eqn_out);
  libxsmm_free(bf16_inp);
  libxsmm_free(bf16_out);
  libxsmm_free(bf16_gamma);
  libxsmm_free(bf16_beta);
  libxsmm_free(bf16_eqn_out);
  libxsmm_free(cache_fl);

  return 0;
}
|
munit.c | /* Copyright (c) 2013-2018 Evan Nemerson <evan@nemerson.com>
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/*** Configuration ***/
/* This is just where the output from the test goes. It's really just
* meant to let you choose stdout or stderr, but if anyone really want
* to direct it to a file let me know, it would be fairly easy to
* support. */
#if !defined(MUNIT_OUTPUT_FILE)
# define MUNIT_OUTPUT_FILE stdout
#endif
/* This is a bit more useful; it tells µnit how to format the seconds in
* timed tests. If your tests run for longer you might want to reduce
* it, and if your computer is really fast and your tests are tiny you
* can increase it. */
#if !defined(MUNIT_TEST_TIME_FORMAT)
# define MUNIT_TEST_TIME_FORMAT "0.8f"
#endif
/* If you have long test names you might want to consider bumping
* this. The result information takes 43 characters. */
#if !defined(MUNIT_TEST_NAME_LEN)
# define MUNIT_TEST_NAME_LEN 37
#endif
/* If you don't like the timing information, you can disable it by
* defining MUNIT_DISABLE_TIMING. */
#if !defined(MUNIT_DISABLE_TIMING)
# define MUNIT_ENABLE_TIMING
#endif
/*** End configuration ***/
#if defined(_POSIX_C_SOURCE) && (_POSIX_C_SOURCE < 200809L)
# undef _POSIX_C_SOURCE
#endif
#if !defined(_POSIX_C_SOURCE)
# define _POSIX_C_SOURCE 200809L
#endif
/* Solaris freaks out if you try to use a POSIX or SUS standard without
* the "right" C standard. */
#if defined(_XOPEN_SOURCE)
# undef _XOPEN_SOURCE
#endif
#if defined(__STDC_VERSION__)
# if __STDC_VERSION__ >= 201112L
# define _XOPEN_SOURCE 700
# elif __STDC_VERSION__ >= 199901L
# define _XOPEN_SOURCE 600
# endif
#endif
/* Because, according to Microsoft, POSIX is deprecated. You've got
* to appreciate the chutzpah. */
#if defined(_MSC_VER) && !defined(_CRT_NONSTDC_NO_DEPRECATE)
# define _CRT_NONSTDC_NO_DEPRECATE
#endif
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L)
# include <stdbool.h>
#elif defined(_WIN32)
/* https://msdn.microsoft.com/en-us/library/tf4dy80a.aspx */
#endif
#include <limits.h>
#include <time.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <setjmp.h>
#if !defined(MUNIT_NO_NL_LANGINFO) && !defined(_WIN32)
#define MUNIT_NL_LANGINFO
#include <locale.h>
#include <langinfo.h>
#include <strings.h>
#endif
#if !defined(_WIN32)
# include <unistd.h>
# include <sys/types.h>
# include <sys/wait.h>
#else
# include <windows.h>
# include <io.h>
# include <fcntl.h>
# if !defined(STDERR_FILENO)
# define STDERR_FILENO _fileno(stderr)
# endif
#endif
#include "munit.h"
#define MUNIT_STRINGIFY(x) #x
#define MUNIT_XSTRINGIFY(x) MUNIT_STRINGIFY(x)
#if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_CC) || defined(__IBMCPP__)
# define MUNIT_THREAD_LOCAL __thread
#elif (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201102L)) || defined(_Thread_local)
# define MUNIT_THREAD_LOCAL _Thread_local
#elif defined(_WIN32)
# define MUNIT_THREAD_LOCAL __declspec(thread)
#endif
/* MSVC 12.0 will emit a warning at /W4 for code like 'do { ... }
* while (0)', or 'do { ... } while (1)'. I'm pretty sure nobody
* at Microsoft compiles with /W4. */
#if defined(_MSC_VER) && (_MSC_VER <= 1800)
#pragma warning(disable: 4127)
#endif
#if defined(_WIN32) || defined(__EMSCRIPTEN__)
# define MUNIT_NO_FORK
#endif
#if defined(__EMSCRIPTEN__)
# define MUNIT_NO_BUFFER
#endif
/*** Logging ***/
static MunitLogLevel munit_log_level_visible = MUNIT_LOG_INFO;
static MunitLogLevel munit_log_level_fatal = MUNIT_LOG_ERROR;
#if defined(MUNIT_THREAD_LOCAL)
static MUNIT_THREAD_LOCAL munit_bool munit_error_jmp_buf_valid = 0;
static MUNIT_THREAD_LOCAL jmp_buf munit_error_jmp_buf;
#endif
/* At certain warning levels, mingw will trigger warnings about
 * suggesting the format attribute, which we've explicitly *not* set
* because it will then choke on our attempts to use the MS-specific
* I64 modifier for size_t (which we have to use since MSVC doesn't
* support the C99 z modifier). */
#if defined(__MINGW32__) || defined(__MINGW64__)
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wsuggest-attribute=format"
#endif
MUNIT_PRINTF(5, 0)
static void
munit_logf_exv(MunitLogLevel level, FILE *fp, const char *filename, int line, const char *format, va_list ap) {
  const char *label;

  /* Messages below the visibility threshold are dropped silently. */
  if (level < munit_log_level_visible)
    return;

  /* Map the level to its human-readable label. */
  switch (level) {
    case MUNIT_LOG_DEBUG:   label = "Debug";   break;
    case MUNIT_LOG_INFO:    label = "Info";    break;
    case MUNIT_LOG_WARNING: label = "Warning"; break;
    case MUNIT_LOG_ERROR:   label = "Error";   break;
    default:
      /* Unknown level: report the mistake at ERROR level (which may
       * abort or longjmp out of the current test). */
      munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Invalid log level (%d)", level);
      return;
  }

  fputs(label, fp);
  fputs(": ", fp);
  if (filename != NULL)
    fprintf(fp, "%s:%d: ", filename, line);
  vfprintf(fp, format, ap);
  fputc('\n', fp);
}
MUNIT_PRINTF(3, 4)
static void
munit_logf_internal(MunitLogLevel level, FILE *fp, const char *format, ...) {
  /* printf-style front end: collect the variadic arguments and hand
   * them to the exv worker with no filename/line attribution. */
  va_list args;
  va_start(args, format);
  munit_logf_exv(level, fp, NULL, 0, format, args);
  va_end(args);
}
static void
munit_log_internal(MunitLogLevel level, FILE *fp, const char *message) {
  /* Log a fixed string; routing it through "%s" keeps any '%'
   * characters in `message` literal. */
  munit_logf_internal(level, fp, "%s", message);
}
void
munit_logf_ex(MunitLogLevel level, const char *filename, int line, const char *format, ...) {
  va_list args;

  va_start(args, format);
  munit_logf_exv(level, stderr, filename, line, format, args);
  va_end(args);

  /* Fatal levels do not return to the caller: unwind to the harness
   * when a recovery point was installed, otherwise abort. */
  if (level < munit_log_level_fatal)
    return;
#if defined(MUNIT_THREAD_LOCAL)
  if (munit_error_jmp_buf_valid)
    longjmp(munit_error_jmp_buf, 1);
#endif
  abort();
}
void
munit_errorf_ex(const char *filename, int line, const char *format, ...) {
  va_list args;

  /* Emit the message at ERROR level... */
  va_start(args, format);
  munit_logf_exv(MUNIT_LOG_ERROR, stderr, filename, line, format, args);
  va_end(args);

  /* ...then fail the current test: longjmp back to the harness when
   * possible, otherwise terminate the process. */
#if defined(MUNIT_THREAD_LOCAL)
  if (munit_error_jmp_buf_valid)
    longjmp(munit_error_jmp_buf, 1);
#endif
  abort();
}
#if defined(__MINGW32__) || defined(__MINGW64__)
#pragma GCC diagnostic pop
#endif
#if !defined(MUNIT_STRERROR_LEN)
# define MUNIT_STRERROR_LEN 80
#endif
/* Log `msg` together with a textual description of the current errno
 * value, using a reentrant strerror variant where one is available. */
static void
munit_log_errno(MunitLogLevel level, FILE *fp, const char *msg) {
#if defined(MUNIT_NO_STRERROR_R) || (defined(__MINGW32__) && !defined(MINGW_HAS_SECURE_API))
/* No strerror_r/strerror_s: plain strerror is not thread-safe, but it
 * is the only portable fallback here. */
munit_logf_internal(level, fp, "%s: %s (%d)", msg, strerror(errno), errno);
#else
char munit_error_str[MUNIT_STRERROR_LEN];
munit_error_str[0] = '\0';
#if !defined(_WIN32)
/* NOTE(review): the return value of strerror_r is ignored; on failure
 * the pre-cleared buffer stays empty and an empty string is logged. */
strerror_r(errno, munit_error_str, MUNIT_STRERROR_LEN);
#else
strerror_s(munit_error_str, MUNIT_STRERROR_LEN, errno);
#endif
munit_logf_internal(level, fp, "%s: %s (%d)", msg, munit_error_str, errno);
#endif
}
/*** Memory allocation ***/
void *
munit_malloc_ex(const char *filename, int line, size_t size) {
  void *block;

  /* A zero-byte request yields NULL rather than a unique pointer. */
  if (size == 0)
    return NULL;

  /* calloc so callers always receive zero-initialized memory. */
  block = calloc(1, size);
  if (MUNIT_UNLIKELY(block == NULL)) {
    /* ERROR level is fatal by default, so this normally does not
     * return (abort or longjmp back to the harness). */
    munit_logf_ex(MUNIT_LOG_ERROR, filename, line, "Failed to allocate %" MUNIT_SIZE_MODIFIER "u bytes.", size);
  }
  return block;
}
/*** Timer code ***/
#if defined(MUNIT_ENABLE_TIMING)
#define psnip_uint64_t munit_uint64_t
#define psnip_uint32_t munit_uint32_t
/* Code copied from portable-snippets
* <https://github.com/nemequ/portable-snippets/>. If you need to
* change something, please do it there so we can keep the code in
* sync. */
/* Clocks (v1)
 * Portable Snippets - https://github.com/nemequ/portable-snippets
* Created by Evan Nemerson <evan@nemerson.com>
*
* To the extent possible under law, the authors have waived all
* copyright and related or neighboring rights to this code. For
* details, see the Creative Commons Zero 1.0 Universal license at
* https://creativecommons.org/publicdomain/zero/1.0/
*/
#if !defined(PSNIP_CLOCK_H)
#define PSNIP_CLOCK_H
#if !defined(psnip_uint64_t)
# include "../exact-int/exact-int.h"
#endif
#if !defined(PSNIP_CLOCK_STATIC_INLINE)
# if defined(__GNUC__)
# define PSNIP_CLOCK__COMPILER_ATTRIBUTES __attribute__((__unused__))
# else
# define PSNIP_CLOCK__COMPILER_ATTRIBUTES
# endif
# define PSNIP_CLOCK__FUNCTION PSNIP_CLOCK__COMPILER_ATTRIBUTES static
#endif
/* Selects which clock psnip_clock_get_time()/psnip_clock_get_precision()
 * operate on. */
enum PsnipClockType {
/* This clock provides the current time, in units since 1970-01-01
 * 00:00:00 UTC not including leap seconds. In other words, UNIX
 * time. Keep in mind that this clock doesn't account for leap
 * seconds, and can go backwards (think NTP adjustments). */
PSNIP_CLOCK_TYPE_WALL = 1,
/* The CPU time is a clock which increases only when the current
 * process is active (i.e., it doesn't increment while blocking on
 * I/O). */
PSNIP_CLOCK_TYPE_CPU = 2,
/* Monotonic time is always running (unlike CPU time), but it only
 ever moves forward unless you reboot the system. Things like NTP
 adjustments have no effect on this clock. */
PSNIP_CLOCK_TYPE_MONOTONIC = 3
};
/* A split second/nanosecond timestamp (cf. struct timespec), using
 * fixed-width unsigned fields for portability. */
struct PsnipClockTimespec {
psnip_uint64_t seconds; /* whole seconds */
psnip_uint64_t nanoseconds; /* nanosecond part past `seconds` */
};
/* Methods we support: */
#define PSNIP_CLOCK_METHOD_CLOCK_GETTIME 1
#define PSNIP_CLOCK_METHOD_TIME 2
#define PSNIP_CLOCK_METHOD_GETTIMEOFDAY 3
#define PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER 4
#define PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME 5
#define PSNIP_CLOCK_METHOD_CLOCK 6
#define PSNIP_CLOCK_METHOD_GETPROCESSTIMES 7
#define PSNIP_CLOCK_METHOD_GETRUSAGE 8
#define PSNIP_CLOCK_METHOD_GETSYSTEMTIMEPRECISEASFILETIME 9
#define PSNIP_CLOCK_METHOD_GETTICKCOUNT64 10
#include <assert.h>
#if defined(HEDLEY_UNREACHABLE)
# define PSNIP_CLOCK_UNREACHABLE() HEDLEY_UNREACHABLE()
#else
# define PSNIP_CLOCK_UNREACHABLE() assert(0)
#endif
/* Choose an implementation */
/* #undef PSNIP_CLOCK_WALL_METHOD */
/* #undef PSNIP_CLOCK_CPU_METHOD */
/* #undef PSNIP_CLOCK_MONOTONIC_METHOD */
/* We want to be able to detect the libc implementation, so we include
<limits.h> (<features.h> isn't available everywhere). */
#if defined(__unix__) || defined(__unix) || defined(__linux__)
# include <limits.h>
# include <unistd.h>
#endif
#if defined(_POSIX_TIMERS) && (_POSIX_TIMERS > 0)
/* These are known to work without librt. If you know of others
* please let us know so we can add them. */
# if \
(defined(__GLIBC__) && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 17))) || \
(defined(__FreeBSD__))
# define PSNIP_CLOCK_HAVE_CLOCK_GETTIME
# elif !defined(PSNIP_CLOCK_NO_LIBRT)
# define PSNIP_CLOCK_HAVE_CLOCK_GETTIME
# endif
#endif
#if defined(_WIN32)
# if !defined(PSNIP_CLOCK_CPU_METHOD)
# define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_GETPROCESSTIMES
# endif
# if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
# define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER
# endif
#endif
#if defined(__MACH__) && !defined(__gnu_hurd__)
# if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
# define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME
# endif
#endif
#if defined(PSNIP_CLOCK_HAVE_CLOCK_GETTIME)
# include <time.h>
# if !defined(PSNIP_CLOCK_WALL_METHOD)
# if defined(CLOCK_REALTIME_PRECISE)
# define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME_PRECISE
# elif !defined(__sun)
# define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_WALL CLOCK_REALTIME
# endif
# endif
# if !defined(PSNIP_CLOCK_CPU_METHOD)
# if defined(_POSIX_CPUTIME) || defined(CLOCK_PROCESS_CPUTIME_ID)
# define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_PROCESS_CPUTIME_ID
# elif defined(CLOCK_VIRTUAL)
# define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_CPU CLOCK_VIRTUAL
# endif
# endif
# if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
# if defined(CLOCK_MONOTONIC_RAW)
# define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC
# elif defined(CLOCK_MONOTONIC_PRECISE)
# define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC_PRECISE
# elif defined(_POSIX_MONOTONIC_CLOCK) || defined(CLOCK_MONOTONIC)
# define PSNIP_CLOCK_MONOTONIC_METHOD PSNIP_CLOCK_METHOD_CLOCK_GETTIME
# define PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC CLOCK_MONOTONIC
# endif
# endif
#endif
#if defined(_POSIX_VERSION) && (_POSIX_VERSION >= 200112L)
# if !defined(PSNIP_CLOCK_WALL_METHOD)
# define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_GETTIMEOFDAY
# endif
#endif
#if !defined(PSNIP_CLOCK_WALL_METHOD)
# define PSNIP_CLOCK_WALL_METHOD PSNIP_CLOCK_METHOD_TIME
#endif
#if !defined(PSNIP_CLOCK_CPU_METHOD)
# define PSNIP_CLOCK_CPU_METHOD PSNIP_CLOCK_METHOD_CLOCK
#endif
/* Primarily here for testing. */
#if !defined(PSNIP_CLOCK_MONOTONIC_METHOD) && defined(PSNIP_CLOCK_REQUIRE_MONOTONIC)
# error No monotonic clock found.
#endif
/* Implementations */
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK)) || \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_TIME))
# include <time.h>
#endif
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY))
# include <sys/time.h>
#endif
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES)) || \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64))
# include <windows.h>
#endif
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE))
# include <sys/time.h>
# include <sys/resource.h>
#endif
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME))
# include <CoreServices/CoreServices.h>
# include <mach/mach.h>
# include <mach/mach_time.h>
#endif
/*** Implementations ***/
#define PSNIP_CLOCK_NSEC_PER_SEC ((psnip_uint32_t) (1000000000ULL))
#if \
(defined(PSNIP_CLOCK_CPU_METHOD) && (PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
(defined(PSNIP_CLOCK_WALL_METHOD) && (PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME)) || \
(defined(PSNIP_CLOCK_MONOTONIC_METHOD) && (PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME))
/* Return the precision of the given POSIX clock as ticks per second,
 * or 0 if the clock is unavailable or its resolution cannot be
 * expressed. */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock__clock_getres(clockid_t clk_id) {
  struct timespec res;
  int r;

  r = clock_getres(clk_id, &res);
  if (r != 0)
    return 0;

  {
    /* Express the resolution in whole nanoseconds.  The old code
     * divided by res.tv_nsec directly, which is division by zero for
     * clocks whose resolution is an exact number of seconds
     * (tv_nsec == 0), and it ignored tv_sec. */
    const psnip_uint64_t res_nsec =
      ((psnip_uint64_t) res.tv_sec) * PSNIP_CLOCK_NSEC_PER_SEC +
      ((psnip_uint64_t) res.tv_nsec);
    if (res_nsec == 0)
      return 0;
    return (psnip_uint32_t) (PSNIP_CLOCK_NSEC_PER_SEC / res_nsec);
  }
}
PSNIP_CLOCK__FUNCTION int
psnip_clock__clock_gettime(clockid_t clk_id, struct PsnipClockTimespec *res) {
  /* Thin adapter from POSIX clock_gettime to PsnipClockTimespec. */
  struct timespec now;

  if (clock_gettime(clk_id, &now) != 0)
    return -10;

  res->seconds     = (psnip_uint64_t) now.tv_sec;
  res->nanoseconds = (psnip_uint64_t) now.tv_nsec;
  return 0;
}
#endif
/* Ticks per second of the wall clock, or 0 if no wall clock is
 * available. */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock_wall_get_precision(void) {
#if !defined(PSNIP_CLOCK_WALL_METHOD)
return 0;
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_WALL);
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY
/* gettimeofday has microsecond granularity. */
return 1000000;
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME
/* time() has one-second granularity. */
return 1;
#else
return 0;
#endif
}
/* Fetch the current wall-clock time into *res.  Returns 0 on success,
 * a negative error code otherwise (-2: no wall clock available). */
PSNIP_CLOCK__FUNCTION int
psnip_clock_wall_get_time(struct PsnipClockTimespec *res) {
(void) res;
#if !defined(PSNIP_CLOCK_WALL_METHOD)
return -2;
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_WALL, res);
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_TIME
/* One-second granularity only. */
res->seconds = time(NULL);
res->nanoseconds = 0;
#elif defined(PSNIP_CLOCK_WALL_METHOD) && PSNIP_CLOCK_WALL_METHOD == PSNIP_CLOCK_METHOD_GETTIMEOFDAY
struct timeval tv;
if (gettimeofday(&tv, NULL) != 0)
return -6;
res->seconds = tv.tv_sec;
/* Microseconds scaled up to nanoseconds. */
res->nanoseconds = tv.tv_usec * 1000;
#else
return -2;
#endif
return 0;
}
/* Ticks per second of the CPU-time clock, or 0 if no CPU clock is
 * available. */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock_cpu_get_precision(void) {
#if !defined(PSNIP_CLOCK_CPU_METHOD)
return 0;
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_CPU);
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK
/* clock() ticks CLOCKS_PER_SEC times per second by definition. */
return CLOCKS_PER_SEC;
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES
/* GetProcessTimes reports in 100-nanosecond units. */
return PSNIP_CLOCK_NSEC_PER_SEC / 100;
#else
return 0;
#endif
}
/* Fetch the CPU time consumed by the current process into *res.
 * Returns 0 on success, a negative error code otherwise (-2: no CPU
 * clock available). */
PSNIP_CLOCK__FUNCTION int
psnip_clock_cpu_get_time(struct PsnipClockTimespec *res) {
#if !defined(PSNIP_CLOCK_CPU_METHOD)
  (void) res;
  return -2;
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
  return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_CPU, res);
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_CLOCK
  clock_t t = clock();
  if (t == ((clock_t) -1))
    return -5;
  res->seconds = t / CLOCKS_PER_SEC;
  res->nanoseconds = (t % CLOCKS_PER_SEC) * (PSNIP_CLOCK_NSEC_PER_SEC / CLOCKS_PER_SEC);
#elif defined(PSNIP_CLOCK_CPU_METHOD) && PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETPROCESSTIMES
  FILETIME CreationTime, ExitTime, KernelTime, UserTime;
  LARGE_INTEGER date, adjust;
  if (!GetProcessTimes(GetCurrentProcess(), &CreationTime, &ExitTime, &KernelTime, &UserTime))
    return -7;
  /* http://www.frenk.com/2009/12/convert-filetime-to-unix-timestamp/ */
  date.HighPart = UserTime.dwHighDateTime;
  date.LowPart = UserTime.dwLowDateTime;
  adjust.QuadPart = 11644473600000 * 10000;
  date.QuadPart -= adjust.QuadPart;
  /* FILETIME counts 100-ns intervals. */
  res->seconds = date.QuadPart / 10000000;
  res->nanoseconds = (date.QuadPart % 10000000) * (PSNIP_CLOCK_NSEC_PER_SEC / 100);
#elif PSNIP_CLOCK_CPU_METHOD == PSNIP_CLOCK_METHOD_GETRUSAGE
  struct rusage usage;
  if (getrusage(RUSAGE_SELF, &usage) != 0)
    return -8;
  res->seconds = usage.ru_utime.tv_sec;
  /* BUG FIX: this branch previously read `tv.tv_usec`, but no `tv`
   * exists here (it would not even compile when selected); the
   * microsecond field comes from ru_utime. */
  res->nanoseconds = usage.ru_utime.tv_usec * 1000;
#else
  (void) res;
  return -2;
#endif
  return 0;
}
/* Ticks per second of the monotonic clock, or 0 if no monotonic clock
 * is available. */
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock_monotonic_get_precision(void) {
#if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
return 0;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
return psnip_clock__clock_getres(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC);
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME
/* Query the Mach timebase once and cache it. */
static mach_timebase_info_data_t tbi = { 0, };
if (tbi.denom == 0)
mach_timebase_info(&tbi);
return (psnip_uint32_t) (tbi.numer / tbi.denom);
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64
/* GetTickCount64 has millisecond granularity. */
return 1000;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER
LARGE_INTEGER Frequency;
QueryPerformanceFrequency(&Frequency);
/* Clamp to nanosecond precision at most. */
return (psnip_uint32_t) ((Frequency.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC) ? PSNIP_CLOCK_NSEC_PER_SEC : Frequency.QuadPart);
#else
return 0;
#endif
}
/* Fetch the current monotonic time into *res.  Returns 0 on success,
 * a negative error code otherwise (-2: no monotonic clock
 * available). */
PSNIP_CLOCK__FUNCTION int
psnip_clock_monotonic_get_time(struct PsnipClockTimespec *res) {
#if !defined(PSNIP_CLOCK_MONOTONIC_METHOD)
  (void) res;
  return -2;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_CLOCK_GETTIME
  return psnip_clock__clock_gettime(PSNIP_CLOCK_CLOCK_GETTIME_MONOTONIC, res);
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_MACH_ABSOLUTE_TIME
  psnip_uint64_t nsec = mach_absolute_time();
  /* Query the Mach timebase once and cache it. */
  static mach_timebase_info_data_t tbi = { 0, };
  if (tbi.denom == 0)
    mach_timebase_info(&tbi);
  nsec *= ((psnip_uint64_t) tbi.numer) / ((psnip_uint64_t) tbi.denom);
  res->seconds = nsec / PSNIP_CLOCK_NSEC_PER_SEC;
  res->nanoseconds = nsec % PSNIP_CLOCK_NSEC_PER_SEC;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_QUERYPERFORMANCECOUNTER
  LARGE_INTEGER t, f;
  if (QueryPerformanceCounter(&t) == 0)
    return -12;
  QueryPerformanceFrequency(&f);
  res->seconds = t.QuadPart / f.QuadPart;
  /* Scale the sub-second remainder of counter ticks to nanoseconds. */
  res->nanoseconds = t.QuadPart % f.QuadPart;
  if (f.QuadPart > PSNIP_CLOCK_NSEC_PER_SEC)
    res->nanoseconds /= f.QuadPart / PSNIP_CLOCK_NSEC_PER_SEC;
  else
    res->nanoseconds *= PSNIP_CLOCK_NSEC_PER_SEC / f.QuadPart;
#elif defined(PSNIP_CLOCK_MONOTONIC_METHOD) && PSNIP_CLOCK_MONOTONIC_METHOD == PSNIP_CLOCK_METHOD_GETTICKCOUNT64
  const ULONGLONG msec = GetTickCount64();
  res->seconds = msec / 1000;
  /* BUG FIX: this previously read the undeclared identifier `sec` and
   * stored a raw millisecond remainder in the nanosecond field; the
   * leftover milliseconds must come from `msec` and be scaled to
   * nanoseconds. */
  res->nanoseconds = (msec % 1000) * (PSNIP_CLOCK_NSEC_PER_SEC / 1000);
#else
  (void) res;
  return -2;
#endif
  return 0;
}
/* Returns the number of ticks per second for the specified clock.
* For example, a clock with millisecond precision would return 1000,
* and a clock with 1 second (such as the time() function) would
* return 1.
*
* If the requested clock isn't available, it will return 0.
* Hopefully this will be rare, but if it happens to you please let us
* know so we can work on finding a way to support your system.
*
 * Note that different clocks on the same system often have
 * different precisions.
*/
PSNIP_CLOCK__FUNCTION psnip_uint32_t
psnip_clock_get_precision(enum PsnipClockType clock_type) {
  /* Dispatch to the per-clock implementation. */
  switch (clock_type) {
    case PSNIP_CLOCK_TYPE_WALL:
      return psnip_clock_wall_get_precision();
    case PSNIP_CLOCK_TYPE_MONOTONIC:
      return psnip_clock_monotonic_get_precision();
    case PSNIP_CLOCK_TYPE_CPU:
      return psnip_clock_cpu_get_precision();
  }
  /* Every valid enum value is handled above. */
  PSNIP_CLOCK_UNREACHABLE();
  return 0;
}
/* Set the provided timespec to the requested time. Returns 0 on
* success, or a negative value on failure. */
PSNIP_CLOCK__FUNCTION int
psnip_clock_get_time(enum PsnipClockType clock_type, struct PsnipClockTimespec *res) {
  assert(res != NULL);

  /* Dispatch to the per-clock implementation; unknown clock types
   * yield -1. */
  switch (clock_type) {
    case PSNIP_CLOCK_TYPE_WALL:
      return psnip_clock_wall_get_time(res);
    case PSNIP_CLOCK_TYPE_MONOTONIC:
      return psnip_clock_monotonic_get_time(res);
    case PSNIP_CLOCK_TYPE_CPU:
      return psnip_clock_cpu_get_time(res);
  }
  return -1;
}
#endif /* !defined(PSNIP_CLOCK_H) */
static psnip_uint64_t
munit_clock_get_elapsed(struct PsnipClockTimespec *start, struct PsnipClockTimespec *end) {
  /* Elapsed nanoseconds between two timestamps; the nanosecond fields
   * are reconciled separately to avoid unsigned underflow when the
   * end fraction is smaller than the start fraction. */
  psnip_uint64_t elapsed = (end->seconds - start->seconds) * PSNIP_CLOCK_NSEC_PER_SEC;
  if (end->nanoseconds >= start->nanoseconds)
    elapsed += end->nanoseconds - start->nanoseconds;
  else
    elapsed -= start->nanoseconds - end->nanoseconds;
  return elapsed;
}
#else
# include <time.h>
#endif /* defined(MUNIT_ENABLE_TIMING) */
/*** PRNG stuff ***/
/* This is (unless I screwed up, which is entirely possible) the
* version of PCG with 32-bit state. It was chosen because it has a
* small enough state that we should reliably be able to use CAS
* instead of requiring a lock for thread-safety.
*
* If I did screw up, I probably will not bother changing it unless
* there is a significant bias. It's really not important this be
* particularly strong, as long as it is fairly random it's much more
* important that it be reproducible, so bug reports have a better
* chance of being reproducible. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) && !defined(__EMSCRIPTEN__) && (!defined(__GNUC_MINOR__) || (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ > 8))
# define HAVE_STDATOMIC
#elif defined(__clang__)
# if __has_extension(c_atomic)
# define HAVE_CLANG_ATOMICS
# endif
#endif
/* Workaround for http://llvm.org/bugs/show_bug.cgi?id=26911 */
#if defined(__clang__) && defined(_WIN32)
# undef HAVE_STDATOMIC
# if defined(__c2__)
# undef HAVE_CLANG_ATOMICS
# endif
#endif
#if defined(_OPENMP)
# define ATOMIC_UINT32_T uint32_t
# define ATOMIC_UINT32_INIT(x) (x)
#elif defined(HAVE_STDATOMIC)
# include <stdatomic.h>
# define ATOMIC_UINT32_T _Atomic uint32_t
# define ATOMIC_UINT32_INIT(x) ATOMIC_VAR_INIT(x)
#elif defined(HAVE_CLANG_ATOMICS)
# define ATOMIC_UINT32_T _Atomic uint32_t
# define ATOMIC_UINT32_INIT(x) (x)
#elif defined(_WIN32)
# define ATOMIC_UINT32_T volatile LONG
# define ATOMIC_UINT32_INIT(x) (x)
#else
# define ATOMIC_UINT32_T volatile uint32_t
# define ATOMIC_UINT32_INIT(x) (x)
#endif
static ATOMIC_UINT32_T munit_rand_state = ATOMIC_UINT32_INIT(42);
#if defined(_OPENMP)
/* Store `value` into `*dest` inside the shared OpenMP critical
 * section (used in place of real atomics when built with OpenMP). */
static inline void
munit_atomic_store(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T value) {
#pragma omp critical (munit_atomics)
*dest = value;
}
/* Read `*src` inside the shared OpenMP critical section. */
static inline uint32_t
munit_atomic_load(ATOMIC_UINT32_T* src) {
  /* The temporary was previously declared `int`, so loaded values
   * above INT_MAX went through an implementation-defined narrowing
   * conversion; use the unsigned type the function returns. */
  uint32_t ret;
#pragma omp critical (munit_atomics)
  ret = *src;
  return ret;
}
/* Compare-and-swap under the shared OpenMP critical section: if
 * `*dest` equals `*expected`, store `desired` and return 1; otherwise
 * leave `*dest` unchanged and return 0. */
static inline uint32_t
munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) {
munit_bool ret;
#pragma omp critical (munit_atomics)
{
if (*dest == *expected) {
*dest = desired;
ret = 1;
} else {
ret = 0;
}
}
return ret;
}
#elif defined(HAVE_STDATOMIC)
# define munit_atomic_store(dest, value) atomic_store(dest, value)
# define munit_atomic_load(src) atomic_load(src)
# define munit_atomic_cas(dest, expected, value) atomic_compare_exchange_weak(dest, expected, value)
#elif defined(HAVE_CLANG_ATOMICS)
# define munit_atomic_store(dest, value) __c11_atomic_store(dest, value, __ATOMIC_SEQ_CST)
# define munit_atomic_load(src) __c11_atomic_load(src, __ATOMIC_SEQ_CST)
# define munit_atomic_cas(dest, expected, value) __c11_atomic_compare_exchange_weak(dest, expected, value, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
#elif defined(__GNUC__) && (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)
# define munit_atomic_store(dest, value) __atomic_store_n(dest, value, __ATOMIC_SEQ_CST)
# define munit_atomic_load(src) __atomic_load_n(src, __ATOMIC_SEQ_CST)
# define munit_atomic_cas(dest, expected, value) __atomic_compare_exchange_n(dest, expected, value, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
#elif defined(__GNUC__) && (__GNUC__ >= 4)
# define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0)
# define munit_atomic_load(src) (*(src))
# define munit_atomic_cas(dest, expected, value) __sync_bool_compare_and_swap(dest, *expected, value)
#elif defined(_WIN32) /* Untested */
# define munit_atomic_store(dest,value) do { *(dest) = (value); } while (0)
# define munit_atomic_load(src) (*(src))
# define munit_atomic_cas(dest, expected, value) InterlockedCompareExchange((dest), (value), *(expected))
#else
# warning No atomic implementation, PRNG will not be thread-safe
# define munit_atomic_store(dest, value) do { *(dest) = (value); } while (0)
# define munit_atomic_load(src) (*(src))
static inline munit_bool
munit_atomic_cas(ATOMIC_UINT32_T* dest, ATOMIC_UINT32_T* expected, ATOMIC_UINT32_T desired) {
  /* Plain (non-atomic) compare-and-swap fallback; the surrounding
   * #warning already notes the PRNG is not thread-safe here. */
  if (*dest != *expected)
    return 0;
  *dest = desired;
  return 1;
}
#endif
#define MUNIT_PRNG_MULTIPLIER (747796405U)
#define MUNIT_PRNG_INCREMENT (1729U)
static munit_uint32_t
munit_rand_next_state(munit_uint32_t state) {
  /* One step of the 32-bit linear congruential generator underlying
   * the PCG output stage. */
  return (state * MUNIT_PRNG_MULTIPLIER) + MUNIT_PRNG_INCREMENT;
}
static munit_uint32_t
munit_rand_from_state(munit_uint32_t state) {
  /* PCG-style output permutation: a state-dependent xorshift, a
   * multiply, then one more xorshift to mix the high bits down. */
  const munit_uint32_t shift = (state >> 28) + 4;
  munit_uint32_t output = ((state >> shift) ^ state) * (277803737U);
  output ^= output >> 22;
  return output;
}
void
munit_rand_seed(munit_uint32_t seed) {
  /* Derive the initial generator state by stepping the LCG once over
   * the (offset) seed, then publish it atomically. */
  const munit_uint32_t state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT);
  munit_atomic_store(&munit_rand_state, state);
}
/* Derive a fresh PRNG seed from the current wall-clock time
 * (nanoseconds when timing support is compiled in, otherwise whole
 * seconds), mixed through one generator step. */
static munit_uint32_t
munit_rand_generate_seed(void) {
munit_uint32_t seed, state;
#if defined(MUNIT_ENABLE_TIMING)
struct PsnipClockTimespec wc = {0,};
/* NOTE(review): the return value of psnip_clock_get_time is ignored;
 * on failure the zero-initialized wc yields a seed of 0. */
psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wc);
seed = (munit_uint32_t) wc.nanoseconds;
#else
seed = (munit_uint32_t) time(NULL);
#endif
/* Run the raw seed through the generator once so nearby timestamps
 * produce dissimilar seeds. */
state = munit_rand_next_state(seed + MUNIT_PRNG_INCREMENT);
return munit_rand_from_state(state);
}
static munit_uint32_t
munit_rand_state_uint32(munit_uint32_t *state) {
  /* Advance the caller's state; the output is derived from the state
   * that was current *before* the step (standard PCG ordering). */
  const munit_uint32_t current = *state;
  *state = munit_rand_next_state(current);
  return munit_rand_from_state(current);
}
/* Thread-safe random 32-bit value: advance the shared PRNG state with
 * a compare-and-swap retry loop and derive the output from the state
 * that was atomically replaced. */
munit_uint32_t
munit_rand_uint32(void) {
munit_uint32_t old, state;
do {
old = munit_atomic_load(&munit_rand_state);
state = munit_rand_next_state(old);
} while (!munit_atomic_cas(&munit_rand_state, &old, state));
return munit_rand_from_state(old);
}
static void
munit_rand_state_memory(munit_uint32_t *state, size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) {
  /* Fill `data` with pseudo-random bytes, one 32-bit draw per word,
   * plus one final draw for any trailing partial word. */
  const size_t word = sizeof(munit_uint32_t);
  size_t whole = size / word;
  const size_t tail = size % word;
  munit_uint8_t *out = data;
  munit_uint32_t value;

  for (; whole > 0; whole--) {
    value = munit_rand_state_uint32(state);
    memcpy(out, &value, word);
    out += word;
  }
  if (tail != 0) {
    value = munit_rand_state_uint32(state);
    memcpy(out, &value, tail);
  }
}
/* Fill `data` with `size` random bytes, updating the shared PRNG
 * state atomically via a compare-and-swap retry loop. */
void
munit_rand_memory(size_t size, munit_uint8_t data[MUNIT_ARRAY_PARAM(size)]) {
munit_uint32_t old, state;
do {
state = old = munit_atomic_load(&munit_rand_state);
munit_rand_state_memory(&state, size, data);
} while (!munit_atomic_cas(&munit_rand_state, &old, state));
}
/* Uniform value in [0, max] (inclusive) drawn from `state`, with the
 * raw draws XORed with `salt`.  Rejection sampling removes the modulo
 * bias. */
static munit_uint32_t
munit_rand_state_at_most(munit_uint32_t *state, munit_uint32_t salt, munit_uint32_t max) {
/* We want (UINT32_MAX + 1) % max, which in unsigned arithmetic is the same
 * as (UINT32_MAX + 1 - max) % max = -max % max. We compute -max using not
 * to avoid compiler warnings.
 */
const munit_uint32_t min = (~max + 1U) % max;
munit_uint32_t x;
/* Full 32-bit range requested: every draw is acceptable as-is. */
if (max == (~((munit_uint32_t) 0U)))
return munit_rand_state_uint32(state) ^ salt;
max++;
/* Reject draws below `min`; the survivors are unbiased mod max. */
do {
x = munit_rand_state_uint32(state) ^ salt;
} while (x < min);
return x % max;
}
/* Like munit_rand_state_at_most, but operating on the shared global
 * PRNG state via a compare-and-swap retry loop for thread safety. */
static munit_uint32_t
munit_rand_at_most(munit_uint32_t salt, munit_uint32_t max) {
munit_uint32_t old, state;
munit_uint32_t retval;
do {
state = old = munit_atomic_load(&munit_rand_state);
retval = munit_rand_state_at_most(&state, salt, max);
} while (!munit_atomic_cas(&munit_rand_state, &old, state));
return retval;
}
int
munit_rand_int_range(int min, int max) {
munit_uint64_t range = (munit_uint64_t) max - (munit_uint64_t) min;
if (min > max)
return munit_rand_int_range(max, min);
if (range > (~((munit_uint32_t) 0U)))
range = (~((munit_uint32_t) 0U));
return min + munit_rand_at_most(0, (munit_uint32_t) range);
}
/* Random double in [0, 1), derived from one 32-bit draw.  The shared
 * PRNG state is advanced with a compare-and-swap retry loop. */
double
munit_rand_double(void) {
munit_uint32_t old, state;
double retval = 0.0;
do {
state = old = munit_atomic_load(&munit_rand_state);
/* See http://mumble.net/~campbell/tmp/random_real.c for how to do
 * this right. Patches welcome if you feel that this is too
 * biased. */
retval = munit_rand_state_uint32(&state) / ((~((munit_uint32_t) 0U)) + 1.0);
} while (!munit_atomic_cas(&munit_rand_state, &old, state));
return retval;
}
/*** Test suite handling ***/
/* Aggregated outcome counters for a set of test runs. */
typedef struct {
unsigned int successful;
unsigned int skipped;
unsigned int failed;
unsigned int errored;
#if defined(MUNIT_ENABLE_TIMING)
munit_uint64_t cpu_clock; /* accumulated CPU time, nanoseconds */
munit_uint64_t wall_clock; /* accumulated wall-clock time, nanoseconds */
#endif
} MunitReport;
/* Bookkeeping for one invocation of the test runner. */
typedef struct {
const char *prefix; /* name prefix combined with test names (see munit_maybe_concat) */
const MunitSuite *suite; /* suite being executed */
const char **tests; /* test-name filters; NOTE(review): assumed NULL-terminated — confirm against callers */
munit_uint32_t seed; /* PRNG seed each test is (re)seeded with */
unsigned int iterations; /* per-test iteration count; 0 means "use the suite's" */
MunitParameter *parameters;
munit_bool single_parameter_mode;
void *user_data; /* opaque pointer passed to test setup callbacks */
MunitReport report; /* aggregated results */
munit_bool colorize; /* use ANSI color escapes in output */
munit_bool fork; /* presumably: run each test in a child process — see MUNIT_NO_FORK */
munit_bool show_stderr;
munit_bool fatal_failures;
} MunitTestRunner;
const char *
munit_parameters_get(const MunitParameter params[], const char *key) {
  /* Linear scan of a NULL-name-terminated parameter array; returns
   * the matching value, or NULL when `key` is absent (or params is
   * NULL). */
  const MunitParameter *p = params;
  while (p != NULL && p->name != NULL) {
    if (strcmp(p->name, key) == 0)
      return p->value;
    p++;
  }
  return NULL;
}
#if defined(MUNIT_ENABLE_TIMING)
static void
munit_print_time(FILE *fp, munit_uint64_t nanoseconds) {
  /* Render a nanosecond count as fractional seconds. */
  const double seconds = ((double) nanoseconds) / ((double) PSNIP_CLOCK_NSEC_PER_SEC);
  fprintf(fp, "%" MUNIT_TEST_TIME_FORMAT, seconds);
}
#endif
/* Add a parameter to an array of parameters. */
/* Append a (name, value) pair to a NULL-terminated parameter array,
 * growing it with realloc.  Returns MUNIT_OK on success or
 * MUNIT_ERROR when the allocation fails, in which case the caller's
 * array is left untouched and still valid. */
static MunitResult
munit_parameters_add(size_t *params_size,
                     MunitParameter *params[MUNIT_ARRAY_PARAM(*params_size)],
                     char *name,
                     char *value) {
  /* +2: one slot for the new entry, one for the NULL terminator. */
  MunitParameter *grown = realloc(*params, sizeof(MunitParameter) * (*params_size + 2));
  if (grown == NULL) {
    /* BUG FIX: the result of realloc was previously assigned straight
     * into *params, so a failure clobbered the caller's pointer with
     * NULL and leaked the old array. */
    return MUNIT_ERROR;
  }
  *params = grown;
  (*params)[*params_size].name = name;
  (*params)[*params_size].value = value;
  (*params_size)++;
  /* Keep the array NULL-terminated for munit_parameters_get. */
  (*params)[*params_size].name = NULL;
  (*params)[*params_size].value = NULL;
  return MUNIT_OK;
}
/* Concatenate two strings, but just return one of the components
* unaltered if the other is NULL or "". */
/* Concatenate `prefix` and `suffix`.  If either is NULL or empty the
 * other is returned unaltered (no allocation); otherwise a freshly
 * malloc'd string is returned (release with munit_maybe_free_concat).
 * If `len` is non-NULL it receives the result's length.  Returns NULL
 * (with *len set to 0) when both inputs are empty or when allocation
 * fails. */
static char *
munit_maybe_concat(size_t *len, char *prefix, char *suffix) {
  char *res;
  size_t res_l;
  const size_t prefix_l = prefix != NULL ? strlen(prefix) : 0;
  const size_t suffix_l = suffix != NULL ? strlen(suffix) : 0;
  if (prefix_l == 0 && suffix_l == 0) {
    res = NULL;
    res_l = 0;
  } else if (prefix_l == 0 && suffix_l != 0) {
    res = suffix;
    res_l = suffix_l;
  } else if (prefix_l != 0 && suffix_l == 0) {
    res = prefix;
    res_l = prefix_l;
  } else {
    res_l = prefix_l + suffix_l;
    res = malloc(res_l + 1);
    if (res == NULL) {
      /* BUG FIX: malloc was previously used unchecked, so an
       * allocation failure dereferenced NULL in memcpy. */
      res_l = 0;
    } else {
      memcpy(res, prefix, prefix_l);
      memcpy(res + prefix_l, suffix, suffix_l);
      res[res_l] = 0;
    }
  }
  if (len != NULL)
    *len = res_l;
  return res;
}
/* Possibly free a string returned by munit_maybe_concat. */
static void
munit_maybe_free_concat(char *s, const char *prefix, const char *suffix) {
  /* Only free `s` when it is a fresh allocation from
   * munit_maybe_concat; when it aliases either input, no allocation
   * took place. */
  if (s == prefix || s == suffix)
    return;
  free(s);
}
/* Cheap string hash function, just used to salt the PRNG. */
static munit_uint32_t
munit_str_hash(const char *name) {
  /* djb2-style hash (h = h * 33 + c); only used to salt the PRNG, so
   * quality requirements are low. */
  munit_uint32_t h = 5381U;
  const char *p = name;
  while (*p != '\0') {
    h = (h << 5) + h + *p;
    p++;
  }
  return h;
}
/* Copy all remaining data from file descriptor `from` to `to`,
 * stopping at EOF or on a read/write error. */
static void
munit_splice(int from, int to) {
munit_uint8_t buf[1024];
#if !defined(_WIN32)
ssize_t len;
ssize_t bytes_written;
ssize_t write_res;
#else
/* Windows' read()/write() use int lengths. */
int len;
int bytes_written;
int write_res;
#endif
do {
len = read(from, buf, sizeof(buf));
if (len > 0) {
bytes_written = 0;
/* write() may write less than requested; loop until the whole
 * chunk has been flushed (or an error occurs). */
do {
write_res = write(to, buf + bytes_written, len - bytes_written);
if (write_res < 0)
break;
bytes_written += write_res;
} while (bytes_written < len);
} else
break;
} while (1);
}
/* This is the part that should be handled in the child process */
/* Run one test (possibly for several iterations) and accumulate its
 * outcome into `report`.  This is the portion of the runner that
 * executes inside the child process when forking is enabled.  Returns
 * the result of the last iteration executed. */
static MunitResult
munit_test_runner_exec(MunitTestRunner *runner,
const MunitTest *test,
const MunitParameter params[],
MunitReport *report) {
unsigned int iterations = runner->iterations;
MunitResult result = MUNIT_FAIL;
#if defined(MUNIT_ENABLE_TIMING)
struct PsnipClockTimespec wall_clock_begin = {0,}, wall_clock_end = {0,};
struct PsnipClockTimespec cpu_clock_begin = {0,}, cpu_clock_end = {0,};
#endif
unsigned int i = 0;
/* Single-iteration tests always run exactly once; otherwise fall
 * back to the suite-wide iteration count when the runner does not
 * specify one. */
if ((test->options & MUNIT_TEST_OPTION_SINGLE_ITERATION) == MUNIT_TEST_OPTION_SINGLE_ITERATION)
iterations = 1;
else if (iterations == 0)
iterations = runner->suite->iterations;
/* Every test starts from the same, reproducible PRNG state. */
munit_rand_seed(runner->seed);
do {
/* Per-iteration fixture: when the test has no setup function the
 * raw user data is passed through unchanged. */
void *data = (test->setup == NULL) ? runner->user_data : test->setup(params, runner->user_data);
#if defined(MUNIT_ENABLE_TIMING)
psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_begin);
psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_begin);
#endif
result = test->test(params, data);
#if defined(MUNIT_ENABLE_TIMING)
psnip_clock_get_time(PSNIP_CLOCK_TYPE_WALL, &wall_clock_end);
psnip_clock_get_time(PSNIP_CLOCK_TYPE_CPU, &cpu_clock_end);
#endif
if (test->tear_down != NULL)
test->tear_down(data);
if (MUNIT_LIKELY(result == MUNIT_OK)) {
report->successful++;
#if defined(MUNIT_ENABLE_TIMING)
/* Timing is only accumulated for successful iterations. */
report->wall_clock += munit_clock_get_elapsed(&wall_clock_begin, &wall_clock_end);
report->cpu_clock += munit_clock_get_elapsed(&cpu_clock_begin, &cpu_clock_end);
#endif
} else {
/* Any non-OK result is recorded and stops the loop immediately. */
switch ((int) result) {
case MUNIT_SKIP:report->skipped++;
break;
case MUNIT_FAIL:report->failed++;
break;
case MUNIT_ERROR:report->errored++;
break;
default:break;
}
break;
}
} while (++i < iterations);
return result;
}
/* Short textual labels printed inside the "[ ... ]" status column;
 * defining MUNIT_EMOTICON swaps them for emoticons. */
#if defined(MUNIT_EMOTICON)
# define MUNIT_RESULT_STRING_OK ":)"
# define MUNIT_RESULT_STRING_SKIP ":|"
# define MUNIT_RESULT_STRING_FAIL ":("
# define MUNIT_RESULT_STRING_ERROR ":o"
# define MUNIT_RESULT_STRING_TODO ":/"
#else
# define MUNIT_RESULT_STRING_OK "OK "
# define MUNIT_RESULT_STRING_SKIP "SKIP "
# define MUNIT_RESULT_STRING_FAIL "FAIL "
# define MUNIT_RESULT_STRING_ERROR "ERROR"
# define MUNIT_RESULT_STRING_TODO "TODO "
#endif
/* Write `string` to the output stream, wrapped in an ANSI foreground
 * color escape ("\x1b[3<color>m" ... reset) when colorized output is
 * enabled on the runner; otherwise write it plainly. */
static void
munit_test_runner_print_color(const MunitTestRunner *runner, const char *string, char color) {
  if (!runner->colorize) {
    fputs(string, MUNIT_OUTPUT_FILE);
    return;
  }
  fprintf(MUNIT_OUTPUT_FILE, "\x1b[3%cm%s\x1b[39m", color, string);
}
#if !defined(MUNIT_NO_BUFFER)
/* Redirect STDERR_FILENO into `stderr_buf` (the temp file used to
 * capture a test's stderr). Returns a dup of the original stderr
 * descriptor so it can be restored later, or -1 when no buffer was
 * supplied. Exits the process if the buffer has no usable fd. */
static int
munit_replace_stderr(FILE *stderr_buf) {
  int orig_stderr;
  int errfd;
  if (stderr_buf == NULL)
    return -1;
  orig_stderr = dup(STDERR_FILENO);
  errfd = fileno(stderr_buf);
  if (MUNIT_UNLIKELY(errfd == -1)) {
    exit(EXIT_FAILURE);
  }
  dup2(errfd, STDERR_FILENO);
  return orig_stderr;
}
/* Undo munit_replace_stderr(): re-attach the saved descriptor to
 * STDERR_FILENO and close the duplicate. -1 means stderr was never
 * replaced, so there is nothing to do. */
static void
munit_restore_stderr(int orig_stderr) {
  if (orig_stderr == -1)
    return;
  dup2(orig_stderr, STDERR_FILENO);
  close(orig_stderr);
}
#endif /* !defined(MUNIT_NO_BUFFER) */
/* Run a test with the specified parameters. */
/* Executes one concrete test invocation (a single parameter
 * combination), prints the aligned result line, and folds the outcome
 * into runner->report. stderr is captured into a temp file so it can
 * be replayed only on failure (or with --show-stderr). When forking is
 * enabled, the test runs in a child process that sends its MunitReport
 * back over a pipe, so crashes are contained and reported. */
static void
munit_test_runner_run_test_with_params(MunitTestRunner *runner, const MunitTest *test, const MunitParameter params[]) {
MunitResult result = MUNIT_OK;
MunitReport report = {
0, 0, 0, 0,
#if defined(MUNIT_ENABLE_TIMING)
0, 0
#endif
};
unsigned int output_l;
munit_bool first;
const MunitParameter *param;
FILE *stderr_buf;
#if !defined(MUNIT_NO_FORK)
int pipefd[2];
pid_t fork_pid;
int orig_stderr;
ssize_t bytes_written = 0;
ssize_t write_res;
ssize_t bytes_read = 0;
ssize_t read_res;
int status = 0;
pid_t changed_pid;
#endif
/* Print the "name=value, ..." parameter list, padded to the fixed
 * test-name column width so the result columns line up. */
if (params != NULL) {
output_l = 2;
fputs(" ", MUNIT_OUTPUT_FILE);
first = 1;
for (param = params; param != NULL && param->name != NULL; param++) {
if (!first) {
fputs(", ", MUNIT_OUTPUT_FILE);
output_l += 2;
} else {
first = 0;
}
output_l += fprintf(MUNIT_OUTPUT_FILE, "%s=%s", param->name, param->value);
}
while (output_l++ < MUNIT_TEST_NAME_LEN) {
fputc(' ', MUNIT_OUTPUT_FILE);
}
}
fflush(MUNIT_OUTPUT_FILE);
/* Temp file that will capture everything the test writes to stderr. */
stderr_buf = NULL;
#if !defined(_WIN32) || defined(__MINGW32__)
stderr_buf = tmpfile();
#else
tmpfile_s(&stderr_buf);
#endif
if (stderr_buf == NULL) {
munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create buffer for stderr");
result = MUNIT_ERROR;
goto print_result;
}
#if !defined(MUNIT_NO_FORK)
if (runner->fork) {
pipefd[0] = -1;
pipefd[1] = -1;
if (pipe(pipefd) != 0) {
munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to create pipe");
result = MUNIT_ERROR;
goto print_result;
}
fork_pid = fork();
if (fork_pid == 0) {
/* Child: run the test, then stream the raw MunitReport bytes back
 * to the parent through the pipe and exit. */
close(pipefd[0]);
orig_stderr = munit_replace_stderr(stderr_buf);
munit_test_runner_exec(runner, test, params, &report);
/* Note that we don't restore stderr. This is so we can buffer
* things written to stderr later on (such as by
* asan/tsan/ubsan, valgrind, etc.) */
close(orig_stderr);
do {
write_res = write(pipefd[1], ((munit_uint8_t *) (&report)) + bytes_written, sizeof(report) - bytes_written);
if (write_res < 0) {
if (stderr_buf != NULL) {
munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to write to pipe");
}
exit(EXIT_FAILURE);
}
bytes_written += write_res;
} while ((size_t) bytes_written < sizeof(report));
if (stderr_buf != NULL)
fclose(stderr_buf);
close(pipefd[1]);
exit(EXIT_SUCCESS);
} else if (fork_pid == -1) {
/* fork() itself failed: record an error and fall through. */
close(pipefd[0]);
close(pipefd[1]);
if (stderr_buf != NULL) {
munit_log_errno(MUNIT_LOG_ERROR, stderr, "unable to fork");
}
report.errored++;
result = MUNIT_ERROR;
} else {
/* Parent: read the child's report, then reap it and sanity-check
 * how it terminated (clean exit, non-zero status, signal, stop). */
close(pipefd[1]);
do {
read_res = read(pipefd[0], ((munit_uint8_t *) (&report)) + bytes_read, sizeof(report) - bytes_read);
if (read_res < 1)
break;
bytes_read += read_res;
} while (bytes_read < (ssize_t) sizeof(report));
changed_pid = waitpid(fork_pid, &status, 0);
if (MUNIT_LIKELY(changed_pid == fork_pid) && MUNIT_LIKELY(WIFEXITED(status))) {
if (bytes_read != sizeof(report)) {
/* A short read means the child died before sending a full
 * report (e.g. crashed mid-test). */
munit_logf_internal(MUNIT_LOG_ERROR,
stderr_buf,
"child exited unexpectedly with status %d",
WEXITSTATUS(status));
report.errored++;
} else if (WEXITSTATUS(status) != EXIT_SUCCESS) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child exited with status %d", WEXITSTATUS(status));
report.errored++;
}
} else {
if (WIFSIGNALED(status)) {
#if defined(_XOPEN_VERSION) && (_XOPEN_VERSION >= 700)
munit_logf_internal(MUNIT_LOG_ERROR,
stderr_buf,
"child killed by signal %d (%s)",
WTERMSIG(status),
strsignal(WTERMSIG(status)));
#else
munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child killed by signal %d", WTERMSIG(status));
#endif
} else if (WIFSTOPPED(status)) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr_buf, "child stopped by signal %d", WSTOPSIG(status));
}
report.errored++;
}
close(pipefd[0]);
waitpid(fork_pid, NULL, 0);
}
} else
#endif
{
/* No-fork path: run the test in-process. When thread-local storage
 * is available, a failed assertion longjmp()s back here instead of
 * aborting the whole runner. */
#if !defined(MUNIT_NO_BUFFER)
const volatile int orig_stderr = munit_replace_stderr(stderr_buf);
#endif
#if defined(MUNIT_THREAD_LOCAL)
if (MUNIT_UNLIKELY(setjmp(munit_error_jmp_buf) != 0)) {
result = MUNIT_FAIL;
report.failed++;
} else {
munit_error_jmp_buf_valid = 1;
result = munit_test_runner_exec(runner, test, params, &report);
}
#else
result = munit_test_runner_exec(runner, test, params, &report);
#endif
#if !defined(MUNIT_NO_BUFFER)
munit_restore_stderr(orig_stderr);
#endif
/* Here just so that the label is used on Windows and we don't get
* a warning */
goto print_result;
}
print_result:
fputs("[ ", MUNIT_OUTPUT_FILE);
/* TODO tests invert success: a failure/error/skip is the expected
 * outcome; an actual pass is reported as an error. */
if ((test->options & MUNIT_TEST_OPTION_TODO) == MUNIT_TEST_OPTION_TODO) {
if (report.failed != 0 || report.errored != 0 || report.skipped != 0) {
munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_TODO, '3');
result = MUNIT_OK;
} else {
munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1');
if (MUNIT_LIKELY(stderr_buf != NULL))
munit_log_internal(MUNIT_LOG_ERROR, stderr_buf, "Test marked TODO, but was successful.");
runner->report.failed++;
result = MUNIT_ERROR;
}
} else if (report.failed > 0) {
munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_FAIL, '1');
runner->report.failed++;
result = MUNIT_FAIL;
} else if (report.errored > 0) {
munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_ERROR, '1');
runner->report.errored++;
result = MUNIT_ERROR;
} else if (report.skipped > 0) {
munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_SKIP, '3');
runner->report.skipped++;
result = MUNIT_SKIP;
} else if (report.successful > 1) {
/* Multiple successful iterations: print mean and total timings. */
munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2');
#if defined(MUNIT_ENABLE_TIMING)
fputs(" ] [ ", MUNIT_OUTPUT_FILE);
munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock / report.successful);
fputs(" / ", MUNIT_OUTPUT_FILE);
munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock / report.successful);
fprintf(MUNIT_OUTPUT_FILE, " CPU ]\n %-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s Total: [ ", "");
munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock);
fputs(" / ", MUNIT_OUTPUT_FILE);
munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock);
fputs(" CPU", MUNIT_OUTPUT_FILE);
#endif
runner->report.successful++;
result = MUNIT_OK;
} else if (report.successful > 0) {
munit_test_runner_print_color(runner, MUNIT_RESULT_STRING_OK, '2');
#if defined(MUNIT_ENABLE_TIMING)
fputs(" ] [ ", MUNIT_OUTPUT_FILE);
munit_print_time(MUNIT_OUTPUT_FILE, report.wall_clock);
fputs(" / ", MUNIT_OUTPUT_FILE);
munit_print_time(MUNIT_OUTPUT_FILE, report.cpu_clock);
fputs(" CPU", MUNIT_OUTPUT_FILE);
#endif
runner->report.successful++;
result = MUNIT_OK;
}
fputs(" ]\n", MUNIT_OUTPUT_FILE);
/* Replay the captured stderr on failure/error, or always when
 * --show-stderr was requested. */
if (stderr_buf != NULL) {
if (result == MUNIT_FAIL || result == MUNIT_ERROR || runner->show_stderr) {
fflush(MUNIT_OUTPUT_FILE);
rewind(stderr_buf);
munit_splice(fileno(stderr_buf), STDERR_FILENO);
fflush(stderr);
}
fclose(stderr_buf);
}
}
/* Recursively expand "wildcard" parameters (those with a value list
 * but no value fixed on the CLI), running the test once per value
 * combination. `p` is the slot in `params` currently being filled;
 * recursion advances to the next slot until the NULL-name terminator
 * is reached, at which point the fully-populated `params` is run. */
static void
munit_test_runner_run_test_wild(MunitTestRunner *runner,
const MunitTest *test,
const char *test_name,
MunitParameter *params,
MunitParameter *p) {
const MunitParameterEnum *pe;
char **values;
MunitParameter *next;
/* Find the enum describing this parameter's possible values.
 * NOTE(review): this compares name *pointers*, not string contents —
 * it assumes the wildcard entry stores the enum's own name pointer;
 * confirm against munit_parameters_add. */
for (pe = test->parameters; pe != NULL && pe->name != NULL; pe++) {
if (p->name == pe->name)
break;
}
if (pe == NULL)
return;
for (values = pe->values; *values != NULL; values++) {
next = p + 1;
p->value = *values;
if (next->name == NULL) {
munit_test_runner_run_test_with_params(runner, test, params);
} else {
munit_test_runner_run_test_wild(runner, test, test_name, params, next);
}
/* Honor --fatal-failures: stop at the first failure or error. */
if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
break;
}
}
/* Run a single test, with every combination of parameters
 * requested.
 *
 * Parameters fixed on the CLI are used as-is. Parameters that only
 * have a value list in the test are either pinned to one
 * pseudo-random value (--single) or expanded into every combination
 * via munit_test_runner_run_test_wild().
 *
 * Fix: the address-of operators in the munit_parameters_add() calls
 * had been corrupted by HTML-entity mangling ("&params" had become
 * "¶ms", etc.); the original "&params_l, &params" and
 * "&wild_params_l, &wild_params" arguments are restored. */
static void
munit_test_runner_run_test(MunitTestRunner *runner,
                           const MunitTest *test,
                           const char *prefix) {
  char *test_name = munit_maybe_concat(NULL, (char *) prefix, (char *) test->name);
  /* The array of parameters to pass to
   * munit_test_runner_run_test_with_params */
  MunitParameter *params = NULL;
  size_t params_l = 0;
  /* Wildcard parameters are parameters which have possible values
   * specified in the test, but no specific value was passed to the
   * CLI. That means we want to run the test once for every
   * possible combination of parameter values or, if --single was
   * passed to the CLI, a single time with a random set of
   * parameters. */
  MunitParameter *wild_params = NULL;
  size_t wild_params_l = 0;
  const MunitParameterEnum *pe;
  const MunitParameter *cli_p;
  munit_bool filled;
  unsigned int possible;
  char **vals;
  size_t first_wild;
  const MunitParameter *wp;
  int pidx;
  munit_rand_seed(runner->seed);
  fprintf(MUNIT_OUTPUT_FILE, "%-" MUNIT_XSTRINGIFY(MUNIT_TEST_NAME_LEN) "s", test_name);
  if (test->parameters == NULL) {
    /* No parameters. Simple, nice. */
    munit_test_runner_run_test_with_params(runner, test, NULL);
  } else {
    fputc('\n', MUNIT_OUTPUT_FILE);
    for (pe = test->parameters; pe != NULL && pe->name != NULL; pe++) {
      /* Did we receive a value for this parameter from the CLI? */
      filled = 0;
      for (cli_p = runner->parameters; cli_p != NULL && cli_p->name != NULL; cli_p++) {
        if (strcmp(cli_p->name, pe->name) == 0) {
          if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, cli_p->value) != MUNIT_OK))
            goto cleanup;
          filled = 1;
          break;
        }
      }
      if (filled)
        continue;
      /* Nothing from CLI, is the enum NULL/empty? We're not a
       * fuzzer… */
      if (pe->values == NULL || pe->values[0] == NULL)
        continue;
      /* If --single was passed to the CLI, choose a value from the
       * list of possibilities randomly. */
      if (runner->single_parameter_mode) {
        possible = 0;
        for (vals = pe->values; *vals != NULL; vals++)
          possible++;
        /* We want the tests to be reproducible, even if you're only
         * running a single test, but we don't want every test with
         * the same number of parameters to choose the same parameter
         * number, so use the test name as a primitive salt. */
        pidx = munit_rand_at_most(munit_str_hash(test_name), possible - 1);
        if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[pidx]) != MUNIT_OK))
          goto cleanup;
      } else {
        /* We want to try every permutation. Put in a placeholder
         * entry, we'll iterate through them later. */
        if (MUNIT_UNLIKELY(munit_parameters_add(&wild_params_l, &wild_params, pe->name, NULL) != MUNIT_OK))
          goto cleanup;
      }
    }
    if (wild_params_l != 0) {
      /* Seed each wildcard slot with its first possible value, then
       * let the recursive expansion iterate through the rest. */
      first_wild = params_l;
      for (wp = wild_params; wp != NULL && wp->name != NULL; wp++) {
        for (pe = test->parameters; pe != NULL && pe->name != NULL && pe->values != NULL; pe++) {
          if (strcmp(wp->name, pe->name) == 0) {
            if (MUNIT_UNLIKELY(munit_parameters_add(&params_l, &params, pe->name, pe->values[0]) != MUNIT_OK))
              goto cleanup;
          }
        }
      }
      munit_test_runner_run_test_wild(runner, test, test_name, params, params + first_wild);
    } else {
      munit_test_runner_run_test_with_params(runner, test, params);
    }
  cleanup:
    free(params);
    free(wild_params);
  }
  munit_maybe_free_concat(test_name, prefix, test->name);
}
/* Recurse through the suite and run all the tests. If a list of
 * tests to run was provided on the command line, run only those
 * tests. */
static void
munit_test_runner_run_suite(MunitTestRunner *runner,
const MunitSuite *suite,
const char *prefix) {
size_t pre_l;
/* Accumulated name prefix: parent prefix + this suite's prefix. */
char *pre = munit_maybe_concat(&pre_l, (char *) prefix, (char *) suite->prefix);
const MunitTest *test;
const char **test_name;
const MunitSuite *child_suite;
/* Run the tests. */
for (test = suite->tests; test != NULL && test->test != NULL; test++) {
if (runner->tests != NULL) { /* Specific tests were requested on the CLI */
/* A requested name matches when its leading part equals this
 * suite's prefix and the remainder equals the test's name. */
for (test_name = runner->tests; test_name != NULL && *test_name != NULL; test_name++) {
if ((pre_l == 0 || strncmp(pre, *test_name, pre_l) == 0) &&
strncmp(test->name, *test_name + pre_l, strlen(*test_name + pre_l)) == 0) {
munit_test_runner_run_test(runner, test, pre);
if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
goto cleanup;
}
}
} else { /* Run all tests */
munit_test_runner_run_test(runner, test, pre);
}
}
if (runner->fatal_failures && (runner->report.failed != 0 || runner->report.errored != 0))
goto cleanup;
/* Run any child suites. */
for (child_suite = suite->suites; child_suite != NULL && child_suite->prefix != NULL; child_suite++) {
munit_test_runner_run_suite(runner, child_suite, pre);
}
cleanup:
munit_maybe_free_concat(pre, prefix, suite->prefix);
}
/* Entry point for executing the runner's entire suite tree, starting
 * with an empty name prefix. */
static void
munit_test_runner_run(MunitTestRunner *runner) {
munit_test_runner_run_suite(runner, runner->suite, NULL);
}
/* Print CLI usage text, the munit version banner, and the help text of
 * any user-supplied custom arguments (via their write_help callbacks).
 *
 * Fixes two user-facing typos in the help text: "hexidecimal" ->
 * "hexadecimal" and "any test with / takes" -> "any test which /
 * takes". */
static void
munit_print_help(int argc,
char *const argv[MUNIT_ARRAY_PARAM(argc + 1)],
void *user_data,
const MunitArgument arguments[]) {
const MunitArgument *arg;
(void) argc;
printf("USAGE: %s [OPTIONS...] [TEST...]\n\n", argv[0]);
puts(" --seed SEED\n"
" Value used to seed the PRNG. Must be a 32-bit integer in decimal\n"
" notation with no separators (commas, decimals, spaces, etc.), or\n"
" hexadecimal prefixed by \"0x\".\n"
" --iterations N\n"
" Run each test N times. 0 means the default number.\n"
" --param name value\n"
" A parameter key/value pair which will be passed to any test which\n"
" takes a parameter of that name. If not provided, the test will be\n"
" run once for each possible parameter value.\n"
" --list Write a list of all available tests.\n"
" --list-params\n"
" Write a list of all available tests and their possible parameters.\n"
" --single Run each parameterized test in a single configuration instead of\n"
" every possible combination\n"
" --log-visible debug|info|warning|error\n"
" --log-fatal debug|info|warning|error\n"
" Set the level at which messages of different severities are visible,\n"
" or cause the test to terminate.\n"
#if !defined(MUNIT_NO_FORK)
" --no-fork Do not execute tests in a child process. If this option is supplied\n"
" and a test crashes (including by failing an assertion), no further\n"
" tests will be performed.\n"
#endif
" --fatal-failures\n"
" Stop executing tests as soon as a failure is found.\n"
" --show-stderr\n"
" Show data written to stderr by the tests, even if the test succeeds.\n"
" --color auto|always|never\n"
" Colorize (or don't) the output.\n"
/* 12345678901234567890123456789012345678901234567890123456789012345678901234567890 */
" --help Print this help message and exit.\n");
#if defined(MUNIT_NL_LANGINFO)
setlocale(LC_ALL, "");
fputs((strcasecmp("UTF-8", nl_langinfo(CODESET)) == 0) ? "µnit" : "munit", stdout);
#else
puts("munit");
#endif
printf(" %d.%d.%d\n"
"Full documentation at: https://nemequ.github.io/munit/\n",
(MUNIT_CURRENT_VERSION >> 16) & 0xff,
(MUNIT_CURRENT_VERSION >> 8) & 0xff,
(MUNIT_CURRENT_VERSION >> 0) & 0xff);
for (arg = arguments; arg != NULL && arg->name != NULL; arg++)
arg->write_help(arg, user_data);
}
/* Look up a custom CLI argument descriptor by name in the
 * NULL-name-terminated `arguments` array; returns NULL when absent
 * (or when the array itself is NULL). */
static const MunitArgument *
munit_arguments_find(const MunitArgument arguments[], const char *name) {
  const MunitArgument *cursor = arguments;
  while (cursor != NULL && cursor->name != NULL) {
    if (strcmp(cursor->name, name) == 0)
      return cursor;
    cursor++;
  }
  return NULL;
}
/* Print every test in `suite` (and, recursively, its child suites) to
 * stdout, each prefixed with the accumulated suite prefix. With
 * show_params set, also list each parameter and its possible values
 * ("Any" when no value list is given). */
static void
munit_suite_list_tests(const MunitSuite *suite, munit_bool show_params, const char *prefix) {
size_t pre_l;
char *pre = munit_maybe_concat(&pre_l, (char *) prefix, (char *) suite->prefix);
const MunitTest *test;
const MunitParameterEnum *params;
munit_bool first;
char **val;
const MunitSuite *child_suite;
for (test = suite->tests;
test != NULL && test->name != NULL;
test++) {
if (pre != NULL)
fputs(pre, stdout);
puts(test->name);
if (show_params) {
for (params = test->parameters;
params != NULL && params->name != NULL;
params++) {
fprintf(stdout, " - %s: ", params->name);
if (params->values == NULL) {
puts("Any");
} else {
/* Comma-separate the value list. */
first = 1;
for (val = params->values;
*val != NULL;
val++) {
if (!first) {
fputs(", ", stdout);
} else {
first = 0;
}
fputs(*val, stdout);
}
putc('\n', stdout);
}
}
}
}
/* Recurse into child suites with the extended prefix. */
for (child_suite = suite->suites; child_suite != NULL && child_suite->prefix != NULL; child_suite++) {
munit_suite_list_tests(child_suite, show_params, pre);
}
munit_maybe_free_concat(pre, prefix, suite->prefix);
}
/* Best-effort guess at whether `stream` supports ANSI color escapes:
 * on POSIX any TTY is assumed to; on Windows a TTY counts only when
 * the ANSICON environment variable is present. */
static munit_bool
munit_stream_supports_ansi(FILE *stream) {
#if !defined(_WIN32)
return isatty(fileno(stream));
#else
#if !defined(__MINGW32__)
size_t ansicon_size = 0;
#endif
if (isatty(fileno(stream))) {
#if !defined(__MINGW32__)
/* MSVC: getenv_s with a NULL buffer just reports the value size;
 * non-zero means ANSICON is set. */
getenv_s(&ansicon_size, NULL, 0, "ANSICON");
return ansicon_size != 0;
#else
return getenv("ANSICON") != NULL;
#endif
}
return 0;
#endif
}
/* Parse the command line, run the suite, and print a summary.
 * `arguments` may supply extra CLI options handled by user callbacks.
 * Returns EXIT_SUCCESS when no test failed or errored, EXIT_FAILURE
 * otherwise (including on CLI errors). */
int
munit_suite_main_custom(const MunitSuite *suite, void *user_data,
int argc, char *const argv[MUNIT_ARRAY_PARAM(argc + 1)],
const MunitArgument arguments[]) {
int result = EXIT_FAILURE;
MunitTestRunner runner;
size_t parameters_size = 0;
size_t tests_size = 0;
int arg;
char *envptr;
unsigned long ts;
char *endptr;
unsigned long long iterations;
MunitLogLevel level;
const MunitArgument *argument;
const char **runner_tests;
unsigned int tests_run;
unsigned int tests_total;
/* Initialize the runner to its defaults: random seed, auto-detected
 * color support, forking enabled everywhere except Windows. */
runner.prefix = NULL;
runner.suite = NULL;
runner.tests = NULL;
runner.seed = 0;
runner.iterations = 0;
runner.parameters = NULL;
runner.single_parameter_mode = 0;
runner.user_data = NULL;
runner.report.successful = 0;
runner.report.skipped = 0;
runner.report.failed = 0;
runner.report.errored = 0;
#if defined(MUNIT_ENABLE_TIMING)
runner.report.cpu_clock = 0;
runner.report.wall_clock = 0;
#endif
runner.colorize = 0;
#if !defined(_WIN32)
runner.fork = 1;
#else
runner.fork = 0;
#endif
runner.show_stderr = 0;
runner.fatal_failures = 0;
runner.suite = suite;
runner.user_data = user_data;
runner.seed = munit_rand_generate_seed();
runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE);
/* Argument parsing: anything starting with "--" is an option (known
 * here or found via `arguments`); everything else is a test name. */
for (arg = 1; arg < argc; arg++) {
if (strncmp("--", argv[arg], 2) == 0) {
if (strcmp("seed", argv[arg] + 2) == 0) {
if (arg + 1 >= argc) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
goto cleanup;
}
/* strtoul + endptr/range check: reject trailing junk and values
 * that don't fit in 32 bits. */
envptr = argv[arg + 1];
ts = strtoul(argv[arg + 1], &envptr, 0);
if (*envptr != '\0' || ts > (~((munit_uint32_t) 0U))) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
goto cleanup;
}
runner.seed = (munit_uint32_t) ts;
arg++;
} else if (strcmp("iterations", argv[arg] + 2) == 0) {
if (arg + 1 >= argc) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
goto cleanup;
}
endptr = argv[arg + 1];
iterations = strtoul(argv[arg + 1], &endptr, 0);
if (*endptr != '\0' || iterations > UINT_MAX) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
goto cleanup;
}
runner.iterations = (unsigned int) iterations;
arg++;
} else if (strcmp("param", argv[arg] + 2) == 0) {
if (arg + 2 >= argc) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires two arguments", argv[arg]);
goto cleanup;
}
/* NOTE(review): runner.parameters is overwritten by realloc(); on
 * allocation failure the old block leaks. Benign here since we
 * jump straight to cleanup/exit, but the runner_tests branch below
 * shows the leak-free pattern. */
runner.parameters = realloc(runner.parameters, sizeof(MunitParameter) * (parameters_size + 2));
if (runner.parameters == NULL) {
munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory");
goto cleanup;
}
/* Keep the array NULL-name-terminated after each append. */
runner.parameters[parameters_size].name = (char *) argv[arg + 1];
runner.parameters[parameters_size].value = (char *) argv[arg + 2];
parameters_size++;
runner.parameters[parameters_size].name = NULL;
runner.parameters[parameters_size].value = NULL;
arg += 2;
} else if (strcmp("color", argv[arg] + 2) == 0) {
if (arg + 1 >= argc) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
goto cleanup;
}
if (strcmp(argv[arg + 1], "always") == 0)
runner.colorize = 1;
else if (strcmp(argv[arg + 1], "never") == 0)
runner.colorize = 0;
else if (strcmp(argv[arg + 1], "auto") == 0)
runner.colorize = munit_stream_supports_ansi(MUNIT_OUTPUT_FILE);
else {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
goto cleanup;
}
arg++;
} else if (strcmp("help", argv[arg] + 2) == 0) {
munit_print_help(argc, argv, user_data, arguments);
result = EXIT_SUCCESS;
goto cleanup;
} else if (strcmp("single", argv[arg] + 2) == 0) {
runner.single_parameter_mode = 1;
} else if (strcmp("show-stderr", argv[arg] + 2) == 0) {
runner.show_stderr = 1;
#if !defined(_WIN32)
} else if (strcmp("no-fork", argv[arg] + 2) == 0) {
runner.fork = 0;
#endif
} else if (strcmp("fatal-failures", argv[arg] + 2) == 0) {
runner.fatal_failures = 1;
} else if (strcmp("log-visible", argv[arg] + 2) == 0 ||
strcmp("log-fatal", argv[arg] + 2) == 0) {
if (arg + 1 >= argc) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "%s requires an argument", argv[arg]);
goto cleanup;
}
if (strcmp(argv[arg + 1], "debug") == 0)
level = MUNIT_LOG_DEBUG;
else if (strcmp(argv[arg + 1], "info") == 0)
level = MUNIT_LOG_INFO;
else if (strcmp(argv[arg + 1], "warning") == 0)
level = MUNIT_LOG_WARNING;
else if (strcmp(argv[arg + 1], "error") == 0)
level = MUNIT_LOG_ERROR;
else {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "invalid value ('%s') passed to %s", argv[arg + 1], argv[arg]);
goto cleanup;
}
if (strcmp("log-visible", argv[arg] + 2) == 0)
munit_log_level_visible = level;
else
munit_log_level_fatal = level;
arg++;
} else if (strcmp("list", argv[arg] + 2) == 0) {
munit_suite_list_tests(suite, 0, NULL);
result = EXIT_SUCCESS;
goto cleanup;
} else if (strcmp("list-params", argv[arg] + 2) == 0) {
munit_suite_list_tests(suite, 1, NULL);
result = EXIT_SUCCESS;
goto cleanup;
} else {
/* Unknown to us: delegate to any user-registered argument. */
argument = munit_arguments_find(arguments, argv[arg] + 2);
if (argument == NULL) {
munit_logf_internal(MUNIT_LOG_ERROR, stderr, "unknown argument ('%s')", argv[arg]);
goto cleanup;
}
if (!argument->parse_argument(suite, user_data, &arg, argc, argv))
goto cleanup;
}
} else {
/* Positional argument: a test name filter. Grow via a temporary so
 * the original array is not leaked if realloc() fails. */
runner_tests = realloc((void *) runner.tests, sizeof(char *) * (tests_size + 2));
if (runner_tests == NULL) {
munit_log_internal(MUNIT_LOG_ERROR, stderr, "failed to allocate memory");
goto cleanup;
}
runner.tests = runner_tests;
runner.tests[tests_size++] = argv[arg];
runner.tests[tests_size] = NULL;
}
}
fflush(stderr);
fprintf(MUNIT_OUTPUT_FILE, "Running test suite with seed 0x%08" PRIx32 "...\n", runner.seed);
munit_test_runner_run(&runner);
/* Summarize: success percentage is relative to tests actually run,
 * skip percentage relative to the grand total. */
tests_run = runner.report.successful + runner.report.failed + runner.report.errored;
tests_total = tests_run + runner.report.skipped;
if (tests_run == 0) {
fprintf(stderr, "No tests run, %d (100%%) skipped.\n", runner.report.skipped);
} else {
fprintf(MUNIT_OUTPUT_FILE, "%d of %d (%0.0f%%) tests successful, %d (%0.0f%%) test skipped.\n",
runner.report.successful, tests_run,
(((double) runner.report.successful) / ((double) tests_run)) * 100.0,
runner.report.skipped,
(((double) runner.report.skipped) / ((double) tests_total)) * 100.0);
}
if (runner.report.failed == 0 && runner.report.errored == 0) {
result = EXIT_SUCCESS;
}
cleanup:
free(runner.parameters);
free((void *) runner.tests);
return result;
}
/* Convenience entry point: run the suite with no custom CLI
 * arguments. (Fix: removed a stray "|" token after the closing brace
 * — residue from a table-formatted dump — which broke compilation.) */
int
munit_suite_main(const MunitSuite *suite, void *user_data,
                 int argc, char *const argv[MUNIT_ARRAY_PARAM(argc + 1)]) {
  return munit_suite_main_custom(suite, user_data, argc, argv, NULL);
}
// libomp_interface.h
// This file does not contain any code; it just contains additional text and formatting
// for doxygen.
//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
/*! @mainpage LLVM OpenMP* Runtime Library Interface
@section sec_intro Introduction
This document describes the interface provided by the
LLVM OpenMP\other runtime library to the compiler.
Routines that are directly called as simple functions by user code are
not currently described here, since their definition is in the OpenMP
specification available from http://openmp.org
The aim here is to explain the interface from the compiler to the runtime.
The overall design is described, and each function in the interface
has its own description. (At least, that's the ambition, we may not be there yet).
@section sec_building Building the Runtime
For the impatient, we cover building the runtime as the first topic here.
A top-level Makefile is provided that attempts to derive a suitable
configuration for the most commonly used environments. To see the
default settings, type:
@code
% make info
@endcode
You can change the Makefile's behavior with the following options:
- <b>omp_root</b>: The path to the top-level directory containing the top-level
Makefile. By default, this will take on the value of the
current working directory.
- <b>omp_os</b>: Operating system. By default, the build will attempt to
detect this. Currently supports "linux", "macos", and
"windows".
- <b>arch</b>: Architecture. By default, the build will attempt to
detect this if not specified by the user. Currently
supported values are
- "32" for IA-32 architecture
- "32e" for Intel® 64 architecture
- "mic" for Intel® Many Integrated Core Architecture (
If "mic" is specified then "icc" will be used as the
compiler, and appropriate k1om binutils will be used. The
necessary packages must be installed on the build machine
for this to be possible, but an
Intel® Xeon Phi™
coprocessor is not required to build the library).
- <b>compiler</b>: Which compiler to use for the build. Defaults to "icc"
or "icl" depending on the value of omp_os. Also supports
"gcc" when omp_os is "linux" for gcc\other versions
4.6.2 and higher. For icc on OS X\other, OS X\other versions
greater than 10.6 are not supported currently. Also, icc
version 13.0 is not supported. The selected compiler should be
installed and in the user's path. The corresponding
Fortran compiler should also be in the path.
- <b>mode</b>: Library mode: default is "release". Also supports "debug".
To use any of the options above, simply add <option_name>=<value>. For
example, if you want to build with gcc instead of icc, type:
@code
% make compiler=gcc
@endcode
Underneath the hood of the top-level Makefile, the runtime is built by
a perl script that in turn drives a detailed runtime system make. The
script can be found at <tt>tools/build.pl</tt>, and will print
information about all its flags and controls if invoked as
@code
% tools/build.pl --help
@endcode
If invoked with no arguments, it will try to build a set of libraries
that are appropriate for the machine on which the build is happening.
There are many options for building out of tree, and configuring library
features that can also be used. Consult the <tt>--help</tt> output for details.
@section sec_supported Supported RTL Build Configurations
The architectures supported are IA-32 architecture, Intel® 64, and
Intel® Many Integrated Core Architecture. The build configurations
supported are shown in the table below.
<table border=1>
<tr><th> <th>icc/icl<th>gcc
<tr><td>Linux\other OS<td>Yes(1,5)<td>Yes(2,4)
<tr><td>OS X\other<td>Yes(1,3,4)<td>No
<tr><td>Windows\other OS<td>Yes(1,4)<td>No
</table>
(1) On IA-32 architecture and Intel® 64, icc/icl versions 12.x
are supported (12.1 is recommended).<br>
(2) gcc version 4.6.2 is supported.<br>
(3) For icc on OS X\other, OS X\other version 10.5.8 is supported.<br>
(4) Intel® Many Integrated Core Architecture not supported.<br>
(5) On Intel® Many Integrated Core Architecture, icc/icl versions 13.0 or later are required.
@section sec_frontend Front-end Compilers that work with this RTL
The following compilers are known to do compatible code generation for
this RTL: icc/icl, gcc. Code generation is discussed in more detail
later in this document.
@section sec_outlining Outlining
The runtime interface is based on the idea that the compiler
"outlines" sections of code that are to run in parallel into separate
functions that can then be invoked in multiple threads. For instance,
simple code like this
@code
void foo()
{
#pragma omp parallel
{
... do something ...
}
}
@endcode
is converted into something that looks conceptually like this (where
the names used are merely illustrative; the real library function
names will be used later after we've discussed some more issues...)
@code
static void outlinedFooBody()
{
... do something ...
}
void foo()
{
__OMP_runtime_fork(outlinedFooBody, (void*)0); // Not the real function name!
}
@endcode
@subsection SEC_SHAREDVARS Addressing shared variables
In real uses of the OpenMP\other API there are normally references
from the outlined code to shared variables that are in scope in the containing function.
Therefore the containing function must be able to address
these variables. The runtime supports two alternate ways of doing
this.
@subsubsection SEC_SEC_OT Current Technique
The technique currently supported by the runtime library is to receive
a separate pointer to each shared variable that can be accessed from
the outlined function. This is what is shown in the example below.
We hope soon to provide an alternative interface to support the
alternate implementation described in the next section. The
alternative implementation has performance advantages for small
parallel regions that have many shared variables.
@subsubsection SEC_SEC_PT Future Technique
The idea is to treat the outlined function as though it
were a lexically nested function, and pass it a single argument which
is the pointer to the parent's stack frame. Provided that the compiler
knows the layout of the parent frame when it is generating the outlined
function it can then access the up-level variables at appropriate
offsets from the parent frame. This is a classical compiler technique
from the 1960s to support languages like Algol (and its descendants)
that support lexically nested functions.
The main benefit of this technique is that there is no code required
at the fork point to marshal the arguments to the outlined function.
Since the runtime knows statically how many arguments must be passed to the
outlined function, it can easily copy them to the thread's stack
frame. Therefore the performance of the fork code is independent of
the number of shared variables that are accessed by the outlined
function.
If it is hard to determine the stack layout of the parent while generating the
outlined code, it is still possible to use this approach by collecting all of
the variables in the parent that are accessed from outlined functions into
a single `struct` which is placed on the stack, and whose address is passed
to the outlined functions. In this way the offsets of the shared variables
are known (since they are inside the struct) without needing to know
the complete layout of the parent stack-frame. From the point of view
of the runtime either of these techniques is equivalent, since in either
case it only has to pass a single argument to the outlined function to allow
it to access shared variables.
A scheme like this is how gcc\other generates outlined functions.
@section SEC_INTERFACES Library Interfaces
The library functions used for specific parts of the OpenMP\other language implementation
are documented in different modules.
- @ref BASIC_TYPES fundamental types used by the runtime in many places
- @ref DEPRECATED functions that are in the library but are no longer required
- @ref STARTUP_SHUTDOWN functions for initializing and finalizing the runtime
- @ref PARALLEL functions for implementing `omp parallel`
- @ref THREAD_STATES functions for supporting thread state inquiries
- @ref WORK_SHARING functions for work sharing constructs such as `omp for`, `omp sections`
- @ref THREADPRIVATE functions to support thread private data, copyin etc
- @ref SYNCHRONIZATION functions to support `omp critical`, `omp barrier`, `omp master`, reductions etc
- @ref ATOMIC_OPS functions to support atomic operations
- @ref STATS_GATHERING macros to support developer profiling of libomp
- Documentation on tasking has still to be written...
@section SEC_EXAMPLES Examples
@subsection SEC_WORKSHARING_EXAMPLE Work Sharing Example
This example shows the code generated for a parallel for with reduction and dynamic scheduling.
@code
extern float foo( void );
int main () {
int i;
float r = 0.0;
#pragma omp parallel for schedule(dynamic) reduction(+:r)
for ( i = 0; i < 10; i ++ ) {
r += foo();
}
}
@endcode
The transformed code looks like this.
@code
extern float foo( void );
int main () {
static int zero = 0;
auto int gtid;
auto float r = 0.0;
__kmpc_begin( & loc3, 0 );
// The gtid is not actually required in this example so could be omitted;
// We show its initialization here because it is often required for calls into
// the runtime and should be locally cached like this.
gtid = __kmpc_global_thread_num( & loc3 );
__kmpc_fork_call( & loc7, 1, main_7_parallel_3, & r );
__kmpc_end( & loc0 );
return 0;
}
struct main_10_reduction_t_5 { float r_10_rpr; };
static kmp_critical_name lck = { 0 };
static ident_t loc10; // loc10.flags should contain KMP_IDENT_ATOMIC_REDUCE bit set
// if compiler has generated an atomic reduction.
void main_7_parallel_3( int *gtid, int *btid, float *r_7_shp ) {
auto int i_7_pr;
auto int lower, upper, liter, incr;
auto struct main_10_reduction_t_5 reduce;
reduce.r_10_rpr = 0.F;
liter = 0;
__kmpc_dispatch_init_4( & loc7,*gtid, 35, 0, 9, 1, 1 );
while ( __kmpc_dispatch_next_4( & loc7, *gtid, & liter, & lower, & upper, & incr ) ) {
for( i_7_pr = lower; upper >= i_7_pr; i_7_pr ++ )
reduce.r_10_rpr += foo();
}
switch( __kmpc_reduce_nowait( & loc10, *gtid, 1, 4, & reduce, main_10_reduce_5, & lck ) ) {
case 1:
*r_7_shp += reduce.r_10_rpr;
__kmpc_end_reduce_nowait( & loc10, *gtid, & lck );
break;
case 2:
__kmpc_atomic_float4_add( & loc10, *gtid, r_7_shp, reduce.r_10_rpr );
break;
default:;
}
}
void main_10_reduce_5( struct main_10_reduction_t_5 *reduce_lhs,
struct main_10_reduction_t_5 *reduce_rhs )
{
reduce_lhs->r_10_rpr += reduce_rhs->r_10_rpr;
}
@endcode
@defgroup BASIC_TYPES Basic Types
Types that are used throughout the runtime.
@defgroup DEPRECATED Deprecated Functions
Functions in this group are for backwards compatibility only, and
should not be used in new code.
@defgroup STARTUP_SHUTDOWN Startup and Shutdown
These functions are for library initialization and shutdown.
@defgroup PARALLEL Parallel (fork/join)
These functions are used for implementing <tt>\#pragma omp parallel</tt>.
@defgroup THREAD_STATES Thread Information
These functions return information about the currently executing thread.
@defgroup WORK_SHARING Work Sharing
These functions are used for implementing
<tt>\#pragma omp for</tt>, <tt>\#pragma omp sections</tt>, <tt>\#pragma omp single</tt> and
<tt>\#pragma omp master</tt> constructs.
When handling loops, there are different functions for each of the signed and unsigned 32 and 64 bit integer types
which have the name suffixes `_4`, `_4u`, `_8` and `_8u`. The semantics of each of the functions is the same,
so they are only described once.
Static loop scheduling is handled by @ref __kmpc_for_static_init_4 and friends. Only a single call is needed,
since the iterations to be executed by any given thread can be determined as soon as the loop parameters are known.
Dynamic scheduling is handled by the @ref __kmpc_dispatch_init_4 and @ref __kmpc_dispatch_next_4 functions.
The init function is called once in each thread outside the loop, while the next function is called each
time that the previous chunk of work has been exhausted.
@defgroup SYNCHRONIZATION Synchronization
These functions are used for implementing barriers.
@defgroup THREADPRIVATE Thread private data support
These functions support copyin/out and thread private data.
@defgroup STATS_GATHERING Statistics Gathering from OMPTB
These macros support profiling the libomp library. Use --stats=on when building with build.pl to enable
and then use the KMP_* macros to profile (through counts or clock ticks) libomp during execution of an OpenMP program.
@section sec_stats_env_vars Environment Variables
This section describes the environment variables relevant to stats-gathering in libomp
@code
KMP_STATS_FILE
@endcode
This environment variable is set to an output filename. If the file already exists, new statistics are appended to it, *NOT OVERWRITTEN*. If this environment variable is undefined, the statistics will be output to stderr.
@code
KMP_STATS_THREADS
@endcode
This environment variable indicates to print thread-specific statistics as well as aggregate statistics. Each thread's statistics will be shown as well as the collective sum of all threads. The values "true", "on", "1", "yes" will all indicate to print per thread statistics.
@defgroup TASKING Tasking support
These functions support tasking constructs.
@defgroup USER User visible functions
These functions can be called directly by the user, but are runtime library specific, rather than being OpenMP interfaces.
*/
|
dds.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD DDDD SSSSS %
% D D D D SS %
% D D D D SSS %
% D D D D SS %
% DDDD DDDD SSSSS %
% %
% %
% Read/Write Microsoft Direct Draw Surface Image Format %
% %
% Software Design %
% Bianca van Schaik %
% March 2008 %
% Dirk Lemstra %
% September 2013 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"
/*
Definitions
*/
#define DDSD_CAPS 0x00000001
#define DDSD_HEIGHT 0x00000002
#define DDSD_WIDTH 0x00000004
#define DDSD_PITCH 0x00000008
#define DDSD_PIXELFORMAT 0x00001000
#define DDSD_MIPMAPCOUNT 0x00020000
#define DDSD_LINEARSIZE 0x00080000
#define DDSD_DEPTH 0x00800000
#define DDPF_ALPHAPIXELS 0x00000001
#define DDPF_FOURCC 0x00000004
#define DDPF_RGB 0x00000040
#define DDPF_LUMINANCE 0x00020000
#define FOURCC_DXT1 0x31545844
#define FOURCC_DXT3 0x33545844
#define FOURCC_DXT5 0x35545844
#define FOURCC_DX10 0x30315844
#define DDSCAPS_COMPLEX 0x00000008
#define DDSCAPS_TEXTURE 0x00001000
#define DDSCAPS_MIPMAP 0x00400000
#define DDSCAPS2_CUBEMAP 0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
#define DDSCAPS2_VOLUME 0x00200000
#define DDSEXT_DIMENSION_TEX2D 0x00000003
#define DDSEXTFLAGS_CUBEMAP 0x00000004
/*
  DXGI_FORMAT enumeration as defined by DirectX Graphics Infrastructure.
  The DX10 extension header stores one of these values (read into
  DDSInfo.extFormat). Enumerator ORDER fixes the numeric values
  (DXGI_FORMAT_UNKNOWN == 0), so entries must never be reordered,
  inserted, or removed.
*/
typedef enum DXGI_FORMAT
{
DXGI_FORMAT_UNKNOWN,
DXGI_FORMAT_R32G32B32A32_TYPELESS,
DXGI_FORMAT_R32G32B32A32_FLOAT,
DXGI_FORMAT_R32G32B32A32_UINT,
DXGI_FORMAT_R32G32B32A32_SINT,
DXGI_FORMAT_R32G32B32_TYPELESS,
DXGI_FORMAT_R32G32B32_FLOAT,
DXGI_FORMAT_R32G32B32_UINT,
DXGI_FORMAT_R32G32B32_SINT,
DXGI_FORMAT_R16G16B16A16_TYPELESS,
DXGI_FORMAT_R16G16B16A16_FLOAT,
DXGI_FORMAT_R16G16B16A16_UNORM,
DXGI_FORMAT_R16G16B16A16_UINT,
DXGI_FORMAT_R16G16B16A16_SNORM,
DXGI_FORMAT_R16G16B16A16_SINT,
DXGI_FORMAT_R32G32_TYPELESS,
DXGI_FORMAT_R32G32_FLOAT,
DXGI_FORMAT_R32G32_UINT,
DXGI_FORMAT_R32G32_SINT,
DXGI_FORMAT_R32G8X24_TYPELESS,
DXGI_FORMAT_D32_FLOAT_S8X24_UINT,
DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS,
DXGI_FORMAT_X32_TYPELESS_G8X24_UINT,
DXGI_FORMAT_R10G10B10A2_TYPELESS,
DXGI_FORMAT_R10G10B10A2_UNORM,
DXGI_FORMAT_R10G10B10A2_UINT,
DXGI_FORMAT_R11G11B10_FLOAT,
DXGI_FORMAT_R8G8B8A8_TYPELESS,
DXGI_FORMAT_R8G8B8A8_UNORM,
DXGI_FORMAT_R8G8B8A8_UNORM_SRGB,
DXGI_FORMAT_R8G8B8A8_UINT,
DXGI_FORMAT_R8G8B8A8_SNORM,
DXGI_FORMAT_R8G8B8A8_SINT,
DXGI_FORMAT_R16G16_TYPELESS,
DXGI_FORMAT_R16G16_FLOAT,
DXGI_FORMAT_R16G16_UNORM,
DXGI_FORMAT_R16G16_UINT,
DXGI_FORMAT_R16G16_SNORM,
DXGI_FORMAT_R16G16_SINT,
DXGI_FORMAT_R32_TYPELESS,
DXGI_FORMAT_D32_FLOAT,
DXGI_FORMAT_R32_FLOAT,
DXGI_FORMAT_R32_UINT,
DXGI_FORMAT_R32_SINT,
DXGI_FORMAT_R24G8_TYPELESS,
DXGI_FORMAT_D24_UNORM_S8_UINT,
DXGI_FORMAT_R24_UNORM_X8_TYPELESS,
DXGI_FORMAT_X24_TYPELESS_G8_UINT,
DXGI_FORMAT_R8G8_TYPELESS,
DXGI_FORMAT_R8G8_UNORM,
DXGI_FORMAT_R8G8_UINT,
DXGI_FORMAT_R8G8_SNORM,
DXGI_FORMAT_R8G8_SINT,
DXGI_FORMAT_R16_TYPELESS,
DXGI_FORMAT_R16_FLOAT,
DXGI_FORMAT_D16_UNORM,
DXGI_FORMAT_R16_UNORM,
DXGI_FORMAT_R16_UINT,
DXGI_FORMAT_R16_SNORM,
DXGI_FORMAT_R16_SINT,
DXGI_FORMAT_R8_TYPELESS,
DXGI_FORMAT_R8_UNORM,
DXGI_FORMAT_R8_UINT,
DXGI_FORMAT_R8_SNORM,
DXGI_FORMAT_R8_SINT,
DXGI_FORMAT_A8_UNORM,
DXGI_FORMAT_R1_UNORM,
DXGI_FORMAT_R9G9B9E5_SHAREDEXP,
DXGI_FORMAT_R8G8_B8G8_UNORM,
DXGI_FORMAT_G8R8_G8B8_UNORM,
DXGI_FORMAT_BC1_TYPELESS,
DXGI_FORMAT_BC1_UNORM,
DXGI_FORMAT_BC1_UNORM_SRGB,
DXGI_FORMAT_BC2_TYPELESS,
DXGI_FORMAT_BC2_UNORM,
DXGI_FORMAT_BC2_UNORM_SRGB,
DXGI_FORMAT_BC3_TYPELESS,
DXGI_FORMAT_BC3_UNORM,
DXGI_FORMAT_BC3_UNORM_SRGB,
DXGI_FORMAT_BC4_TYPELESS,
DXGI_FORMAT_BC4_UNORM,
DXGI_FORMAT_BC4_SNORM,
DXGI_FORMAT_BC5_TYPELESS,
DXGI_FORMAT_BC5_UNORM,
DXGI_FORMAT_BC5_SNORM,
DXGI_FORMAT_B5G6R5_UNORM,
DXGI_FORMAT_B5G5R5A1_UNORM,
DXGI_FORMAT_B8G8R8A8_UNORM,
DXGI_FORMAT_B8G8R8X8_UNORM,
DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM,
DXGI_FORMAT_B8G8R8A8_TYPELESS,
DXGI_FORMAT_B8G8R8A8_UNORM_SRGB,
DXGI_FORMAT_B8G8R8X8_TYPELESS,
DXGI_FORMAT_B8G8R8X8_UNORM_SRGB,
DXGI_FORMAT_BC6H_TYPELESS,
DXGI_FORMAT_BC6H_UF16,
DXGI_FORMAT_BC6H_SF16,
DXGI_FORMAT_BC7_TYPELESS,
DXGI_FORMAT_BC7_UNORM,
DXGI_FORMAT_BC7_UNORM_SRGB,
DXGI_FORMAT_AYUV,
DXGI_FORMAT_Y410,
DXGI_FORMAT_Y416,
DXGI_FORMAT_NV12,
DXGI_FORMAT_P010,
DXGI_FORMAT_P016,
DXGI_FORMAT_420_OPAQUE,
DXGI_FORMAT_YUY2,
DXGI_FORMAT_Y210,
DXGI_FORMAT_Y216,
DXGI_FORMAT_NV11,
DXGI_FORMAT_AI44,
DXGI_FORMAT_IA44,
DXGI_FORMAT_P8,
DXGI_FORMAT_A8P8,
DXGI_FORMAT_B4G4R4A4_UNORM,
DXGI_FORMAT_P208,
DXGI_FORMAT_V208,
DXGI_FORMAT_V408,
DXGI_FORMAT_SAMPLER_FEEDBACK_MIN_MIP_OPAQUE,
DXGI_FORMAT_SAMPLER_FEEDBACK_MIP_REGION_USED_OPAQUE,
DXGI_FORMAT_FORCE_UINT
} DXGI_FORMAT;
/* Fallback for toolchains whose headers do not define SIZE_MAX;
   (size_t)-1 is the maximum value of the unsigned type size_t. */
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
/*
Structure declarations.
*/
/* In-memory copy of the DDS_PIXELFORMAT header block: how pixel
   data is encoded (DDPF_* flags, optional FourCC, and the channel
   bit masks for uncompressed RGB(A) data). */
typedef struct _DDSPixelFormat
{
size_t
flags, /* DDPF_* bits */
fourcc, /* FOURCC_* code, valid when DDPF_FOURCC is set */
rgb_bitcount, /* bits per pixel for uncompressed data */
r_bitmask,
g_bitmask,
b_bitmask,
alpha_bitmask;
} DDSPixelFormat;
/* Parsed DDS_HEADER, plus DX10 extension header fields (ext*)
   when the pixel format FourCC is FOURCC_DX10. */
typedef struct _DDSInfo
{
size_t
flags, /* DDSD_* bits: which fields below are valid */
height,
width,
pitchOrLinearSize, /* pitch (DDSD_PITCH) or linear size (DDSD_LINEARSIZE) */
depth, /* volume depth, valid with DDSD_DEPTH */
mipmapcount, /* valid with DDSD_MIPMAPCOUNT */
ddscaps1, /* DDSCAPS_* bits */
ddscaps2, /* DDSCAPS2_* bits (cubemap faces / volume) */
extFormat, /* DXGI_FORMAT value from the DX10 header */
extDimension, /* resource dimension, e.g. DDSEXT_DIMENSION_TEX2D */
extFlags, /* misc flags, e.g. DDSEXTFLAGS_CUBEMAP */
extArraySize,
extFlags2;
DDSPixelFormat
pixelformat;
} DDSInfo;
/* Per-channel values for the four palette colours of a compressed
   (DXTn/BC1-3) block, indexed 0..3. */
typedef struct _DDSColors
{
unsigned char
r[4],
g[4],
b[4],
a[4];
} DDSColors;
/* Per-channel endpoint values for a BC7 block; six slots per channel
   (presumably two endpoints for each of up to three subsets —
   NOTE(review): confirm against the BC7 decoder). */
typedef struct _BC7Colors
{
unsigned char
r[6],
g[6],
b[6],
a[6];
} BC7Colors;
/* Four-component float vector (x, y, z, w). */
typedef struct _DDSVector4
{
float
x,
y,
z,
w;
} DDSVector4;
/* Three-component float vector (x, y, z). */
typedef struct _DDSVector3
{
float
x,
y,
z;
} DDSVector3;
/* One candidate encoding in the single-colour lookup tables:
   start/end endpoint values and the resulting error. */
typedef struct _DDSSourceBlock
{
unsigned char
start,
end,
error;
} DDSSourceBlock;
/* Lookup entry for a single target channel value: the two candidate
   source-block encodings to choose between. */
typedef struct _DDSSingleColorLookup
{
DDSSourceBlock sources[2];
} DDSSingleColorLookup;
typedef struct _BC7ModeInfo
{
unsigned char
partition_bits,
num_subsets,
color_precision,
alpha_precision,
num_pbits,
index_precision,
index2_precision;
} BC7ModeInfo;
/* Signature of a whole-image decoder: reads pixel data for one image
   from the blob according to the parsed DDS header. NOTE(review): the
   bare MagickBooleanType parameter's meaning is not visible in this
   chunk — confirm at the call sites. */
typedef MagickBooleanType
DDSDecoder(const ImageInfo *,Image *,const DDSInfo *,const MagickBooleanType,
ExceptionInfo *);
/* Signature of a pixel-payload decoder for a single image. */
typedef MagickBooleanType
DDSPixelDecoder(Image *,const DDSInfo *,ExceptionInfo *);
/*
  Single-colour lookup table for 5-bit channels (red/blue of RGB565).
  Entry i holds, for target 8-bit value i (0..255), two candidate
  { start, end, error } endpoint encodings (see DDSSourceBlock).
  Generated table — do not hand-edit.
*/
static const DDSSingleColorLookup DDSLookup_5_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 1 } } },
{ { { 0, 0, 2 }, { 0, 1, 0 } } },
{ { { 0, 0, 3 }, { 0, 1, 1 } } },
{ { { 0, 0, 4 }, { 0, 2, 1 } } },
{ { { 1, 0, 3 }, { 0, 2, 0 } } },
{ { { 1, 0, 2 }, { 0, 2, 1 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 1, 2, 1 } } },
{ { { 1, 0, 2 }, { 1, 2, 0 } } },
{ { { 1, 0, 3 }, { 0, 4, 0 } } },
{ { { 1, 0, 4 }, { 0, 5, 1 } } },
{ { { 2, 0, 3 }, { 0, 5, 0 } } },
{ { { 2, 0, 2 }, { 0, 5, 1 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 2, 3, 1 } } },
{ { { 2, 0, 2 }, { 2, 3, 0 } } },
{ { { 2, 0, 3 }, { 0, 7, 0 } } },
{ { { 2, 0, 4 }, { 1, 6, 1 } } },
{ { { 3, 0, 3 }, { 1, 6, 0 } } },
{ { { 3, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 2 }, { 0, 10, 1 } } },
{ { { 3, 0, 3 }, { 0, 10, 0 } } },
{ { { 3, 0, 4 }, { 2, 7, 1 } } },
{ { { 4, 0, 4 }, { 2, 7, 0 } } },
{ { { 4, 0, 3 }, { 0, 11, 0 } } },
{ { { 4, 0, 2 }, { 1, 10, 1 } } },
{ { { 4, 0, 1 }, { 1, 10, 0 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 1 } } },
{ { { 4, 0, 2 }, { 0, 13, 0 } } },
{ { { 4, 0, 3 }, { 0, 13, 1 } } },
{ { { 4, 0, 4 }, { 0, 14, 1 } } },
{ { { 5, 0, 3 }, { 0, 14, 0 } } },
{ { { 5, 0, 2 }, { 2, 11, 1 } } },
{ { { 5, 0, 1 }, { 2, 11, 0 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 1, 14, 1 } } },
{ { { 5, 0, 2 }, { 1, 14, 0 } } },
{ { { 5, 0, 3 }, { 0, 16, 0 } } },
{ { { 5, 0, 4 }, { 0, 17, 1 } } },
{ { { 6, 0, 3 }, { 0, 17, 0 } } },
{ { { 6, 0, 2 }, { 0, 17, 1 } } },
{ { { 6, 0, 1 }, { 0, 18, 1 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 2, 15, 1 } } },
{ { { 6, 0, 2 }, { 2, 15, 0 } } },
{ { { 6, 0, 3 }, { 0, 19, 0 } } },
{ { { 6, 0, 4 }, { 1, 18, 1 } } },
{ { { 7, 0, 3 }, { 1, 18, 0 } } },
{ { { 7, 0, 2 }, { 0, 20, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 2 }, { 0, 22, 1 } } },
{ { { 7, 0, 3 }, { 0, 22, 0 } } },
{ { { 7, 0, 4 }, { 2, 19, 1 } } },
{ { { 8, 0, 4 }, { 2, 19, 0 } } },
{ { { 8, 0, 3 }, { 0, 23, 0 } } },
{ { { 8, 0, 2 }, { 1, 22, 1 } } },
{ { { 8, 0, 1 }, { 1, 22, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 1 } } },
{ { { 8, 0, 2 }, { 0, 25, 0 } } },
{ { { 8, 0, 3 }, { 0, 25, 1 } } },
{ { { 8, 0, 4 }, { 0, 26, 1 } } },
{ { { 9, 0, 3 }, { 0, 26, 0 } } },
{ { { 9, 0, 2 }, { 2, 23, 1 } } },
{ { { 9, 0, 1 }, { 2, 23, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 1, 26, 1 } } },
{ { { 9, 0, 2 }, { 1, 26, 0 } } },
{ { { 9, 0, 3 }, { 0, 28, 0 } } },
{ { { 9, 0, 4 }, { 0, 29, 1 } } },
{ { { 10, 0, 3 }, { 0, 29, 0 } } },
{ { { 10, 0, 2 }, { 0, 29, 1 } } },
{ { { 10, 0, 1 }, { 0, 30, 1 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 2, 27, 1 } } },
{ { { 10, 0, 2 }, { 2, 27, 0 } } },
{ { { 10, 0, 3 }, { 0, 31, 0 } } },
{ { { 10, 0, 4 }, { 1, 30, 1 } } },
{ { { 11, 0, 3 }, { 1, 30, 0 } } },
{ { { 11, 0, 2 }, { 4, 24, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 0 }, { 1, 31, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 2 }, { 2, 30, 1 } } },
{ { { 11, 0, 3 }, { 2, 30, 0 } } },
{ { { 11, 0, 4 }, { 2, 31, 1 } } },
{ { { 12, 0, 4 }, { 2, 31, 0 } } },
{ { { 12, 0, 3 }, { 4, 27, 0 } } },
{ { { 12, 0, 2 }, { 3, 30, 1 } } },
{ { { 12, 0, 1 }, { 3, 30, 0 } } },
{ { { 12, 0, 0 }, { 4, 28, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 1 } } },
{ { { 12, 0, 2 }, { 3, 31, 0 } } },
{ { { 12, 0, 3 }, { 3, 31, 1 } } },
{ { { 12, 0, 4 }, { 4, 30, 1 } } },
{ { { 13, 0, 3 }, { 4, 30, 0 } } },
{ { { 13, 0, 2 }, { 6, 27, 1 } } },
{ { { 13, 0, 1 }, { 6, 27, 0 } } },
{ { { 13, 0, 0 }, { 4, 31, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 1 } } },
{ { { 13, 0, 2 }, { 5, 30, 0 } } },
{ { { 13, 0, 3 }, { 8, 24, 0 } } },
{ { { 13, 0, 4 }, { 5, 31, 1 } } },
{ { { 14, 0, 3 }, { 5, 31, 0 } } },
{ { { 14, 0, 2 }, { 5, 31, 1 } } },
{ { { 14, 0, 1 }, { 6, 30, 1 } } },
{ { { 14, 0, 0 }, { 6, 30, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 1 } } },
{ { { 14, 0, 2 }, { 6, 31, 0 } } },
{ { { 14, 0, 3 }, { 8, 27, 0 } } },
{ { { 14, 0, 4 }, { 7, 30, 1 } } },
{ { { 15, 0, 3 }, { 7, 30, 0 } } },
{ { { 15, 0, 2 }, { 8, 28, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 0 }, { 7, 31, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 2 }, { 8, 30, 1 } } },
{ { { 15, 0, 3 }, { 8, 30, 0 } } },
{ { { 15, 0, 4 }, { 10, 27, 1 } } },
{ { { 16, 0, 4 }, { 10, 27, 0 } } },
{ { { 16, 0, 3 }, { 8, 31, 0 } } },
{ { { 16, 0, 2 }, { 9, 30, 1 } } },
{ { { 16, 0, 1 }, { 9, 30, 0 } } },
{ { { 16, 0, 0 }, { 12, 24, 0 } } },
{ { { 16, 0, 1 }, { 9, 31, 1 } } },
{ { { 16, 0, 2 }, { 9, 31, 0 } } },
{ { { 16, 0, 3 }, { 9, 31, 1 } } },
{ { { 16, 0, 4 }, { 10, 30, 1 } } },
{ { { 17, 0, 3 }, { 10, 30, 0 } } },
{ { { 17, 0, 2 }, { 10, 31, 1 } } },
{ { { 17, 0, 1 }, { 10, 31, 0 } } },
{ { { 17, 0, 0 }, { 12, 27, 0 } } },
{ { { 17, 0, 1 }, { 11, 30, 1 } } },
{ { { 17, 0, 2 }, { 11, 30, 0 } } },
{ { { 17, 0, 3 }, { 12, 28, 0 } } },
{ { { 17, 0, 4 }, { 11, 31, 1 } } },
{ { { 18, 0, 3 }, { 11, 31, 0 } } },
{ { { 18, 0, 2 }, { 11, 31, 1 } } },
{ { { 18, 0, 1 }, { 12, 30, 1 } } },
{ { { 18, 0, 0 }, { 12, 30, 0 } } },
{ { { 18, 0, 1 }, { 14, 27, 1 } } },
{ { { 18, 0, 2 }, { 14, 27, 0 } } },
{ { { 18, 0, 3 }, { 12, 31, 0 } } },
{ { { 18, 0, 4 }, { 13, 30, 1 } } },
{ { { 19, 0, 3 }, { 13, 30, 0 } } },
{ { { 19, 0, 2 }, { 16, 24, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 0 }, { 13, 31, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 2 }, { 14, 30, 1 } } },
{ { { 19, 0, 3 }, { 14, 30, 0 } } },
{ { { 19, 0, 4 }, { 14, 31, 1 } } },
{ { { 20, 0, 4 }, { 14, 31, 0 } } },
{ { { 20, 0, 3 }, { 16, 27, 0 } } },
{ { { 20, 0, 2 }, { 15, 30, 1 } } },
{ { { 20, 0, 1 }, { 15, 30, 0 } } },
{ { { 20, 0, 0 }, { 16, 28, 0 } } },
{ { { 20, 0, 1 }, { 15, 31, 1 } } },
{ { { 20, 0, 2 }, { 15, 31, 0 } } },
{ { { 20, 0, 3 }, { 15, 31, 1 } } },
{ { { 20, 0, 4 }, { 16, 30, 1 } } },
{ { { 21, 0, 3 }, { 16, 30, 0 } } },
{ { { 21, 0, 2 }, { 18, 27, 1 } } },
{ { { 21, 0, 1 }, { 18, 27, 0 } } },
{ { { 21, 0, 0 }, { 16, 31, 0 } } },
{ { { 21, 0, 1 }, { 17, 30, 1 } } },
{ { { 21, 0, 2 }, { 17, 30, 0 } } },
{ { { 21, 0, 3 }, { 20, 24, 0 } } },
{ { { 21, 0, 4 }, { 17, 31, 1 } } },
{ { { 22, 0, 3 }, { 17, 31, 0 } } },
{ { { 22, 0, 2 }, { 17, 31, 1 } } },
{ { { 22, 0, 1 }, { 18, 30, 1 } } },
{ { { 22, 0, 0 }, { 18, 30, 0 } } },
{ { { 22, 0, 1 }, { 18, 31, 1 } } },
{ { { 22, 0, 2 }, { 18, 31, 0 } } },
{ { { 22, 0, 3 }, { 20, 27, 0 } } },
{ { { 22, 0, 4 }, { 19, 30, 1 } } },
{ { { 23, 0, 3 }, { 19, 30, 0 } } },
{ { { 23, 0, 2 }, { 20, 28, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 0 }, { 19, 31, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 2 }, { 20, 30, 1 } } },
{ { { 23, 0, 3 }, { 20, 30, 0 } } },
{ { { 23, 0, 4 }, { 22, 27, 1 } } },
{ { { 24, 0, 4 }, { 22, 27, 0 } } },
{ { { 24, 0, 3 }, { 20, 31, 0 } } },
{ { { 24, 0, 2 }, { 21, 30, 1 } } },
{ { { 24, 0, 1 }, { 21, 30, 0 } } },
{ { { 24, 0, 0 }, { 24, 24, 0 } } },
{ { { 24, 0, 1 }, { 21, 31, 1 } } },
{ { { 24, 0, 2 }, { 21, 31, 0 } } },
{ { { 24, 0, 3 }, { 21, 31, 1 } } },
{ { { 24, 0, 4 }, { 22, 30, 1 } } },
{ { { 25, 0, 3 }, { 22, 30, 0 } } },
{ { { 25, 0, 2 }, { 22, 31, 1 } } },
{ { { 25, 0, 1 }, { 22, 31, 0 } } },
{ { { 25, 0, 0 }, { 24, 27, 0 } } },
{ { { 25, 0, 1 }, { 23, 30, 1 } } },
{ { { 25, 0, 2 }, { 23, 30, 0 } } },
{ { { 25, 0, 3 }, { 24, 28, 0 } } },
{ { { 25, 0, 4 }, { 23, 31, 1 } } },
{ { { 26, 0, 3 }, { 23, 31, 0 } } },
{ { { 26, 0, 2 }, { 23, 31, 1 } } },
{ { { 26, 0, 1 }, { 24, 30, 1 } } },
{ { { 26, 0, 0 }, { 24, 30, 0 } } },
{ { { 26, 0, 1 }, { 26, 27, 1 } } },
{ { { 26, 0, 2 }, { 26, 27, 0 } } },
{ { { 26, 0, 3 }, { 24, 31, 0 } } },
{ { { 26, 0, 4 }, { 25, 30, 1 } } },
{ { { 27, 0, 3 }, { 25, 30, 0 } } },
{ { { 27, 0, 2 }, { 28, 24, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 0 }, { 25, 31, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 2 }, { 26, 30, 1 } } },
{ { { 27, 0, 3 }, { 26, 30, 0 } } },
{ { { 27, 0, 4 }, { 26, 31, 1 } } },
{ { { 28, 0, 4 }, { 26, 31, 0 } } },
{ { { 28, 0, 3 }, { 28, 27, 0 } } },
{ { { 28, 0, 2 }, { 27, 30, 1 } } },
{ { { 28, 0, 1 }, { 27, 30, 0 } } },
{ { { 28, 0, 0 }, { 28, 28, 0 } } },
{ { { 28, 0, 1 }, { 27, 31, 1 } } },
{ { { 28, 0, 2 }, { 27, 31, 0 } } },
{ { { 28, 0, 3 }, { 27, 31, 1 } } },
{ { { 28, 0, 4 }, { 28, 30, 1 } } },
{ { { 29, 0, 3 }, { 28, 30, 0 } } },
{ { { 29, 0, 2 }, { 30, 27, 1 } } },
{ { { 29, 0, 1 }, { 30, 27, 0 } } },
{ { { 29, 0, 0 }, { 28, 31, 0 } } },
{ { { 29, 0, 1 }, { 29, 30, 1 } } },
{ { { 29, 0, 2 }, { 29, 30, 0 } } },
{ { { 29, 0, 3 }, { 29, 30, 1 } } },
{ { { 29, 0, 4 }, { 29, 31, 1 } } },
{ { { 30, 0, 3 }, { 29, 31, 0 } } },
{ { { 30, 0, 2 }, { 29, 31, 1 } } },
{ { { 30, 0, 1 }, { 30, 30, 1 } } },
{ { { 30, 0, 0 }, { 30, 30, 0 } } },
{ { { 30, 0, 1 }, { 30, 31, 1 } } },
{ { { 30, 0, 2 }, { 30, 31, 0 } } },
{ { { 30, 0, 3 }, { 30, 31, 1 } } },
{ { { 30, 0, 4 }, { 31, 30, 1 } } },
{ { { 31, 0, 3 }, { 31, 30, 0 } } },
{ { { 31, 0, 2 }, { 31, 30, 1 } } },
{ { { 31, 0, 1 }, { 31, 31, 1 } } },
{ { { 31, 0, 0 }, { 31, 31, 0 } } }
};
/*
  Single-colour lookup table for the 6-bit channel (green of RGB565).
  Same layout as DDSLookup_5_4: entry i holds two candidate
  { start, end, error } endpoint encodings for 8-bit value i.
  Generated table — do not hand-edit.
*/
static const DDSSingleColorLookup DDSLookup_6_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 0 } } },
{ { { 0, 0, 2 }, { 0, 2, 0 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 0, 4, 0 } } },
{ { { 1, 0, 2 }, { 0, 5, 0 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 0, 7, 0 } } },
{ { { 2, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 10, 0 } } },
{ { { 3, 0, 2 }, { 0, 11, 0 } } },
{ { { 4, 0, 1 }, { 0, 12, 1 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 0 } } },
{ { { 4, 0, 2 }, { 0, 14, 0 } } },
{ { { 5, 0, 1 }, { 0, 15, 1 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 0, 16, 0 } } },
{ { { 5, 0, 2 }, { 1, 15, 0 } } },
{ { { 6, 0, 1 }, { 0, 17, 0 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 0, 19, 0 } } },
{ { { 6, 0, 2 }, { 3, 14, 0 } } },
{ { { 7, 0, 1 }, { 0, 20, 0 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 22, 0 } } },
{ { { 7, 0, 2 }, { 4, 15, 0 } } },
{ { { 8, 0, 1 }, { 0, 23, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 0 } } },
{ { { 8, 0, 2 }, { 6, 14, 0 } } },
{ { { 9, 0, 1 }, { 0, 26, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 0, 28, 0 } } },
{ { { 9, 0, 2 }, { 7, 15, 0 } } },
{ { { 10, 0, 1 }, { 0, 29, 0 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 0, 31, 0 } } },
{ { { 10, 0, 2 }, { 9, 14, 0 } } },
{ { { 11, 0, 1 }, { 0, 32, 0 } } },
{ { { 11, 0, 0 }, { 0, 33, 0 } } },
{ { { 11, 0, 1 }, { 2, 30, 0 } } },
{ { { 11, 0, 2 }, { 0, 34, 0 } } },
{ { { 12, 0, 1 }, { 0, 35, 0 } } },
{ { { 12, 0, 0 }, { 0, 36, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 0 } } },
{ { { 12, 0, 2 }, { 0, 37, 0 } } },
{ { { 13, 0, 1 }, { 0, 38, 0 } } },
{ { { 13, 0, 0 }, { 0, 39, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 0 } } },
{ { { 13, 0, 2 }, { 0, 40, 0 } } },
{ { { 14, 0, 1 }, { 0, 41, 0 } } },
{ { { 14, 0, 0 }, { 0, 42, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 0 } } },
{ { { 14, 0, 2 }, { 0, 43, 0 } } },
{ { { 15, 0, 1 }, { 0, 44, 0 } } },
{ { { 15, 0, 0 }, { 0, 45, 0 } } },
{ { { 15, 0, 1 }, { 8, 30, 0 } } },
{ { { 15, 0, 2 }, { 0, 46, 0 } } },
{ { { 16, 0, 2 }, { 0, 47, 0 } } },
{ { { 16, 0, 1 }, { 1, 46, 0 } } },
{ { { 16, 0, 0 }, { 0, 48, 0 } } },
{ { { 16, 0, 1 }, { 0, 49, 0 } } },
{ { { 16, 0, 2 }, { 0, 50, 0 } } },
{ { { 17, 0, 1 }, { 2, 47, 0 } } },
{ { { 17, 0, 0 }, { 0, 51, 0 } } },
{ { { 17, 0, 1 }, { 0, 52, 0 } } },
{ { { 17, 0, 2 }, { 0, 53, 0 } } },
{ { { 18, 0, 1 }, { 4, 46, 0 } } },
{ { { 18, 0, 0 }, { 0, 54, 0 } } },
{ { { 18, 0, 1 }, { 0, 55, 0 } } },
{ { { 18, 0, 2 }, { 0, 56, 0 } } },
{ { { 19, 0, 1 }, { 5, 47, 0 } } },
{ { { 19, 0, 0 }, { 0, 57, 0 } } },
{ { { 19, 0, 1 }, { 0, 58, 0 } } },
{ { { 19, 0, 2 }, { 0, 59, 0 } } },
{ { { 20, 0, 1 }, { 7, 46, 0 } } },
{ { { 20, 0, 0 }, { 0, 60, 0 } } },
{ { { 20, 0, 1 }, { 0, 61, 0 } } },
{ { { 20, 0, 2 }, { 0, 62, 0 } } },
{ { { 21, 0, 1 }, { 8, 47, 0 } } },
{ { { 21, 0, 0 }, { 0, 63, 0 } } },
{ { { 21, 0, 1 }, { 1, 62, 0 } } },
{ { { 21, 0, 2 }, { 1, 63, 0 } } },
{ { { 22, 0, 1 }, { 10, 46, 0 } } },
{ { { 22, 0, 0 }, { 2, 62, 0 } } },
{ { { 22, 0, 1 }, { 2, 63, 0 } } },
{ { { 22, 0, 2 }, { 3, 62, 0 } } },
{ { { 23, 0, 1 }, { 11, 47, 0 } } },
{ { { 23, 0, 0 }, { 3, 63, 0 } } },
{ { { 23, 0, 1 }, { 4, 62, 0 } } },
{ { { 23, 0, 2 }, { 4, 63, 0 } } },
{ { { 24, 0, 1 }, { 13, 46, 0 } } },
{ { { 24, 0, 0 }, { 5, 62, 0 } } },
{ { { 24, 0, 1 }, { 5, 63, 0 } } },
{ { { 24, 0, 2 }, { 6, 62, 0 } } },
{ { { 25, 0, 1 }, { 14, 47, 0 } } },
{ { { 25, 0, 0 }, { 6, 63, 0 } } },
{ { { 25, 0, 1 }, { 7, 62, 0 } } },
{ { { 25, 0, 2 }, { 7, 63, 0 } } },
{ { { 26, 0, 1 }, { 16, 45, 0 } } },
{ { { 26, 0, 0 }, { 8, 62, 0 } } },
{ { { 26, 0, 1 }, { 8, 63, 0 } } },
{ { { 26, 0, 2 }, { 9, 62, 0 } } },
{ { { 27, 0, 1 }, { 16, 48, 0 } } },
{ { { 27, 0, 0 }, { 9, 63, 0 } } },
{ { { 27, 0, 1 }, { 10, 62, 0 } } },
{ { { 27, 0, 2 }, { 10, 63, 0 } } },
{ { { 28, 0, 1 }, { 16, 51, 0 } } },
{ { { 28, 0, 0 }, { 11, 62, 0 } } },
{ { { 28, 0, 1 }, { 11, 63, 0 } } },
{ { { 28, 0, 2 }, { 12, 62, 0 } } },
{ { { 29, 0, 1 }, { 16, 54, 0 } } },
{ { { 29, 0, 0 }, { 12, 63, 0 } } },
{ { { 29, 0, 1 }, { 13, 62, 0 } } },
{ { { 29, 0, 2 }, { 13, 63, 0 } } },
{ { { 30, 0, 1 }, { 16, 57, 0 } } },
{ { { 30, 0, 0 }, { 14, 62, 0 } } },
{ { { 30, 0, 1 }, { 14, 63, 0 } } },
{ { { 30, 0, 2 }, { 15, 62, 0 } } },
{ { { 31, 0, 1 }, { 16, 60, 0 } } },
{ { { 31, 0, 0 }, { 15, 63, 0 } } },
{ { { 31, 0, 1 }, { 24, 46, 0 } } },
{ { { 31, 0, 2 }, { 16, 62, 0 } } },
{ { { 32, 0, 2 }, { 16, 63, 0 } } },
{ { { 32, 0, 1 }, { 17, 62, 0 } } },
{ { { 32, 0, 0 }, { 25, 47, 0 } } },
{ { { 32, 0, 1 }, { 17, 63, 0 } } },
{ { { 32, 0, 2 }, { 18, 62, 0 } } },
{ { { 33, 0, 1 }, { 18, 63, 0 } } },
{ { { 33, 0, 0 }, { 27, 46, 0 } } },
{ { { 33, 0, 1 }, { 19, 62, 0 } } },
{ { { 33, 0, 2 }, { 19, 63, 0 } } },
{ { { 34, 0, 1 }, { 20, 62, 0 } } },
{ { { 34, 0, 0 }, { 28, 47, 0 } } },
{ { { 34, 0, 1 }, { 20, 63, 0 } } },
{ { { 34, 0, 2 }, { 21, 62, 0 } } },
{ { { 35, 0, 1 }, { 21, 63, 0 } } },
{ { { 35, 0, 0 }, { 30, 46, 0 } } },
{ { { 35, 0, 1 }, { 22, 62, 0 } } },
{ { { 35, 0, 2 }, { 22, 63, 0 } } },
{ { { 36, 0, 1 }, { 23, 62, 0 } } },
{ { { 36, 0, 0 }, { 31, 47, 0 } } },
{ { { 36, 0, 1 }, { 23, 63, 0 } } },
{ { { 36, 0, 2 }, { 24, 62, 0 } } },
{ { { 37, 0, 1 }, { 24, 63, 0 } } },
{ { { 37, 0, 0 }, { 32, 47, 0 } } },
{ { { 37, 0, 1 }, { 25, 62, 0 } } },
{ { { 37, 0, 2 }, { 25, 63, 0 } } },
{ { { 38, 0, 1 }, { 26, 62, 0 } } },
{ { { 38, 0, 0 }, { 32, 50, 0 } } },
{ { { 38, 0, 1 }, { 26, 63, 0 } } },
{ { { 38, 0, 2 }, { 27, 62, 0 } } },
{ { { 39, 0, 1 }, { 27, 63, 0 } } },
{ { { 39, 0, 0 }, { 32, 53, 0 } } },
{ { { 39, 0, 1 }, { 28, 62, 0 } } },
{ { { 39, 0, 2 }, { 28, 63, 0 } } },
{ { { 40, 0, 1 }, { 29, 62, 0 } } },
{ { { 40, 0, 0 }, { 32, 56, 0 } } },
{ { { 40, 0, 1 }, { 29, 63, 0 } } },
{ { { 40, 0, 2 }, { 30, 62, 0 } } },
{ { { 41, 0, 1 }, { 30, 63, 0 } } },
{ { { 41, 0, 0 }, { 32, 59, 0 } } },
{ { { 41, 0, 1 }, { 31, 62, 0 } } },
{ { { 41, 0, 2 }, { 31, 63, 0 } } },
{ { { 42, 0, 1 }, { 32, 61, 0 } } },
{ { { 42, 0, 0 }, { 32, 62, 0 } } },
{ { { 42, 0, 1 }, { 32, 63, 0 } } },
{ { { 42, 0, 2 }, { 41, 46, 0 } } },
{ { { 43, 0, 1 }, { 33, 62, 0 } } },
{ { { 43, 0, 0 }, { 33, 63, 0 } } },
{ { { 43, 0, 1 }, { 34, 62, 0 } } },
{ { { 43, 0, 2 }, { 42, 47, 0 } } },
{ { { 44, 0, 1 }, { 34, 63, 0 } } },
{ { { 44, 0, 0 }, { 35, 62, 0 } } },
{ { { 44, 0, 1 }, { 35, 63, 0 } } },
{ { { 44, 0, 2 }, { 44, 46, 0 } } },
{ { { 45, 0, 1 }, { 36, 62, 0 } } },
{ { { 45, 0, 0 }, { 36, 63, 0 } } },
{ { { 45, 0, 1 }, { 37, 62, 0 } } },
{ { { 45, 0, 2 }, { 45, 47, 0 } } },
{ { { 46, 0, 1 }, { 37, 63, 0 } } },
{ { { 46, 0, 0 }, { 38, 62, 0 } } },
{ { { 46, 0, 1 }, { 38, 63, 0 } } },
{ { { 46, 0, 2 }, { 47, 46, 0 } } },
{ { { 47, 0, 1 }, { 39, 62, 0 } } },
{ { { 47, 0, 0 }, { 39, 63, 0 } } },
{ { { 47, 0, 1 }, { 40, 62, 0 } } },
{ { { 47, 0, 2 }, { 48, 46, 0 } } },
{ { { 48, 0, 2 }, { 40, 63, 0 } } },
{ { { 48, 0, 1 }, { 41, 62, 0 } } },
{ { { 48, 0, 0 }, { 41, 63, 0 } } },
{ { { 48, 0, 1 }, { 48, 49, 0 } } },
{ { { 48, 0, 2 }, { 42, 62, 0 } } },
{ { { 49, 0, 1 }, { 42, 63, 0 } } },
{ { { 49, 0, 0 }, { 43, 62, 0 } } },
{ { { 49, 0, 1 }, { 48, 52, 0 } } },
{ { { 49, 0, 2 }, { 43, 63, 0 } } },
{ { { 50, 0, 1 }, { 44, 62, 0 } } },
{ { { 50, 0, 0 }, { 44, 63, 0 } } },
{ { { 50, 0, 1 }, { 48, 55, 0 } } },
{ { { 50, 0, 2 }, { 45, 62, 0 } } },
{ { { 51, 0, 1 }, { 45, 63, 0 } } },
{ { { 51, 0, 0 }, { 46, 62, 0 } } },
{ { { 51, 0, 1 }, { 48, 58, 0 } } },
{ { { 51, 0, 2 }, { 46, 63, 0 } } },
{ { { 52, 0, 1 }, { 47, 62, 0 } } },
{ { { 52, 0, 0 }, { 47, 63, 0 } } },
{ { { 52, 0, 1 }, { 48, 61, 0 } } },
{ { { 52, 0, 2 }, { 48, 62, 0 } } },
{ { { 53, 0, 1 }, { 56, 47, 0 } } },
{ { { 53, 0, 0 }, { 48, 63, 0 } } },
{ { { 53, 0, 1 }, { 49, 62, 0 } } },
{ { { 53, 0, 2 }, { 49, 63, 0 } } },
{ { { 54, 0, 1 }, { 58, 46, 0 } } },
{ { { 54, 0, 0 }, { 50, 62, 0 } } },
{ { { 54, 0, 1 }, { 50, 63, 0 } } },
{ { { 54, 0, 2 }, { 51, 62, 0 } } },
{ { { 55, 0, 1 }, { 59, 47, 0 } } },
{ { { 55, 0, 0 }, { 51, 63, 0 } } },
{ { { 55, 0, 1 }, { 52, 62, 0 } } },
{ { { 55, 0, 2 }, { 52, 63, 0 } } },
{ { { 56, 0, 1 }, { 61, 46, 0 } } },
{ { { 56, 0, 0 }, { 53, 62, 0 } } },
{ { { 56, 0, 1 }, { 53, 63, 0 } } },
{ { { 56, 0, 2 }, { 54, 62, 0 } } },
{ { { 57, 0, 1 }, { 62, 47, 0 } } },
{ { { 57, 0, 0 }, { 54, 63, 0 } } },
{ { { 57, 0, 1 }, { 55, 62, 0 } } },
{ { { 57, 0, 2 }, { 55, 63, 0 } } },
{ { { 58, 0, 1 }, { 56, 62, 1 } } },
{ { { 58, 0, 0 }, { 56, 62, 0 } } },
{ { { 58, 0, 1 }, { 56, 63, 0 } } },
{ { { 58, 0, 2 }, { 57, 62, 0 } } },
{ { { 59, 0, 1 }, { 57, 63, 1 } } },
{ { { 59, 0, 0 }, { 57, 63, 0 } } },
{ { { 59, 0, 1 }, { 58, 62, 0 } } },
{ { { 59, 0, 2 }, { 58, 63, 0 } } },
{ { { 60, 0, 1 }, { 59, 62, 1 } } },
{ { { 60, 0, 0 }, { 59, 62, 0 } } },
{ { { 60, 0, 1 }, { 59, 63, 0 } } },
{ { { 60, 0, 2 }, { 60, 62, 0 } } },
{ { { 61, 0, 1 }, { 60, 63, 1 } } },
{ { { 61, 0, 0 }, { 60, 63, 0 } } },
{ { { 61, 0, 1 }, { 61, 62, 0 } } },
{ { { 61, 0, 2 }, { 61, 63, 0 } } },
{ { { 62, 0, 1 }, { 62, 62, 1 } } },
{ { { 62, 0, 0 }, { 62, 62, 0 } } },
{ { { 62, 0, 1 }, { 62, 63, 0 } } },
{ { { 62, 0, 2 }, { 63, 62, 0 } } },
{ { { 63, 0, 1 }, { 63, 63, 1 } } },
{ { { 63, 0, 0 }, { 63, 63, 0 } } }
};
/*
  Single-color lookup tables selected by channel bit depth; the three
  entries presumably correspond to the R (5-bit), G (6-bit) and B (5-bit)
  channels of a 5:6:5 color -- TODO(review): confirm against the use site.
*/
static const DDSSingleColorLookup*
  DDS_LOOKUP[] =
{
  DDSLookup_5_4,
  DDSLookup_6_4,
  DDSLookup_5_4
};
/*
  BC7 interpolation weight tables for 2-, 3- and 4-bit indices, as defined
  by the BC7 specification (weights are out of 64).
*/
static const unsigned char BC7_weight2[] = { 0, 21, 43, 64 };
static const unsigned char BC7_weight3[] = { 0, 9, 18, 27, 37, 46, 55, 64 };
static const unsigned char BC7_weight4[] = { 0, 4, 9, 13, 17, 21, 26, 30, 34,
  38, 43, 47, 51, 55, 60, 64 };
/* stores info for each mode of BC7 */
/*
  Per-mode BC7 decoding parameters; consumers read .partition_bits,
  .num_subsets, .color_precision, .alpha_precision, .index_precision and
  .index2_precision from these rows (see ReadEndpoints/ReadBC7Pixels).
  NOTE(review): the column-to-field mapping follows the BC7ModeInfo
  declaration, which is not visible here -- confirm before relying on it.
*/
static const BC7ModeInfo BC7_mode_info[8] =
{
  { 4, 3, 4, 0, 6, 3, 0 }, /* mode 0 */
  { 6, 2, 6, 0, 2, 3, 0 }, /* mode 1 */
  { 6, 3, 5, 0, 0, 2, 0 }, /* mode 2 */
  { 6, 2, 7, 0, 4, 2, 0 }, /* mode 3 */
  { 0, 1, 5, 6, 0, 2, 3 }, /* mode 4 */
  { 0, 1, 7, 8, 0, 2, 2 }, /* mode 5 */
  { 0, 1, 7, 7, 2, 4, 0 }, /* mode 6 */
  { 6, 2, 5, 5, 4, 2, 0 }, /* mode 7 */
};
/*
  BC7 partition tables: for each of the 64 partition shapes, the subset
  index of each of the 16 texels in the 4x4 block.  [0] holds the
  two-subset shapes, [1] the three-subset shapes (see GetSubsetIndex).
*/
static const unsigned char BC7_partition_table[2][64][16] =
{
  {  /* BC7 Partition Set for 2 Subsets */
    {  0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1 },
    {  0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1 },
    {  0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1 },
    {  0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1 },
    {  0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1 },
    {  0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1 },
    {  0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1 },
    {  0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1 },
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1 },
    {  0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
    {  0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1 },
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1 },
    {  0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
    {  0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 },
    {  0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1 },
    {  0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1 },
    {  0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0 },
    {  0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0 },
    {  0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0 },
    {  0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0 },
    {  0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0 },
    {  0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0 },
    {  0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1 },
    {  0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0 },
    {  0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0 },
    {  0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0 },
    {  0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0 },
    {  0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0 },
    {  0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 },
    {  0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0 },
    {  0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0 },
    {  0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1 },
    {  0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1 },
    {  0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0 },
    {  0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0 },
    {  0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0 },
    {  0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0 },
    {  0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1 },
    {  0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1 },
    {  0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0 },
    {  0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0 },
    {  0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0 },
    {  0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0 },
    {  0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0 },
    {  0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1 },
    {  0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1 },
    {  0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0 },
    {  0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0 },
    {  0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0 },
    {  0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0 },
    {  0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0 },
    {  0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1 },
    {  0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1 },
    {  0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0 },
    {  0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0 },
    {  0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1 },
    {  0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1 },
    {  0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1 },
    {  0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1 },
    {  0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1 },
    {  0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0 },
    {  0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0 },
    {  0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1 }
  },
  { /* BC7 Partition Set for 3 Subsets */
    {  0, 0, 1, 1, 0, 0, 1, 1, 0, 2, 2, 1, 2, 2, 2, 2 },
    {  0, 0, 0, 1, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 2, 1 },
    {  0, 0, 0, 0, 2, 0, 0, 1, 2, 2, 1, 1, 2, 2, 1, 1 },
    {  0, 2, 2, 2, 0, 0, 2, 2, 0, 0, 1, 1, 0, 1, 1, 1 },
    {  0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2 },
    {  0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 2, 2 },
    {  0, 0, 2, 2, 0, 0, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1 },
    {  0, 0, 1, 1, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1 },
    {  0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2 },
    {  0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2 },
    {  0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2 },
    {  0, 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2 },
    {  0, 1, 1, 2, 0, 1, 1, 2, 0, 1, 1, 2, 0, 1, 1, 2 },
    {  0, 1, 2, 2, 0, 1, 2, 2, 0, 1, 2, 2, 0, 1, 2, 2 },
    {  0, 0, 1, 1, 0, 1, 1, 2, 1, 1, 2, 2, 1, 2, 2, 2 },
    {  0, 0, 1, 1, 2, 0, 0, 1, 2, 2, 0, 0, 2, 2, 2, 0 },
    {  0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 2, 1, 1, 2, 2 },
    {  0, 1, 1, 1, 0, 0, 1, 1, 2, 0, 0, 1, 2, 2, 0, 0 },
    {  0, 0, 0, 0, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2 },
    {  0, 0, 2, 2, 0, 0, 2, 2, 0, 0, 2, 2, 1, 1, 1, 1 },
    {  0, 1, 1, 1, 0, 1, 1, 1, 0, 2, 2, 2, 0, 2, 2, 2 },
    {  0, 0, 0, 1, 0, 0, 0, 1, 2, 2, 2, 1, 2, 2, 2, 1 },
    {  0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 2, 2, 0, 1, 2, 2 },
    {  0, 0, 0, 0, 1, 1, 0, 0, 2, 2, 1, 0, 2, 2, 1, 0 },
    {  0, 1, 2, 2, 0, 1, 2, 2, 0, 0, 1, 1, 0, 0, 0, 0 },
    {  0, 0, 1, 2, 0, 0, 1, 2, 1, 1, 2, 2, 2, 2, 2, 2 },
    {  0, 1, 1, 0, 1, 2, 2, 1, 1, 2, 2, 1, 0, 1, 1, 0 },
    {  0, 0, 0, 0, 0, 1, 1, 0, 1, 2, 2, 1, 1, 2, 2, 1 },
    {  0, 0, 2, 2, 1, 1, 0, 2, 1, 1, 0, 2, 0, 0, 2, 2 },
    {  0, 1, 1, 0, 0, 1, 1, 0, 2, 0, 0, 2, 2, 2, 2, 2 },
    {  0, 0, 1, 1, 0, 1, 2, 2, 0, 1, 2, 2, 0, 0, 1, 1 },
    {  0, 0, 0, 0, 2, 0, 0, 0, 2, 2, 1, 1, 2, 2, 2, 1 },
    {  0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 2, 2, 1, 2, 2, 2 },
    {  0, 2, 2, 2, 0, 0, 2, 2, 0, 0, 1, 2, 0, 0, 1, 1 },
    {  0, 0, 1, 1, 0, 0, 1, 2, 0, 0, 2, 2, 0, 2, 2, 2 },
    {  0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2, 0, 0, 1, 2, 0 },
    {  0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0 },
    {  0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0 },
    {  0, 1, 2, 0, 2, 0, 1, 2, 1, 2, 0, 1, 0, 1, 2, 0 },
    {  0, 0, 1, 1, 2, 2, 0, 0, 1, 1, 2, 2, 0, 0, 1, 1 },
    {  0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 0, 0, 0, 0, 1, 1 },
    {  0, 1, 0, 1, 0, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2 },
    {  0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 2, 1, 2, 1, 2, 1 },
    {  0, 0, 2, 2, 1, 1, 2, 2, 0, 0, 2, 2, 1, 1, 2, 2 },
    {  0, 0, 2, 2, 0, 0, 1, 1, 0, 0, 2, 2, 0, 0, 1, 1 },
    {  0, 2, 2, 0, 1, 2, 2, 1, 0, 2, 2, 0, 1, 2, 2, 1 },
    {  0, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 0, 1, 0, 1 },
    {  0, 0, 0, 0, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1 },
    {  0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 2, 2, 2 },
    {  0, 2, 2, 2, 0, 1, 1, 1, 0, 2, 2, 2, 0, 1, 1, 1 },
    {  0, 0, 0, 2, 1, 1, 1, 2, 0, 0, 0, 2, 1, 1, 1, 2 },
    {  0, 0, 0, 0, 2, 1, 1, 2, 2, 1, 1, 2, 2, 1, 1, 2 },
    {  0, 2, 2, 2, 0, 1, 1, 1, 0, 1, 1, 1, 0, 2, 2, 2 },
    {  0, 0, 0, 2, 1, 1, 1, 2, 1, 1, 1, 2, 0, 0, 0, 2 },
    {  0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 2, 2, 2, 2 },
    {  0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 2, 2, 1, 1, 2 },
    {  0, 1, 1, 0, 0, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 2 },
    {  0, 0, 2, 2, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 2, 2 },
    {  0, 0, 2, 2, 1, 1, 2, 2, 1, 1, 2, 2, 0, 0, 2, 2 },
    {  0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 1, 2 },
    {  0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1 },
    {  0, 2, 2, 2, 1, 2, 2, 2, 0, 2, 2, 2, 1, 2, 2, 2 },
    {  0, 1, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2 },
    {  0, 1, 1, 1, 2, 0, 1, 1, 2, 2, 0, 1, 2, 2, 2, 0 }
  }
};
/*
  BC7 anchor-index tables, indexed by [subset role][partition shape]; the
  anchor texel of a subset stores one fewer index bit (its MSB is implied
  zero).  Row selection is done in IsPixelAnchorIndex().
*/
static const unsigned char BC7_anchor_index_table[4][64] =
{
  /* Anchor index values for the first subset */
  {
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0
  },

  /* Anchor index values for the second subset of two-subset partitioning */
  {
    15,15,15,15,15,15,15,15,
    15,15,15,15,15,15,15,15,
    15, 2, 8, 2, 2, 8, 8,15,
     2, 8, 2, 2, 8, 8, 2, 2,
    15,15, 6, 8, 2, 8,15,15,
     2, 8, 2, 2, 2,15,15, 6,
     6, 2, 6, 8,15,15, 2, 2,
    15,15,15,15,15, 2, 2,15
  },

  /* Anchor index values for the second subset of three-subset partitioning */
  {
     3, 3,15,15, 8, 3,15,15,
     8, 8, 6, 6, 6, 5, 3, 3,
     3, 3, 8,15, 3, 3, 6,10,
     5, 8, 8, 6, 8, 5,15,15,
     8,15, 3, 5, 6,10, 8,15,
    15, 3,15, 5,15,15,15,15,
     3,15, 5, 5, 5, 8, 5,10,
     5,10, 8,13,15,12, 3, 3
  },

  /* Anchor index values for the third subset of three-subset partitioning */
  {
    15, 8, 8, 3,15,15, 3, 8,
    15,15,15,15,15,15,15, 8,
    15, 8,15, 3,15, 8,15, 8,
     3,15, 6,10,15,15,10, 8,
    15, 3,15,10,10, 8, 9,10,
     6,15, 8,15, 3, 6, 6, 8,
    15, 3,15,15,15,15,15,15,
    15,15,15,15, 3,15,15, 8
  }
};
/*
Macros
*/
/* Extract the raw 5/6/5-bit channel fields from a packed 16-bit color. */
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)

/*
  Expand a 5- or 6-bit channel to 8 bits by replicating the most
  significant bits into the low bits (standard BCn expansion).
*/
#define C565_red(x)   ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x)  ( (C565_b(x) << 3 | C565_b(x) >> 2))

/* Halve a mipmap dimension, never going below 1. */
#define DIV2(x)  ((x) > 1 ? ((x) >> 1) : 1)

/*
  Clamp/widen a (min,max) endpoint pair so it spans at least `steps`
  levels within [0,255].  Wrapped in do/while(0) so the macro behaves as a
  single statement (safe inside unbraced if/else).
*/
#define FixRange(min, max, steps) \
do \
{ \
  if (min > max) \
    min = max; \
  if ((ssize_t) max - min < steps) \
    max = MagickMin(min + steps, 255); \
  if ((ssize_t) max - min < steps) \
    min = MagickMax(0, (ssize_t) max - steps); \
} while (0)

/*
  3-component dot product.  The whole expansion is parenthesized so the
  macro composes correctly inside larger expressions (e.g. 2*Dot(a,b)).
*/
#define Dot(left, right) ((left.x*right.x) + (left.y*right.y) + (left.z*right.z))

#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
  = value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value

/* True when a pixel-format bitmask matches the given R/G/B/A masks. */
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
  g && mask.b_bitmask == b && mask.alpha_bitmask == a)
/*
Forward declarations
*/
static MagickBooleanType
WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *);
/* Component-wise vector sum: destination = left + right. */
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
  DDSVector4 *destination)
{
  destination->w=left.w+right.w;
  destination->z=left.z+right.z;
  destination->y=left.y+right.y;
  destination->x=left.x+right.x;
}
/* Clamp every component of *value into the [0,1] range, in place. */
static inline void VectorClamp(DDSVector4 *value)
{
  value->w=MagickMax(0.0f,MagickMin(1.0f,value->w));
  value->z=MagickMax(0.0f,MagickMin(1.0f,value->z));
  value->y=MagickMax(0.0f,MagickMin(1.0f,value->y));
  value->x=MagickMax(0.0f,MagickMin(1.0f,value->x));
}
/* Clamp every component of a 3-vector into the [0,1] range, in place. */
static inline void VectorClamp3(DDSVector3 *value)
{
  value->z=MagickMax(0.0f,MagickMin(1.0f,value->z));
  value->y=MagickMax(0.0f,MagickMin(1.0f,value->y));
  value->x=MagickMax(0.0f,MagickMin(1.0f,value->x));
}
/* Copy the x/y/z components of a 4-vector into a 3-vector (w dropped). */
static inline void VectorCopy43(const DDSVector4 source,
  DDSVector3 *destination)
{
  destination->z=source.z;
  destination->y=source.y;
  destination->x=source.x;
}
/* Copy all four components of one 4-vector into another. */
static inline void VectorCopy44(const DDSVector4 source,
  DDSVector4 *destination)
{
  destination->w=source.w;
  destination->z=source.z;
  destination->y=source.y;
  destination->x=source.x;
}
/* Fused negative multiply-subtract: destination = c - a*b (per component). */
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
  const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
  destination->w=c.w-(a.w*b.w);
  destination->z=c.z-(a.z*b.z);
  destination->y=c.y-(a.y*b.y);
  destination->x=c.x-(a.x*b.x);
}
/* Component-wise (Hadamard) product: destination = left * right. */
static inline void VectorMultiply(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->w=left.w*right.w;
  destination->z=left.z*right.z;
  destination->y=left.y*right.y;
  destination->x=left.x*right.x;
}
/* Component-wise product of two 3-vectors. */
static inline void VectorMultiply3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->z=left.z*right.z;
  destination->y=left.y*right.y;
  destination->x=left.x*right.x;
}
/* Fused multiply-add: destination = a*b + c (per component). */
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
  const DDSVector4 c, DDSVector4 *destination)
{
  destination->w=(a.w*b.w)+c.w;
  destination->z=(a.z*b.z)+c.z;
  destination->y=(a.y*b.y)+c.y;
  destination->x=(a.x*b.x)+c.x;
}
/* Fused multiply-add for 3-vectors: destination = a*b + c. */
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
  const DDSVector3 c, DDSVector3 *destination)
{
  destination->z=(a.z*b.z)+c.z;
  destination->y=(a.y*b.y)+c.y;
  destination->x=(a.x*b.x)+c.x;
}
/*
  Component-wise reciprocal: destination = 1/value.  No zero check; a zero
  component yields IEEE infinity, matching the original behavior.
*/
static inline void VectorReciprocal(const DDSVector4 value,
  DDSVector4 *destination)
{
  destination->w=1.0f/value.w;
  destination->z=1.0f/value.z;
  destination->y=1.0f/value.y;
  destination->x=1.0f/value.x;
}
/* Component-wise difference: destination = left - right. */
static inline void VectorSubtract(const DDSVector4 left,
  const DDSVector4 right, DDSVector4 *destination)
{
  destination->w=left.w-right.w;
  destination->z=left.z-right.z;
  destination->y=left.y-right.y;
  destination->x=left.x-right.x;
}
/* Component-wise difference of two 3-vectors. */
static inline void VectorSubtract3(const DDSVector3 left,
  const DDSVector3 right, DDSVector3 *destination)
{
  destination->z=left.z-right.z;
  destination->y=left.y-right.y;
  destination->x=left.x-right.x;
}
/* Round each component toward zero (truncate), in place. */
static inline void VectorTruncate(DDSVector4 *value)
{
  if (value->x > 0.0f)
    value->x=floor(value->x);
  else
    value->x=ceil(value->x);
  if (value->y > 0.0f)
    value->y=floor(value->y);
  else
    value->y=ceil(value->y);
  if (value->z > 0.0f)
    value->z=floor(value->z);
  else
    value->z=ceil(value->z);
  if (value->w > 0.0f)
    value->w=floor(value->w);
  else
    value->w=ceil(value->w);
}
/* Round each component of a 3-vector toward zero (truncate), in place. */
static inline void VectorTruncate3(DDSVector3 *value)
{
  if (value->x > 0.0f)
    value->x=floor(value->x);
  else
    value->x=ceil(value->x);
  if (value->y > 0.0f)
    value->y=floor(value->y);
  else
    value->y=ceil(value->y);
  if (value->z > 0.0f)
    value->z=floor(value->z);
  else
    value->z=ceil(value->z);
}
/*
  Round `value` to the nearest integer and clamp the result into
  [0, limit].

  Fix: the original stored `(int)(value+0.5f)` into a size_t and then
  tested `result < 0.0f`; for negative inputs the int wrapped to a huge
  unsigned value, the negative test could never fire, and the function
  returned `limit` instead of 0.  Reject non-positive inputs before the
  unsigned conversion instead.
*/
static inline size_t ClampToLimit(const float value, const size_t limit)
{
  size_t
    result;

  if (value <= 0.0f)
    return(0);
  result=(size_t) (value+0.5f);
  if (result > limit)
    return(limit);
  return(result);
}
/*
  Quantize a normalized RGB color (components in [0,1]) to a packed
  5:6:5 16-bit value.
*/
static inline size_t ColorTo565(const DDSVector3 point)
{
  size_t
    packed;

  packed=ClampToLimit(31.0f*point.x,31) << 11;
  packed|=ClampToLimit(63.0f*point.y,63) << 5;
  packed|=ClampToLimit(31.0f*point.z,31);
  return(packed);
}
/*
  Return the subset a texel belongs to for the given BC7 partition shape;
  single-subset modes (or unexpected subset counts) always map to subset 0.
*/
static inline unsigned char GetSubsetIndex(unsigned char numSubsets,
  unsigned char partition_id,size_t pixelIndex)
{
  switch (numSubsets)
  {
    case 2:
      return BC7_partition_table[0][partition_id][pixelIndex];
    case 3:
      return BC7_partition_table[1][partition_id][pixelIndex];
    default:
      return 0;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s D D S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsDDS() returns MagickTrue if the image format type, identified by the
% magick string, is DDS.
%
% The format of the IsDDS method is:
%
% MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
/*
  Return MagickTrue when the magick bytes begin with the "DDS " signature.
*/
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
  if ((length >= 4) &&
      (LocaleNCompare((char *) magick,"DDS ", 4) == 0))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  Read and validate the DDS_HEADER that follows the 4-byte magic, filling
  in *dds_info.  When the pixel format carries the DX10 fourcc, the
  optional DXGI extension header is also read; otherwise the ext* fields
  are zeroed.  Returns MagickFalse on any structural mismatch.
*/
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
  size_t
    hdr_size,
    required;

  /* Seek to start of header */
  (void) SeekBlob(image, 4, SEEK_SET);

  /* Check header field: dwSize must be 124 */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 124)
    return MagickFalse;

  /* Fill in DDS info struct */
  dds_info->flags = ReadBlobLSBLong(image);

  /* Check required flags */
  required=(size_t) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if ((dds_info->flags & required) != required)
    return MagickFalse;

  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */

  /* Read pixel format structure; its dwSize must be 32 */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 32)
    return MagickFalse;

  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);
  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */

  /* Read optional DX10 header if available */
  if ((dds_info->pixelformat.flags & DDPF_FOURCC) &&
      (dds_info->pixelformat.fourcc == FOURCC_DX10))
    {
      dds_info->extFormat = ReadBlobLSBLong(image);
      dds_info->extDimension = ReadBlobLSBLong(image);
      dds_info->extFlags = ReadBlobLSBLong(image);
      dds_info->extArraySize = ReadBlobLSBLong(image);
      dds_info->extFlags2 = ReadBlobLSBLong(image);
    }
  else
    {
      /* no DX10 header: leave the extension fields in a known state */
      dds_info->extFormat = 0;
      dds_info->extDimension = 0;
      dds_info->extFlags = 0;
      dds_info->extArraySize = 0;
      dds_info->extFlags2 = 0;
    }
  return(MagickTrue);
}
/*
  Write one decoded 4x4 DXT1 texel block into the queued pixel region q at
  image position (x,y), clipping at the image edges.  Returns MagickFalse
  as soon as a transparent texel (colors.a[code] != 0) is met on an image
  whose alpha trait is undefined; the pixels written so far are then
  incomplete and the caller is expected to enable alpha and redo the
  block (see ReadDXT1Pixels).
*/
static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y,
  DDSColors colors,size_t bits,Quantum *q)
{
  ssize_t
    i;

  ssize_t
    j;

  unsigned char
    code;

  for (j = 0; j < 4; j++)
  {
    for (i = 0; i < 4; i++)
    {
      /* skip texels that fall outside the image */
      if ((x + i) < (ssize_t) image->columns &&
          (y + j) < (ssize_t) image->rows)
        {
          /* each texel is a 2-bit selector into the 4 block colors */
          code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
          SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
          SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
          SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
          SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q);
          if ((colors.a[code] != 0) &&
              (image->alpha_trait == UndefinedPixelTrait))
            return(MagickFalse);
          q+=GetPixelChannels(image);
        }
    }
  }
  return(MagickTrue);
}
/*
  Decode the mipmap chain into additional images in the list, using the
  supplied per-format pixel decoder.  Each level halves the previous
  width/height (never below 1).
*/
static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Only read mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);

      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        AcquireNextImage(image_info,image,exception);
        if (image->next == (Image *) NULL)
          return(MagickFalse);
        /* carry the alpha trait forward to the new level */
        image->next->alpha_trait=image->alpha_trait;
        image=SyncNextImageInList(image);
        status=SetImageExtent(image,w,h,exception);
        if (status == MagickFalse)
          break;
        status=decoder(image,dds_info,exception);
        if (status == MagickFalse)
          break;
        if ((w == 1) && (h == 1))
          break;
        w=DIV2(w);
        h=DIV2(h);
      }
    }
  return(status);
}
/*
  Expand the two packed 5:6:5 endpoint colors of a DXT block to 8-bit
  channels and derive the two remaining block colors.  With ignoreAlpha
  unset and c0 <= c1 the block is in three-color mode: color 2 is the
  midpoint and color 3 is transparent black; otherwise colors 2 and 3 are
  the 1/3 and 2/3 interpolants.
*/
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;

  c->r[0] = (unsigned char) C565_red(c0);
  c->g[0] = (unsigned char) C565_green(c0);
  c->b[0] = (unsigned char) C565_blue(c0);

  c->r[1] = (unsigned char) C565_red(c1);
  c->g[1] = (unsigned char) C565_green(c1);
  c->b[1] = (unsigned char) C565_blue(c1);

  if ((ignoreAlpha == MagickFalse) && (c0 <= c1))
    {
      /* three-color mode: midpoint plus transparent black */
      c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
      c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
      c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);

      c->r[3] = c->g[3] = c->b[3] = 0;
      c->a[3] = 255;
    }
  else
    {
      /* four-color mode: 1/3 and 2/3 interpolants */
      c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
      c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
      c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);

      c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
      c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
      c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
    }
}
/*
  Decode DXT1 (BC1) pixel data: each 8-byte block holds two 5:6:5 colors
  followed by sixteen 2-bit selectors.  If a block uses the transparent
  three-color mode on an image without an alpha channel, alpha is enabled
  on the image and the block is written again.
*/
static MagickBooleanType ReadDXT1Pixels(Image *image,
  const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  Quantum
    *q;

  ssize_t
    x;

  size_t
    bits;

  ssize_t
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);

      /* Read 8 bytes of data from the image */
      c0=ReadBlobLSBShort(image);
      c1=ReadBlobLSBShort(image);
      bits=ReadBlobLSBLong(image);

      CalculateColors(c0,c1,&colors,MagickFalse);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);

      /* Write the pixels */
      if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse)
        {
          /* Correct alpha: enable the alpha channel and redo this block */
          SetImageAlpha(image,QuantumRange,exception);
          q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
            MagickMin(4,image->rows-y),exception);
          if (q != (Quantum *) NULL)
            SetDXT1Pixels(image,x,y,colors,bits,q);
        }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
Skip the mipmap images for compressed (DXTn) dds files
*/
/*
  Seek past the mipmap chain of a compressed (DXTn/BCn) DDS file without
  decoding it.  texel_size is the byte size of one 4x4 block: 8 for DXT1,
  16 for DXT3/DXT5 (see the ReadDXTn callers).
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,const DDSInfo *dds_info,
  int texel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);

      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        /* each level stores ceil(w/4)*ceil(h/4) blocks of texel_size bytes */
        offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}
/*
  Decode a DXT1 image, then either decode or skip its mipmap chain
  (DXT1 blocks are 8 bytes each).
*/
static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=ReadDXT1Pixels(image,dds_info,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,8,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception));
}
/*
  Decode DXT3 (BC2) pixel data: each 16-byte block stores 64 bits of
  explicit 4-bit alpha followed by a DXT1-style color block (interpolation
  always in four-color mode, since DXT3 alpha is separate).
*/
static MagickBooleanType ReadDXT3Pixels(Image *image,
  const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  Quantum
    *q;

  ssize_t
    i,
    x;

  unsigned char
    alpha;

  size_t
    a0,
    a1,
    bits,
    code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);

      /* Read alpha values (8 bytes): a0 covers rows 0-1, a1 rows 2-3 */
      a0 = ReadBlobLSBLong(image);
      a1 = ReadBlobLSBLong(image);

      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);

      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);

      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows)
            {
              /* 2-bit selector into the 4 block colors */
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /*
                Extract alpha value: multiply 0..15 by 17 to get range 0..255
              */
              if (j < 2)
                alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
              else
                alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode a DXT3 image, then either decode or skip its mipmap chain
  (DXT3 blocks are 16 bytes each).
*/
static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=ReadDXT3Pixels(image,dds_info,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,16,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception));
}
/*
  Decode DXT5 (BC3) pixel data: each 16-byte block stores two 8-bit alpha
  endpoints, 48 bits of 3-bit alpha selectors, and a DXT1-style color
  block (always four-color mode).
*/
static MagickBooleanType ReadDXT5Pixels(Image *image,
  const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  MagickSizeType
    alpha_bits;

  Quantum
    *q;

  ssize_t
    i,
    x;

  unsigned char
    a0,
    a1;

  size_t
    alpha,
    bits,
    code,
    alpha_code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);

      /* Read alpha values (8 bytes): 2 endpoints + 48 selector bits */
      a0 = (unsigned char) ReadBlobByte(image);
      a1 = (unsigned char) ReadBlobByte(image);

      alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
      alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);

      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);

      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);

      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);

              /* Extract alpha value: codes 0/1 are the endpoints; with
                 a0 > a1 the rest interpolate over 7 steps, otherwise over
                 5 steps with codes 6/7 pinned to 0 and 255 */
              alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
              if (alpha_code == 0)
                alpha = a0;
              else if (alpha_code == 1)
                alpha = a1;
              else if (a0 > a1)
                alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
              else if (alpha_code == 6)
                alpha = 0;
              else if (alpha_code == 7)
                alpha = 255;
              else
                alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode a DXT5 image, then either decode or skip its mipmap chain
  (DXT5 blocks are 16 bytes each).
*/
static MagickBooleanType ReadDXT5(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=ReadDXT5Pixels(image,dds_info,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,16,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception));
}
/*
  Return the next single bit (LSB-first) of a 16-byte block and advance
  *start_bit.  Reads past the block return 0, but the cursor still moves.
*/
static unsigned char GetBit(const unsigned char *block,size_t *start_bit)
{
  size_t
    bit_offset,
    byte_index;

  byte_index=(*start_bit) >> 3;
  bit_offset=(*start_bit) & 7;
  (*start_bit)++;
  if (byte_index > 15)
    return(0);
  return((block[byte_index] >> bit_offset) & 0x01);
}
/*
  Return the next num_bits bits (LSB-first, num_bits <= 8) of a 16-byte
  block and advance *start_bit.  Reads starting past the block return 0.

  Fix: when a read started in byte 15 but straddled the byte boundary,
  the original dereferenced block[16], one byte past the block; the byte
  beyond the block now reads as zero instead.
*/
static unsigned char GetBits(const unsigned char *block,size_t *start_bit,
  unsigned char num_bits)
{
  size_t
    base,
    first_bits,
    index,
    next_bits;

  unsigned char
    next_byte,
    ret;

  index=(*start_bit) >> 3;
  base=(*start_bit)-(index << 3);
  if (index > 15)
    return(0);
  if (base + num_bits > 8)
    {
      /* value straddles a byte boundary */
      first_bits=8-base;
      next_bits=num_bits-first_bits;
      /* never read past the 16-byte block */
      next_byte=(index >= 15) ? 0 : block[index + 1];
      ret=(unsigned char) ((block[index] >> base) | ((next_byte &
        ((1u << next_bits) - 1)) << first_bits));
    }
  else
    {
      ret=((block[index] >> base) & ((1 << num_bits) - 1));
    }
  (*start_bit)+=num_bits;
  return(ret);
}
/*
  Return MagickTrue when pixelIndex is the anchor texel of its subset for
  the given BC7 partition (anchor texels store one fewer index bit).
*/
static MagickBooleanType IsPixelAnchorIndex(unsigned char subset_index,
  unsigned char num_subsets,size_t pixelIndex,unsigned char partition_id)
{
  size_t
    table_index;

  if (subset_index == 0)
    table_index=0;  /* first subset */
  else if (subset_index == 1)
    /* second subset: row 1 for two-subset shapes, row 2 for three-subset */
    table_index=(num_subsets == 2) ? 1 : (num_subsets == 3) ? 2 : 3;
  else
    table_index=3;  /* third subset of three-subset partitioning */
  return(BC7_anchor_index_table[table_index][partition_id] == pixelIndex ?
    MagickTrue : MagickFalse);
}
/*
  Decode the endpoint colors of one BC7 block: read the r, g, b (and, for
  modes 4-7, alpha) endpoint values for every subset, merge in the shared
  or per-endpoint p-bits of modes 0/1/3/6/7, then expand each channel to
  8 bits by replicating its most significant bits.  Modes without stored
  alpha are made fully opaque.
*/
static void ReadEndpoints(BC7Colors *endpoints,const unsigned char *block,
  size_t mode,size_t *start_bit)
{
  MagickBooleanType
    has_alpha,
    has_pbits;

  unsigned char
    alpha_bits,
    color_bits,
    pbit,
    pbit0,
    pbit1;

  size_t
    num_subsets,
    i;

  num_subsets=(size_t) BC7_mode_info[mode].num_subsets;
  color_bits=BC7_mode_info[mode].color_precision;

  /* endpoints are stored channel-by-channel, two per subset */
  /* red */
  for (i=0; i < num_subsets * 2; i++)
    endpoints->r[i]=GetBits(block,start_bit,color_bits);

  /* green */
  for (i=0; i < num_subsets * 2; i++)
    endpoints->g[i]=GetBits(block,start_bit,color_bits);

  /* blue */
  for (i=0; i < num_subsets * 2; i++)
    endpoints->b[i]=GetBits(block,start_bit,color_bits);

  /* alpha */
  alpha_bits=BC7_mode_info[mode].alpha_precision;
  has_alpha=mode >= 4;
  if (has_alpha != MagickFalse)
    {
      for (i=0; i < num_subsets * 2; i++)
        endpoints->a[i]=GetBits(block,start_bit,alpha_bits);
    }

  /* handle modes that have p bits */
  has_pbits=(mode == 0) || (mode == 1) || (mode == 3) || (mode == 6) || (mode == 7);
  if (has_pbits != MagickFalse)
    {
      /* make room for the p-bit in the least significant position */
      for (i=0; i < num_subsets * 2; i++)
      {
        endpoints->r[i] <<= 1;
        endpoints->g[i] <<= 1;
        endpoints->b[i] <<= 1;
        endpoints->a[i] <<= 1;
      }

      /* mode 1 shares a p-bit for both endpoints */
      if (mode == 1)
        {
          pbit0=GetBit(block,start_bit);
          pbit1=GetBit(block,start_bit);

          /* pbit0 applies to both endpoints of subset 0 ... */
          endpoints->r[0] |= pbit0;
          endpoints->g[0] |= pbit0;
          endpoints->b[0] |= pbit0;
          endpoints->r[1] |= pbit0;
          endpoints->g[1] |= pbit0;
          endpoints->b[1] |= pbit0;

          /* ... and pbit1 to both endpoints of subset 1 */
          endpoints->r[2] |= pbit1;
          endpoints->g[2] |= pbit1;
          endpoints->b[2] |= pbit1;
          endpoints->r[3] |= pbit1;
          endpoints->g[3] |= pbit1;
          endpoints->b[3] |= pbit1;
        }
      else
        {
          /* other p-bit modes store one p-bit per endpoint */
          for (i=0; i < num_subsets * 2; i++)
          {
            pbit=GetBit(block,start_bit);
            endpoints->r[i] |= pbit;
            endpoints->g[i] |= pbit;
            endpoints->b[i] |= pbit;
            endpoints->a[i] |= pbit;
          }
        }
    }

  /* 1 bit increased due to the pbit */
  if (has_pbits != MagickFalse)
    {
      color_bits++;
      alpha_bits++;
    }

  /* color and alpha bit shifting so that MSB lies in bit 7, then
     replicate the top bits into the low bits (standard BCn expansion) */
  for (i=0; i < num_subsets * 2; i++)
  {
    endpoints->r[i] <<= (8 - color_bits);
    endpoints->g[i] <<= (8 - color_bits);
    endpoints->b[i] <<= (8 - color_bits);
    endpoints->a[i] <<= (8 - alpha_bits);

    endpoints->r[i]=endpoints->r[i] | (endpoints->r[i] >> color_bits);
    endpoints->g[i]=endpoints->g[i] | (endpoints->g[i] >> color_bits);
    endpoints->b[i]=endpoints->b[i] | (endpoints->b[i] >> color_bits);
    endpoints->a[i]=endpoints->a[i] | (endpoints->a[i] >> alpha_bits);
  }

  /* modes 0-3 carry no alpha: force fully opaque */
  if (has_alpha == MagickFalse)
    {
      for (i=0; i < num_subsets * 2; i++)
        endpoints->a[i]=255;
    }
}
/*
  Decode one BC7-compressed surface into 'image'.  BC7 stores the picture as
  4x4 pixel blocks of 16 bytes each; every block picks one of eight modes
  (0-7) that trade endpoint precision, subset count and index widths against
  each other.  Per-mode parameters come from BC7_mode_info[].  Returns
  MagickFalse on a short read or a malformed block header.
*/
static MagickBooleanType ReadBC7Pixels(Image *image,
  const DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  BC7Colors
    colors;

  Quantum
    *q;

  size_t
    mode,
    start_bit;

  ssize_t
    count,
    i,
    x,
    y;

  unsigned char
    a,
    alpha_indices[16],
    b,
    block[16],
    c0,
    c1,
    color_indices[16],
    g,
    index_prec,
    index2_prec,
    num_bits,
    num_subsets,
    partition_id,
    r,
    rotation,
    selector_bit,
    subset_indices[16],
    weight;

  magick_unreferenced(dds_info);
  memset(alpha_indices,0,sizeof(alpha_indices));
  memset(block,0,sizeof(block));
  memset(color_indices,0,sizeof(color_indices));
  memset(subset_indices,0,sizeof(subset_indices));
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read 16 bytes of data from the image */
      count=ReadBlob(image,16,block);
      if (count != 16)
        return(MagickFalse);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /*
        Get the mode of the block.  The mode number is encoded in unary:
        the position of the first set bit (at most bit 8) selects mode 0-7.
      */
      start_bit=0;
      while (start_bit <= 8 && !GetBit(block, &start_bit)) {}
      mode=start_bit-1;
      if (mode > 7)
        return(MagickFalse);
      num_subsets=BC7_mode_info[mode].num_subsets;
      partition_id=0;
      /* only these modes have more than 1 subset */
      if ((mode == 0) || (mode == 1) || (mode == 2) || (mode == 3) || (mode == 7))
        {
          partition_id=GetBits(block,&start_bit,BC7_mode_info[mode].partition_bits);
          if (partition_id > 63)
            return(MagickFalse);
        }
      /* Modes 4 and 5 may rotate one color channel into the alpha slot. */
      rotation=0;
      if ((mode == 4) || (mode == 5))
        rotation=GetBits(block,&start_bit,2);
      /* Mode 4's selector bit swaps the color and alpha index widths. */
      selector_bit=0;
      if (mode == 4)
        selector_bit=GetBit(block, &start_bit);
      ReadEndpoints(&colors,block,mode,&start_bit);
      index_prec=BC7_mode_info[mode].index_precision;
      index2_prec=BC7_mode_info[mode].index2_precision;
      if ((mode == 4) && (selector_bit == 1))
        {
          /*
            Swapped layout: the (now 2-bit) alpha indices are read first and
            the color indices widen to 3 bits.  The first index drops its
            implicit-zero MSB.
          */
          index_prec=3;
          alpha_indices[0]=GetBit(block,&start_bit);
          for (i = 1; i < 16; i++)
            alpha_indices[i]=GetBits(block,&start_bit,2);
        }
      /* get color and subset indices */
      for (i=0; i < 16; i++)
      {
        subset_indices[i]=GetSubsetIndex(num_subsets,partition_id,i);
        num_bits=index_prec;
        /* Anchor pixels store one bit less; their MSB is implicitly 0. */
        if (IsPixelAnchorIndex(subset_indices[i],num_subsets,i,partition_id))
          num_bits--;
        color_indices[i]=GetBits(block,&start_bit,num_bits);
      }
      /* get alpha indices if the block has it */
      if ((mode == 5) || ((mode == 4) && (selector_bit == 0)))
        {
          /* The first (anchor) alpha index also drops its implicit MSB. */
          alpha_indices[0]=GetBits(block,&start_bit,index2_prec - 1);
          for (i=1; i < 16; i++)
            alpha_indices[i]=GetBits(block,&start_bit,index2_prec);
        }
      /* Write the pixels */
      for (i=0; i < 16; i++)
      {
        unsigned char
          c2;

        /* Endpoint pair (even/odd entry) for this pixel's subset. */
        c0=2 * subset_indices[i];
        c1=(2 * subset_indices[i]) + 1;
        c2=color_indices[i];
        weight=64;
        /* Color Interpolation */
        switch(index_prec)
        {
          case 2:
            if (c2 < sizeof(BC7_weight2))
              weight=BC7_weight2[c2];
            break;
          case 3:
            if (c2 < sizeof(BC7_weight3))
              weight=BC7_weight3[c2];
            break;
          default:
            if (c2 < sizeof(BC7_weight4))
              weight=BC7_weight4[c2];
        }
        /* Blend the two endpoints in 6-bit fixed point with rounding. */
        r=((64 - weight) * colors.r[c0] + weight * colors.r[c1] + 32) >> 6;
        g=((64 - weight) * colors.g[c0] + weight * colors.g[c1] + 32) >> 6;
        b=((64 - weight) * colors.b[c0] + weight * colors.b[c1] + 32) >> 6;
        a=((64 - weight) * colors.a[c0] + weight * colors.a[c1] + 32) >> 6;
        /* Interpolate alpha for mode 4 and 5 blocks */
        if (mode == 4 || mode == 5)
          {
            unsigned char
              a0;

            a0=alpha_indices[i];
            /* 2-bit alpha weights by default; 3-bit when mode 4 keeps the
               unswapped layout (selector bit clear). */
            if (a0 < sizeof(BC7_weight2))
              weight=BC7_weight2[a0];
            if ((mode == 4) && (selector_bit == 0) && (a0 < sizeof(BC7_weight3)))
              weight=BC7_weight3[a0];
            if ((c0 < sizeof(colors.a)) && (c1 < sizeof(colors.a)))
              a=((64 - weight) * colors.a[c0] + weight * colors.a[c1] + 32) >> 6;
          }
        /* Undo the channel rotation chosen by the encoder. */
        switch (rotation)
        {
          case 1:
            Swap(a,r);
            break;
          case 2:
            Swap(a,g);
            break;
          case 3:
            Swap(a,b);
            break;
        }
        SetPixelRed(image,ScaleCharToQuantum((unsigned char)r),q);
        SetPixelGreen(image,ScaleCharToQuantum((unsigned char)g),q);
        SetPixelBlue(image,ScaleCharToQuantum((unsigned char)b),q);
        SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)a),q);
        q+=GetPixelChannels(image);
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode a BC7-compressed DDS surface and deal with its mipmap chain:
  either decode the mipmaps as additional images or seek past them
  (16 bytes per 4x4 block).
*/
static MagickBooleanType ReadBC7(const ImageInfo *image_info,Image *image,
  const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadBC7Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,16,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadBC7Pixels,exception));
}
/*
  Decode an uncompressed RGB (or grayscale) surface into 'image'.  Three
  layouts are handled per pixel: 8-bit gray, 16-bit 5:6:5, and 24/32-bit
  B,G,R(,X) byte order (the padding byte of 32-bit X8 formats is read and
  discarded).  Returns MagickFalse when the pixel queue or a sync fails,
  or on a short blob.
*/
static MagickBooleanType ReadUncompressedRGBPixels(Image *image,
  const DDSInfo *dds_info,ExceptionInfo *exception)
{
  Quantum
    *q;

  ssize_t
    x,
    y;

  unsigned short
    color;

  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 8 ||
          dds_info->extFormat == DXGI_FORMAT_R8_UNORM)
        /* Cast added for consistency with the other call sites:
           ReadBlobByte() returns int (-1 at EOF). */
        SetPixelGray(image,ScaleCharToQuantum((unsigned char)
          ReadBlobByte(image)),q);
      else if (dds_info->pixelformat.rgb_bitcount == 16 ||
          dds_info->extFormat == DXGI_FORMAT_B5G6R5_UNORM)
        {
          /* 5:6:5 packed color: isolate each field and rescale to 0..255. */
          color=ReadBlobShort(image);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            (((color >> 11)/31.0)*255)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
        }
      else
        {
          /* Bytes arrive in B, G, R order. */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          /* 32-bit X8 layouts carry an unused fourth byte. */
          if (dds_info->pixelformat.rgb_bitcount == 32 ||
              dds_info->extFormat == DXGI_FORMAT_B8G8R8X8_UNORM)
            (void) ReadBlobByte(image);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
Skip the mipmap images for uncompressed (RGB or RGBA) dds files
*/
/*
  Seek past the mipmap chain of an uncompressed (RGB or RGBA) dds file.
  Each level is width*height*pixel_size bytes with dimensions halved per
  level.  Only textures and cube maps carry a chain worth skipping.
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,const DDSInfo *dds_info,
  int pixel_size,ExceptionInfo *exception)
{
  MagickOffsetType
    offset;

  size_t
    h,
    w;

  ssize_t
    i;

  /* A blob already at EOF means the header promised data that is absent. */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if ((dds_info->ddscaps1 & DDSCAPS_MIPMAP) == 0)
    return(MagickTrue);
  if (((dds_info->ddscaps1 & DDSCAPS_TEXTURE) == 0) &&
      ((dds_info->ddscaps2 & DDSCAPS2_CUBEMAP) == 0))
    return(MagickTrue);
  w=DIV2(dds_info->width);
  h=DIV2(dds_info->height);
  /*
    Mipmapcount includes the main image, so start from one.
  */
  for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
  {
    offset=(MagickOffsetType)w*h*pixel_size;
    if (SeekBlob(image,offset,SEEK_CUR) < 0)
      break;
    w=DIV2(w);
    h=DIV2(h);
    if ((w == 1) && (h == 1))
      break;
  }
  return(MagickTrue);
}
/*
  Decode an uncompressed RGB surface.  8-bit data is treated as grayscale;
  16-bit data must use the 5:6:5 bit mask.  The mipmap chain is then either
  decoded or skipped.
  NOTE(review): SkipRGBMipmaps is invoked with 3 bytes/pixel even for the
  8- and 16-bit layouts — confirm against the pixel sizes actually written.
*/
static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info,
  Image *image,const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if ((dds_info->pixelformat.rgb_bitcount == 8) ||
      (dds_info->extFormat == DXGI_FORMAT_R8_UNORM))
    (void) SetImageType(image,GrayscaleType,exception);
  else
    if ((dds_info->pixelformat.rgb_bitcount == 16) &&
        (IsBitMask(dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000) == MagickFalse))
      ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
        image->filename);
  if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipRGBMipmaps(image,dds_info,3,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels,
    exception));
}
/*
  Decode an uncompressed surface with alpha into 'image'.  The 16-bit
  layouts are told apart by their channel bit masks: 5:5:5 color with a
  1-bit alpha in the top bit (alphaBits==1), 8-bit luminance plus 8-bit
  alpha handled as gray (alphaBits==2), and 4:4:4:4 (alphaBits==4).
  Wider data is read byte-wise in either R,G,B,A or B,G,R,A order.
  Returns MagickFalse on an unsupported mask or a short read.
*/
static MagickBooleanType ReadUncompressedRGBAPixels(Image *image,
  const DDSInfo *dds_info,ExceptionInfo *exception)
{
  Quantum
    *q;

  ssize_t
    alphaBits,
    x,
    y;

  unsigned short
    color;

  /* Classify the 16-bit layout by its channel masks. */
  alphaBits=0;
  if (dds_info->pixelformat.rgb_bitcount == 16)
    {
      if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
        alphaBits=1;
      else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
        {
          /* Luminance + alpha: one gray byte, one alpha byte. */
          alphaBits=2;
          (void) SetImageType(image,GrayscaleAlphaType,exception);
        }
      else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
        alphaBits=4;
      else
        ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
          image->filename);
    }
  if (dds_info->extFormat == DXGI_FORMAT_B5G5R5A1_UNORM)
    alphaBits=1;
  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 16 ||
          dds_info->extFormat == DXGI_FORMAT_B5G5R5A1_UNORM)
        {
          color=ReadBlobShort(image);
          if (alphaBits == 1)
            {
              /* 1-bit alpha, then three 5-bit fields rescaled to 0..255. */
              SetPixelAlpha(image,(color & (1 << 15)) ? QuantumRange : 0,q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
            }
          else if (alphaBits == 2)
            {
              /* High byte is alpha, low byte is gray. */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (color >> 8)),q);
              SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q);
            }
          else
            {
              /* 4:4:4:4: each nibble expanded to 0..255. */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (((color >> 12)/15.0)*255)),q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q);
            }
        }
      else if (dds_info->extFormat == DXGI_FORMAT_R8G8B8A8_UNORM ||
          IsBitMask(dds_info->pixelformat,0x000000ff,0x0000ff00,0x00ff0000,0xff000000))
        {
          /* Bytes arrive in R, G, B, A order. */
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      else
        {
          /* Default layout: bytes arrive in B, G, R, A order. */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  Decode an uncompressed RGBA surface and deal with its mipmap chain:
  either decode the mipmaps as additional images or seek past them at
  4 bytes per pixel.
*/
static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info,
  Image *image,const DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipRGBMipmaps(image,dds_info,4,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels,
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadDDSImage() reads a DirectDraw Surface image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadDDSImage method is:
%
% Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: The image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Read a DirectDraw Surface file: parse the header, select a decoder for
  the pixel format (uncompressed, DXT1/3/5, or BC7 via the DX10 extended
  header), work out how many images follow (cubemap faces, volume slices,
  or a DX10 texture array), and decode each one in sequence.

  Fix: the DX10 extArraySize was previously assigned and then immediately
  clobbered by the default 'num_images = 1', so texture arrays always
  decoded as a single image.  The default is now set before format
  determination and a non-zero extArraySize is honored.
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  const char
    *option;

  CompressionType
    compression;

  DDSInfo
    dds_info;

  DDSDecoder
    *decoder;

  Image
    *image;

  MagickBooleanType
    status,
    cubemap,
    volume,
    read_mipmaps;

  PixelTrait
    alpha_trait;

  size_t
    n,
    num_images;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cubemap=MagickFalse;
  volume=MagickFalse;
  read_mipmaps=MagickFalse;
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Initialize image structure.
  */
  if (ReadDDSInfo(image, &dds_info) != MagickTrue)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
    cubemap = MagickTrue;
  if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
    volume = MagickTrue;
  /*
    Determine pixel format.  Default to a single image; the DX10 header,
    cubemap caps, and volume depth below may override this count.
  */
  num_images = 1;
  if (dds_info.pixelformat.flags & DDPF_RGB)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          alpha_trait = BlendPixelTrait;
          decoder = ReadUncompressedRGBA;
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          /* Not sure how to handle this */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_FOURCC)
    {
      switch (dds_info.pixelformat.fourcc)
      {
        case FOURCC_DXT1:
        {
          alpha_trait = UndefinedPixelTrait;
          compression = DXT1Compression;
          decoder = ReadDXT1;
          break;
        }
        case FOURCC_DXT3:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT3Compression;
          decoder = ReadDXT3;
          break;
        }
        case FOURCC_DXT5:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT5Compression;
          decoder = ReadDXT5;
          break;
        }
        case FOURCC_DX10:
        {
          /* Only 2D textures are supported through the DX10 header. */
          if (dds_info.extDimension != DDSEXT_DIMENSION_TEX2D)
            {
              ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
            }
          switch (dds_info.extFormat)
          {
            case DXGI_FORMAT_R8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_B5G6R5_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_B5G5R5A1_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_B8G8R8A8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_R8G8B8A8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_B8G8R8X8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_BC1_UNORM:
            {
              alpha_trait = UndefinedPixelTrait;
              compression = DXT1Compression;
              decoder = ReadDXT1;
              break;
            }
            case DXGI_FORMAT_BC2_UNORM:
            {
              alpha_trait = BlendPixelTrait;
              compression = DXT3Compression;
              decoder = ReadDXT3;
              break;
            }
            case DXGI_FORMAT_BC3_UNORM:
            {
              alpha_trait = BlendPixelTrait;
              compression = DXT5Compression;
              decoder = ReadDXT5;
              break;
            }
            case DXGI_FORMAT_BC7_UNORM:
            case DXGI_FORMAT_BC7_UNORM_SRGB:
            {
              alpha_trait = BlendPixelTrait;
              compression = BC7Compression;
              decoder = ReadBC7;
              break;
            }
            default:
            {
              /* Unknown format */
              ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
            }
          }
          if (dds_info.extFlags & DDSEXTFLAGS_CUBEMAP)
            cubemap = MagickTrue;
          /* A DX10 header may describe a texture array. */
          if (dds_info.extArraySize != 0)
            num_images = dds_info.extArraySize;
          break;
        }
        default:
        {
          /* Unknown FOURCC */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      }
    }
  else
    {
      /* Neither compressed nor uncompressed... thus unsupported */
      ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
    }
  if (cubemap)
    {
      /*
        Determine number of faces defined in the cubemap
      */
      num_images = 0;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
    }
  if (volume)
    num_images = dds_info.depth;
  /* Sanity-check the image count against the actual blob size. */
  if ((num_images == 0) || (num_images > GetBlobSize(image)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse)
    ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit");
  /* Mipmaps are decoded as extra images unless dds:skip-mipmaps=false. */
  option=GetImageOption(image_info,"dds:skip-mipmaps");
  if (IsStringFalse(option) != MagickFalse)
    read_mipmaps=MagickTrue;
  for (n = 0; n < num_images; n++)
  {
    if (n != 0)
      {
        /* Start a new image */
        if (EOFBlob(image) != MagickFalse)
          ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
        AcquireNextImage(image_info,image,exception);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(DestroyImageList(image));
        image=SyncNextImageInList(image);
      }
    image->alpha_trait=alpha_trait;
    image->compression=compression;
    image->columns=dds_info.width;
    image->rows=dds_info.height;
    image->storage_class=DirectClass;
    image->endian=LSBEndian;
    image->depth=8;
    if (image_info->ping != MagickFalse)
      {
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
    status=SetImageExtent(image,image->columns,image->rows,exception);
    if (status == MagickFalse)
      return(DestroyImageList(image));
    (void) SetImageBackgroundColor(image,exception);
    status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception);
    if (status == MagickFalse)
      {
        /* Keep any images decoded so far; fail outright only on the first. */
        (void) CloseBlob(image);
        if (n == 0)
          return(DestroyImageList(image));
        return(GetFirstImageInList(image));
      }
  }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterDDSImage() adds attributes for the DDS image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterDDSImage method is:
%
% RegisterDDSImage(void)
%
*/
/*
  Register the DDS coder under each of its aliases.  All aliases share the
  same decoder, encoder, and format tester, and require a seekable stream.
*/
ModuleExport size_t RegisterDDSImage(void)
{
  static const char
    *aliases[] = { "DDS", "DXT1", "DXT5" };

  MagickInfo
    *entry;

  size_t
    i;

  for (i=0; i < (sizeof(aliases)/sizeof(aliases[0])); i++)
  {
    entry = AcquireMagickInfo("DDS",aliases[i],"Microsoft DirectDraw Surface");
    entry->decoder = (DecodeImageHandler *) ReadDDSImage;
    entry->encoder = (EncodeImageHandler *) WriteDDSImage;
    entry->magick = (IsImageFormatHandler *) IsDDS;
    entry->flags|=CoderDecoderSeekableStreamFlag;
    (void) RegisterMagickInfo(entry);
  }
  return(MagickImageCoderSignature);
}
/*
  Translate the 16 block indices in 'source' through the pixel 'map' into
  'target'.  Slots mapped to -1 (pixels outside the image) receive the
  sentinel index 3.
*/
static void RemapIndices(const ssize_t *map, const unsigned char *source,
  unsigned char *target)
{
  ssize_t
    n;

  for (n = 0; n < 16; n++)
    target[n] = (map[n] == -1) ? 3 : source[map[n]];
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterDDSImage() removes format registrations made by the
% DDS module from the list of supported formats.
%
% The format of the UnregisterDDSImage method is:
%
% UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
(void) UnregisterMagickInfo("DDS");
(void) UnregisterMagickInfo("DXT1");
(void) UnregisterMagickInfo("DXT5");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format.
%
% The format of the WriteDDSImage method is:
%
% MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
/*
  Quantize the 16 alpha values of a block against the DXT5 alpha palette
  built from 'min'/'max' with 'steps' interpolants, writing the chosen
  palette index of each pixel into 'indices' and returning the summed
  squared error.  Entries of -1 in 'alphas' (pixels outside the image)
  get index 0 and cost nothing.  For the 5-step mode the palette's last
  two slots are the implicit 0 and 255; the 7-step mode overwrites them
  with interpolated values.
*/
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  size_t
    total_error;

  ssize_t
    n;

  unsigned char
    codes[8];

  codes[0] = (unsigned char) min;
  codes[1] = (unsigned char) max;
  codes[6] = 0;
  codes[7] = 255;
  for (n=1; n < (ssize_t) steps; n++)
    codes[n+1] = (unsigned char) (((steps-n)*min + n*max) / steps);
  total_error = 0;
  for (n=0; n < 16; n++)
  {
    size_t
      best_dist,
      best_index,
      k;

    if (alphas[n] == -1)
      {
        indices[n] = 0;
        continue;
      }
    best_dist = SIZE_MAX;
    best_index = 0;
    for (k=0; k < 8; k++)
    {
      size_t
        dist;

      /* Unsigned wrap-around is harmless here: (-d)^2 == d^2 mod 2^width. */
      dist = (size_t) alphas[n] - (size_t) codes[k];
      dist *= dist;
      if (dist < best_dist)
        {
          best_dist = dist;
          best_index = k;
        }
    }
    indices[n] = (unsigned char) best_index;
    total_error += best_dist;
  }
  return total_error;
}
/*
  Order the block's points by their projection onto 'axis' and accumulate
  the weighted points (and their total) in that order.  'order' stores one
  16-entry permutation per iteration; if the new permutation duplicates
  one from an earlier iteration the search has converged and MagickFalse
  is returned so the caller can stop.
*/
static MagickBooleanType ConstructOrdering(const size_t count,
  const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
  DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
  float
    dps[16],
    f;

  ssize_t
    i;

  size_t
    j;

  unsigned char
    c,
    *o,
    *p;

  /* Permutation slot for this iteration. */
  o = order + (16*iteration);
  /* Project each point onto the axis; start from the identity order. */
  for (i=0; i < (ssize_t) count; i++)
  {
    dps[i] = Dot(points[i],axis);
    o[i] = (unsigned char)i;
  }
  /* Insertion-sort the projections, permuting 'o' alongside. */
  for (i=0; i < (ssize_t) count; i++)
  {
    for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
    {
      f = dps[j];
      dps[j] = dps[j - 1];
      dps[j - 1] = f;
      c = o[j];
      o[j] = o[j - 1];
      o[j - 1] = c;
    }
  }
  /* A repeated ordering means no further improvement is possible. */
  for (i=0; i < (ssize_t) iteration; i++)
  {
    MagickBooleanType
      same;

    p = order + (16*i);
    same = MagickTrue;
    for (j=0; j < count; j++)
    {
      if (o[j] != p[j])
        {
          same = MagickFalse;
          break;
        }
    }
    if (same != MagickFalse)
      return MagickFalse;
  }
  /* Rebuild the weighted points in sorted order and sum them. */
  xSumwSum->x = 0;
  xSumwSum->y = 0;
  xSumwSum->z = 0;
  xSumwSum->w = 0;
  for (i=0; i < (ssize_t) count; i++)
  {
    DDSVector4
      v;

    j = (size_t) o[i];
    v.x = points[j].w * points[j].x;
    v.y = points[j].w * points[j].y;
    v.z = points[j].w * points[j].z;
    v.w = points[j].w * 1.0f;
    VectorCopy44(v,&pointsWeights[i]);
    VectorAdd(*xSumwSum,v,xSumwSum);
  }
  return MagickTrue;
}
/*
  High-quality DXT1 color fit ("cluster fit").  The points are ordered
  along the principal axis, then every contiguous split of that order into
  four clusters (interpolation weights 0, 1/3, 2/3, 1) is scored by
  solving a least-squares system for the two endpoints; the split with the
  lowest metric-weighted error wins.  Up to eight re-orderings along the
  refined axis are attempted; ConstructOrdering ends the search early when
  an ordering repeats.  The OpenMP workers share bestError through the
  critical section below.
*/
static void CompressClusterFit(const size_t count,
  const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3* end,
  unsigned char *indices)
{
  DDSVector3
    axis;

  DDSVector4
    grid,
    gridrcp,
    half,
    onethird_onethird2,
    pointsWeights[16],
    two,
    twonineths,
    twothirds_twothirds2,
    xSumwSum;

  float
    bestError = 1e+37f;

  size_t
    bestIteration = 0,
    besti = 0,
    bestj = 0,
    bestk = 0,
    iterationIndex;

  ssize_t
    i;

  unsigned char
    *o,
    order[128],
    unordered[16];

  /* Constants for the least-squares solve and 5:6:5 grid snapping. */
  VectorInit(half,0.5f);
  VectorInit(two,2.0f);
  VectorInit(onethird_onethird2,1.0f/3.0f);
  onethird_onethird2.w = 1.0f/9.0f;
  VectorInit(twothirds_twothirds2,2.0f/3.0f);
  twothirds_twothirds2.w = 4.0f/9.0f;
  VectorInit(twonineths,2.0f/9.0f);
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  grid.w = 0.0f;
  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  gridrcp.w = 0.0f;
  xSumwSum.x = 0.0f;
  xSumwSum.y = 0.0f;
  xSumwSum.z = 0.0f;
  xSumwSum.w = 0.0f;
  ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);
  for (iterationIndex = 0;;)
  {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,1) \
  num_threads(GetMagickResourceLimit(ThreadResource))
#endif
    for (i=0; i < (ssize_t) count; i++)
    {
      DDSVector4
        part0,
        part1,
        part2;

      size_t
        ii,
        j,
        k,
        kmin;

      /* part0 accumulates points [0, i): the weight-0 cluster. */
      VectorInit(part0,0.0f);
      for(ii=0; ii < (size_t) i; ii++)
        VectorAdd(pointsWeights[ii],part0,&part0);
      /* part1 accumulates points [i, j): the 2/3-weight cluster. */
      VectorInit(part1,0.0f);
      for (j=(size_t) i;;)
      {
        if (j == 0)
          {
            VectorCopy44(pointsWeights[0],&part2);
            kmin = 1;
          }
        else
          {
            VectorInit(part2,0.0f);
            kmin = j;
          }
        /* part2 accumulates points [j, k): the 1/3-weight cluster. */
        for (k=kmin;;)
        {
          DDSVector4
            a,
            alpha2_sum,
            alphax_sum,
            alphabeta_sum,
            b,
            beta2_sum,
            betax_sum,
            e1,
            e2,
            factor,
            part3;

          float
            error;

          /* part3 is the remainder [k, count): the weight-1 cluster. */
          VectorSubtract(xSumwSum,part2,&part3);
          VectorSubtract(part3,part1,&part3);
          VectorSubtract(part3,part0,&part3);
          /* Solve the 2x2 least-squares system for endpoints a and b. */
          VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
          VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
          VectorInit(alpha2_sum,alphax_sum.w);
          VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
          VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
          VectorInit(beta2_sum,betax_sum.w);
          VectorAdd(part1,part2,&alphabeta_sum);
          VectorInit(alphabeta_sum,alphabeta_sum.w);
          VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);
          VectorMultiply(alpha2_sum,beta2_sum,&factor);
          VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
            &factor);
          VectorReciprocal(factor,&factor);
          VectorMultiply(alphax_sum,beta2_sum,&a);
          VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
          VectorMultiply(a,factor,&a);
          VectorMultiply(betax_sum,alpha2_sum,&b);
          VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
          VectorMultiply(b,factor,&b);
          /* Snap both endpoints to the representable 5:6:5 grid. */
          VectorClamp(&a);
          VectorMultiplyAdd(grid,a,half,&a);
          VectorTruncate(&a);
          VectorMultiply(a,gridrcp,&a);
          VectorClamp(&b);
          VectorMultiplyAdd(grid,b,half,&b);
          VectorTruncate(&b);
          VectorMultiply(b,gridrcp,&b);
          /* Metric-weighted squared error of this split. */
          VectorMultiply(b,b,&e1);
          VectorMultiply(e1,beta2_sum,&e1);
          VectorMultiply(a,a,&e2);
          VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);
          VectorMultiply(a,b,&e2);
          VectorMultiply(e2,alphabeta_sum,&e2);
          VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
          VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
          VectorMultiplyAdd(two,e2,e1,&e2);
          VectorMultiply(e2,metric,&e2);
          error = e2.x + e2.y + e2.z;
          if (error < bestError)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (DDS_CompressClusterFit)
#endif
              {
                /* Re-check under the critical section (double-checked). */
                if (error < bestError)
                  {
                    VectorCopy43(a,start);
                    VectorCopy43(b,end);
                    bestError = error;
                    besti = i;
                    bestj = j;
                    bestk = k;
                    bestIteration = iterationIndex;
                  }
              }
            }
          if (k == count)
            break;
          VectorAdd(pointsWeights[k],part2,&part2);
          k++;
        }
        if (j == count)
          break;
        VectorAdd(pointsWeights[j],part1,&part1);
        j++;
      }
    }
    /* Stop when the last pass found no improvement, or after 8 passes. */
    if (bestIteration != iterationIndex)
      break;
    iterationIndex++;
    if (iterationIndex == 8)
      break;
    /* Re-order along the freshly fitted axis and try again. */
    VectorSubtract3(*end,*start,&axis);
    if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
      iterationIndex) == MagickFalse)
      break;
  }
  /* Emit the winning cluster assignment as 2-bit palette indices. */
  o = order + (16*bestIteration);
  for (i=0; i < (ssize_t) besti; i++)
    unordered[o[i]] = 0;
  for (i=besti; i < (ssize_t) bestj; i++)
    unordered[o[i]] = 2;
  for (i=bestj; i < (ssize_t) bestk; i++)
    unordered[o[i]] = 3;
  for (i=bestk; i < (ssize_t) count; i++)
    unordered[o[i]] = 1;
  RemapIndices(map,unordered,indices);
}
/*
  Fast DXT1 color fit ("range fit").  Project every point onto the
  principal axis, take the extreme projections as endpoints, snap them to
  5:6:5 grid precision, and assign each point to the nearest of the four
  interpolated codes under the per-channel 'metric'.
*/
static void CompressRangeFit(const size_t count,
  const DDSVector4* points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
  unsigned char *indices)
{
  float
    d,
    bestDist,
    max,
    min,
    val;

  DDSVector3
    codes[4],
    grid,
    gridrcp,
    half,
    dist;

  ssize_t
    i;

  size_t
    bestj,
    j;

  unsigned char
    closest[16];

  VectorInit3(half,0.5f);
  /* 5:6:5 quantization grid and its reciprocal. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  if (count > 0)
    {
      /* Find the points with the extreme projections onto the axis. */
      VectorCopy43(points[0],start);
      VectorCopy43(points[0],end);
      min = max = Dot(points[0],principle);
      for (i=1; i < (ssize_t) count; i++)
      {
        val = Dot(points[i],principle);
        if (val < min)
          {
            VectorCopy43(points[i],start);
            min = val;
          }
        else if (val > max)
          {
            VectorCopy43(points[i],end);
            max = val;
          }
      }
    }
  /* Snap both endpoints to the representable 5:6:5 grid. */
  VectorClamp3(start);
  VectorMultiplyAdd3(grid,*start,half,start);
  VectorTruncate3(start);
  VectorMultiply3(*start,gridrcp,start);
  VectorClamp3(end);
  VectorMultiplyAdd3(grid,*end,half,end);
  VectorTruncate3(end);
  VectorMultiply3(*end,gridrcp,end);
  /* The four codes: both endpoints plus the 1/3 and 2/3 interpolants. */
  codes[0] = *start;
  codes[1] = *end;
  codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
  codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
  codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
  codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
  codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
  codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));
  /* Assign each point to its nearest code under the channel metric. */
  for (i=0; i < (ssize_t) count; i++)
  {
    bestDist = 1e+37f;
    bestj = 0;
    for (j=0; j < 4; j++)
    {
      dist.x = (points[i].x - codes[j].x) * metric.x;
      dist.y = (points[i].y - codes[j].y) * metric.y;
      dist.z = (points[i].z - codes[j].z) * metric.z;
      d = Dot(dist,dist);
      if (d < bestDist)
        {
          bestDist = d;
          bestj = j;
        }
    }
    closest[i] = (unsigned char) bestj;
  }
  RemapIndices(map, closest, indices);
}
/*
  Pick endpoints for a single-color block from the precomputed per-channel
  lookup tables.  Each of the two table variants (i = 0, 1) yields a
  candidate (start, end) pair with a summed squared quantization error;
  the lower-error variant wins.  '*index' becomes 0 or 2, the
  interpolation slot the chosen table variant was built for.
*/
static void ComputeEndPoints(const DDSSingleColorLookup *lookup[],
  const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
  unsigned char *index)
{
  ssize_t
    i;

  size_t
    c,
    maxError = SIZE_MAX;

  for (i=0; i < 2; i++)
  {
    const DDSSourceBlock*
      sources[3];

    size_t
      error = 0;

    /* Look up the best source block per channel and total the errors. */
    for (c=0; c < 3; c++)
    {
      sources[c] = &lookup[c][color[c]].sources[i];
      error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
    }
    /* Keep only a strictly better (or first) variant. */
    if (error > maxError)
      continue;
    /* Endpoints are stored at 5:6:5 precision; normalize to [0, 1]. */
    start->x = (float) sources[0]->start / 31.0f;
    start->y = (float) sources[1]->start / 63.0f;
    start->z = (float) sources[2]->start / 31.0f;
    end->x = (float) sources[0]->end / 31.0f;
    end->y = (float) sources[1]->end / 63.0f;
    end->z = (float) sources[2]->end / 31.0f;
    *index = (unsigned char) (2*i);
    maxError = error;
  }
}
/*
  Estimate the dominant eigenvector (principal axis) of the symmetric 3x3
  covariance matrix, supplied as its six unique coefficients, using eight
  fixed rounds of power iteration.  Each round rescales the vector by the
  reciprocal of its largest component to keep the values bounded; no
  convergence test is performed.
*/
static void ComputePrincipleComponent(const float *covariance,
  DDSVector3 *principle)
{
  DDSVector4
    row0,
    row1,
    row2,
    v;

  ssize_t
    i;

  /* Expand the packed coefficients into the symmetric matrix rows. */
  row0.x = covariance[0];
  row0.y = covariance[1];
  row0.z = covariance[2];
  row0.w = 0.0f;
  row1.x = covariance[1];
  row1.y = covariance[3];
  row1.z = covariance[4];
  row1.w = 0.0f;
  row2.x = covariance[2];
  row2.y = covariance[4];
  row2.z = covariance[5];
  row2.w = 0.0f;
  VectorInit(v,1.0f);
  for (i=0; i < 8; i++)
  {
    DDSVector4
      w;

    float
      a;

    /* w = M * v, accumulated row by row. */
    w.x = row0.x * v.x;
    w.y = row0.y * v.x;
    w.z = row0.z * v.x;
    w.w = row0.w * v.x;
    w.x = (row1.x * v.y) + w.x;
    w.y = (row1.y * v.y) + w.y;
    w.z = (row1.z * v.y) + w.z;
    w.w = (row1.w * v.y) + w.w;
    w.x = (row2.x * v.z) + w.x;
    w.y = (row2.y * v.z) + w.y;
    w.z = (row2.z * v.z) + w.z;
    w.w = (row2.w * v.z) + w.w;
    /* Rescale by the largest component to avoid overflow. */
    a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z)));
    v.x = w.x * a;
    v.y = w.y * a;
    v.z = w.z * a;
    v.w = w.w * a;
  }
  VectorCopy43(v,principle);
}
/*
  Compute the weighted 3x3 covariance matrix of the block's color points
  (per-point weight in the w component), writing its six unique
  coefficients [xx, xy, xz, yy, yz, zz] into 'covariance'.
*/
static void ComputeWeightedCovariance(const size_t count,
  const DDSVector4 *points, float *covariance)
{
  DDSVector3
    mean;

  float
    weight_sum;

  size_t
    n;

  /* Weighted centroid of the point set. */
  weight_sum = 0.0f;
  VectorInit3(mean,0.0f);
  for (n=0; n < count; n++)
  {
    weight_sum += points[n].w;
    mean.x += (points[n].x * points[n].w);
    mean.y += (points[n].y * points[n].w);
    mean.z += (points[n].z * points[n].w);
  }
  if (weight_sum > 1.192092896e-07F)
    {
      mean.x /= weight_sum;
      mean.y /= weight_sum;
      mean.z /= weight_sum;
    }
  /* Accumulate the upper triangle of the covariance matrix. */
  for (n=0; n < 6; n++)
    covariance[n] = 0.0f;
  for (n=0; n < count; n++)
  {
    DDSVector3
      d,
      wd;

    d.x = points[n].x - mean.x;
    d.y = points[n].y - mean.y;
    d.z = points[n].z - mean.z;
    wd.x = points[n].w * d.x;
    wd.y = points[n].w * d.y;
    wd.z = points[n].w * d.z;
    covariance[0] += d.x*wd.x;
    covariance[1] += d.x*wd.y;
    covariance[2] += d.x*wd.z;
    covariance[3] += d.y*wd.y;
    covariance[4] += d.y*wd.z;
    covariance[5] += d.z*wd.z;
  }
}
/*
  Emit the 8-byte DXT5 alpha block.  Both encodings are tried: the 5-step
  mode (four interpolants plus the implicit 0 and 255 codes) and the
  7-step mode (fully interpolated).  Whichever quantizes with less error
  is written.  When the 7-step table wins, its indices are remapped to the
  reversed-endpoint numbering and the endpoints swapped, which is how the
  mode is signalled in the written endpoint pair.  Sixteen 3-bit indices
  follow the two endpoint bytes, packed into two little-endian 24-bit
  groups.
*/
static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5,
  size_t max5, size_t min7, size_t max7)
{
  ssize_t
    i;

  size_t
    err5,
    err7,
    j;

  unsigned char
    indices5[16],
    indices7[16];

  /* Score the 5-step encoding... */
  FixRange(min5,max5,5);
  err5 = CompressAlpha(min5,max5,5,alphas,indices5);
  /* ...and the 7-step encoding. */
  FixRange(min7,max7,7);
  err7 = CompressAlpha(min7,max7,7,alphas,indices7);
  if (err7 < err5)
    {
      /* 7-step wins: rewrite its indices for the swapped endpoints. */
      for (i=0; i < 16; i++)
      {
        unsigned char
          index;

        index = indices7[i];
        if( index == 0 )
          indices5[i] = 1;
        else if (index == 1)
          indices5[i] = 0;
        else
          indices5[i] = 9 - index;
      }
      min5 = max7;
      max5 = min7;
    }
  (void) WriteBlobByte(image,(unsigned char) min5);
  (void) WriteBlobByte(image,(unsigned char) max5);
  /* Pack sixteen 3-bit indices as two groups of eight (3 bytes each). */
  for(i=0; i < 2; i++)
  {
    size_t
      value = 0;

    for (j=0; j < 8; j++)
    {
      size_t index = (size_t) indices5[j + i*8];
      value |= ( index << 3*j );
    }
    for (j=0; j < 3; j++)
    {
      size_t byte = (value >> 8*j) & 0xff;
      (void) WriteBlobByte(image,(unsigned char) byte);
    }
  }
}
/*
  Write a DXT color block: the two endpoint colors as 16-bit 565 values
  (little-endian) followed by sixteen 2-bit palette indices packed four per
  byte.  When the endpoints are stored in ascending order they are swapped
  (and the indices toggled) so the emitted block always has the first
  endpoint >= the second.
*/
static void WriteIndices(Image *image, const DDSVector3 start,
  const DDSVector3 end, unsigned char *indices)
{
  const unsigned char
    *quad;

  size_t
    first,
    second;

  ssize_t
    k;

  unsigned char
    remapped[16];

  first = ColorTo565(start);
  second = ColorTo565(end);
  for (k=0; k < 16; k++)
  {
    if (first == second)
      remapped[k] = 0;
    else if (first < second)
      remapped[k] = (indices[k] ^ 0x1) & 0x3;
    else
      remapped[k] = indices[k];
  }
  if (first < second)
    Swap(first,second);
  (void) WriteBlobByte(image,(unsigned char) (first & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (first >> 8));
  (void) WriteBlobByte(image,(unsigned char) (second & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (second >> 8));
  for (k=0; k < 4; k++)
  {
    quad = remapped + 4*k;
    (void) WriteBlobByte(image,quad[0] | (quad[1] << 2) | (quad[2] << 4) |
      (quad[3] << 6));
  }
}
/*
  Compress one block of points into a DXT color block.  The principal axis
  of the weighted point cloud is computed from its covariance, then either
  the slower cluster fit (when requested and points exist) or the fast
  range fit chooses the two endpoint colors and per-texel indices.
*/
static void WriteCompressed(Image *image, const size_t count,
  DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit)
{
  DDSVector3
    axis,
    first,
    last;

  DDSVector4
    weights;

  float
    cov[16];

  unsigned char
    indices[16];

  VectorInit(weights,1.0f);
  VectorInit3(first,0.0f);
  VectorInit3(last,0.0f);
  ComputeWeightedCovariance(count,points,cov);
  ComputePrincipleComponent(cov,&axis);
  if ((clusterFit != MagickFalse) && (count != 0))
    CompressClusterFit(count,points,map,axis,weights,&first,&last,indices);
  else
    CompressRangeFit(count,points,map,axis,weights,&first,&last,indices);
  WriteIndices(image,first,last,indices);
}
/*
  Write a DXT color block for a tile whose texels all share one color:
  the precomputed lookup table provides the best endpoint pair and palette
  index for that color, and the same index is used for all sixteen texels.
*/
static void WriteSingleColorFit(Image *image, const DDSVector4 *points,
  const ssize_t *map)
{
  DDSVector3
    first,
    last;

  ssize_t
    k;

  unsigned char
    index,
    indices[16],
    rgb[3],
    uniform[16];

  rgb[0] = (unsigned char) ClampToLimit(255.0f*points->x,255);
  rgb[1] = (unsigned char) ClampToLimit(255.0f*points->y,255);
  rgb[2] = (unsigned char) ClampToLimit(255.0f*points->z,255);
  index=0;
  ComputeEndPoints(DDS_LOOKUP,rgb,&first,&last,&index);
  for (k=0; k < 16; k++)
    uniform[k]=index;
  /* Reorder the uniform indices into raster order via the block map. */
  RemapIndices(map,uniform,indices);
  WriteIndices(image,first,last,indices);
}
/*
  Write the image as DXT1/DXT5 compressed data, one 4x4 block at a time.
  For each block the distinct colors are collected (with per-color weights
  accumulated from the alpha channel when weightByAlpha is set), the alpha
  block is written first for DXT5, and the color block is produced by the
  single-color, range-fit or cluster-fit compressor.
*/
static void WriteFourCC(Image *image, const size_t compression,
const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
ExceptionInfo *exception)
{
ssize_t
x;
ssize_t
i,
y,
bx,
by;
const Quantum
*p;
for (y=0; y < (ssize_t) image->rows; y+=4)
{
for (x=0; x < (ssize_t) image->columns; x+=4)
{
MagickBooleanType
match;
DDSVector4
point,
points[16];
size_t
count = 0,
max5 = 0,
max7 = 0,
min5 = 255,
min7 = 255,
columns = 4,
rows = 4;
ssize_t
alphas[16],
map[16];
unsigned char
alpha;
/* Clamp the block to the image edge for non-multiple-of-4 sizes. */
if (x + columns >= image->columns)
columns = image->columns - x;
if (y + rows >= image->rows)
rows = image->rows - y;
p=GetVirtualPixels(image,x,y,columns,rows,exception);
if (p == (const Quantum *) NULL)
break;
for (i=0; i<16; i++)
{
map[i] = -1;
alphas[i] = -1;
}
for (by=0; by < (ssize_t) rows; by++)
{
for (bx=0; bx < (ssize_t) columns; bx++)
{
if (compression == FOURCC_DXT5)
alpha = ScaleQuantumToChar(GetPixelAlpha(image,p));
else
alpha = 255;
if (compression == FOURCC_DXT5)
{
/* Track endpoint candidates for both DXT5 alpha encodings:
   min7/max7 over all alphas, min5/max5 excluding the explicit
   0 and 255 codes of the 5-interpolant mode. */
if (alpha < min7)
min7 = alpha;
if (alpha > max7)
max7 = alpha;
if (alpha != 0 && alpha < min5)
min5 = alpha;
if (alpha != 255 && alpha > max5)
max5 = alpha;
}
alphas[4*by + bx] = (size_t)alpha;
point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f;
point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f;
point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f;
/* Optional alpha weighting biases the fit toward opaque texels. */
point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f;
p+=GetPixelChannels(image);
/* Deduplicate colors: accumulate the weight onto an existing
   matching point instead of adding a new one. */
match = MagickFalse;
for (i=0; i < (ssize_t) count; i++)
{
if ((points[i].x == point.x) &&
(points[i].y == point.y) &&
(points[i].z == point.z) &&
(alpha >= 128 || compression == FOURCC_DXT5))
{
points[i].w += point.w;
map[4*by + bx] = i;
match = MagickTrue;
break;
}
}
if (match != MagickFalse)
continue;
points[count].x = point.x;
points[count].y = point.y;
points[count].z = point.z;
points[count].w = point.w;
map[4*by + bx] = count;
count++;
}
}
for (i=0; i < (ssize_t) count; i++)
points[i].w = sqrt(points[i].w);
if (compression == FOURCC_DXT5)
WriteAlphas(image,alphas,min5,max5,min7,max7);
if (count == 1)
WriteSingleColorFit(image,points,map);
else
WriteCompressed(image,count,points,map,clusterFit);
}
}
}
/*
  Write the pixels as uncompressed scanlines in BGR byte order, appending
  an alpha byte per pixel only when the image has an alpha channel.
*/
static void WriteUncompressed(Image *image, ExceptionInfo *exception)
{
  const Quantum
    *q;

  ssize_t
    column,
    row;

  for (row=0; row < (ssize_t) image->rows; row++)
  {
    q=GetVirtualPixels(image,0,row,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      break;
    for (column=0; column < (ssize_t) image->columns; column++)
    {
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,q)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,q)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,q)));
      if (image->alpha_trait != UndefinedPixelTrait)
        (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,q)));
      q+=GetPixelChannels(image);
    }
  }
}
/*
  Dispatch pixel emission for one image: FOURCC pixel formats go through
  the DXT compressor, everything else is written uncompressed.
*/
static void WriteImageData(Image *image, const size_t pixelFormat,
  const size_t compression,const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha, ExceptionInfo *exception)
{
  if (pixelFormat != DDPF_FOURCC)
    WriteUncompressed(image,exception);
  else
    WriteFourCC(image,compression,clusterFit,weightByAlpha,exception);
}
/*
  Write the mipmap chain after the base image.  When `fromlist` is set the
  levels are taken from the images following `image` in its list (their
  dimensions must match the expected chain); otherwise each level is
  produced by resizing.  The dds:fast-mipmaps option resizes each level
  from the previous level instead of from the full-size image.

  Returns MagickTrue on success, MagickFalse if a resize fails; throws a
  coder exception when a listed mipmap has unexpected dimensions.

  Bug fix: the WriteImageData() call previously passed weightByAlpha and
  clusterFit transposed relative to the prototype
  (..., clusterFit, weightByAlpha, ...), so every mipmap level was
  compressed with the two flags swapped.  The arguments are now in
  prototype order.
*/
static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info,
  const size_t pixelFormat,const size_t compression,const size_t mipmaps,
  const MagickBooleanType fromlist,const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha,ExceptionInfo *exception)
{
  const char
    *option;

  Image
    *mipmap_image,
    *resize_image;

  MagickBooleanType
    fast_mipmaps,
    status;

  ssize_t
    i;

  size_t
    columns,
    rows;

  columns=DIV2(image->columns);
  rows=DIV2(image->rows);
  option=GetImageOption(image_info,"dds:fast-mipmaps");
  fast_mipmaps=IsStringTrue(option);
  mipmap_image=image;
  resize_image=image;
  status=MagickTrue;
  for (i=0; i < (ssize_t) mipmaps; i++)
  {
    if (fromlist == MagickFalse)
      {
        mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter,
          exception);
        if (mipmap_image == (Image *) NULL)
          {
            status=MagickFalse;
            break;
          }
      }
    else
      {
        mipmap_image=mipmap_image->next;
        if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows))
          ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported",
            image->filename);
      }
    /* Redirect the mipmap's blob to the base image's output stream. */
    DestroyBlob(mipmap_image);
    mipmap_image->blob=ReferenceBlob(image->blob);
    WriteImageData(mipmap_image,pixelFormat,compression,clusterFit,
      weightByAlpha,exception);
    if (fromlist == MagickFalse)
      {
        if (fast_mipmaps == MagickFalse)
          mipmap_image=DestroyImage(mipmap_image);
        else
          {
            /* Keep this level around as the source for the next resize. */
            if (resize_image != image)
              resize_image=DestroyImage(resize_image);
            resize_image=mipmap_image;
          }
      }
    columns=DIV2(columns);
    rows=DIV2(rows);
  }
  if (resize_image != image)
    resize_image=DestroyImage(resize_image);
  return(status);
}
/*
  Write the 128-byte DDS file header: the "DDS " magic, the 124-byte
  DDS_HEADER (flags, dimensions, linear size or pitch, mipmap count) and
  the embedded 32-byte pixel-format structure (FourCC for compressed
  output, RGB(A) bit masks otherwise).
*/
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
const size_t compression, const size_t mipmaps)
{
char
software[MagickPathExtent];
ssize_t
i;
unsigned int
format,
caps,
flags;
flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
DDSD_PIXELFORMAT);
caps=(unsigned int) DDSCAPS_TEXTURE;
format=(unsigned int) pixelFormat;
if (format == DDPF_FOURCC)
flags=flags | DDSD_LINEARSIZE;
else
flags=flags | DDSD_PITCH;
if (mipmaps > 0)
{
flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
}
if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait)
format=format | DDPF_ALPHAPIXELS;
(void) WriteBlob(image,4,(unsigned char *) "DDS ");
(void) WriteBlobLSBLong(image,124);
(void) WriteBlobLSBLong(image,flags);
(void) WriteBlobLSBLong(image,(unsigned int) image->rows);
(void) WriteBlobLSBLong(image,(unsigned int) image->columns);
if (pixelFormat == DDPF_FOURCC)
{
/* Compressed DDS requires linear compressed size of first image:
   blocks are 4x4 texels, 8 bytes each for DXT1, 16 for DXT5. */
if (compression == FOURCC_DXT1)
(void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
(image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8));
else /* DXT5 */
(void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
(image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16));
}
else
{
/* Uncompressed DDS requires byte pitch of first image */
if (image->alpha_trait != UndefinedPixelTrait)
(void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4));
else
(void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3));
}
(void) WriteBlobLSBLong(image,0x00);
(void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1);
(void) memset(software,0,sizeof(software));
(void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent);
(void) WriteBlob(image,44,(unsigned char *) software);
/* Embedded DDS_PIXELFORMAT: size (32), flags, then FourCC or masks. */
(void) WriteBlobLSBLong(image,32);
(void) WriteBlobLSBLong(image,format);
if (pixelFormat == DDPF_FOURCC)
{
(void) WriteBlobLSBLong(image,(unsigned int) compression);
for(i=0;i < 5;i++) /* bitcount / masks */
(void) WriteBlobLSBLong(image,0x00);
}
else
{
(void) WriteBlobLSBLong(image,0x00);
if (image->alpha_trait != UndefinedPixelTrait)
{
(void) WriteBlobLSBLong(image,32);
(void) WriteBlobLSBLong(image,0xff0000);
(void) WriteBlobLSBLong(image,0xff00);
(void) WriteBlobLSBLong(image,0xff);
(void) WriteBlobLSBLong(image,0xff000000);
}
else
{
(void) WriteBlobLSBLong(image,24);
(void) WriteBlobLSBLong(image,0xff0000);
(void) WriteBlobLSBLong(image,0xff00);
(void) WriteBlobLSBLong(image,0xff);
(void) WriteBlobLSBLong(image,0x00);
}
}
(void) WriteBlobLSBLong(image,caps);
for(i=0;i < 4;i++) /* ddscaps2 + reserved region */
(void) WriteBlobLSBLong(image,0x00);
}
/*
  Encode `image` as a DDS file: pick the pixel format and compression from
  the magick name, image_info compression and dds:* options, determine the
  mipmap count (explicit, "fromlist", or derived for power-of-two sizes),
  write the header (unless dds:raw), the base image, and the mipmap chain.
  Returns MagickTrue on success.
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
Image *image, ExceptionInfo *exception)
{
const char
*option;
size_t
compression,
columns,
maxMipmaps,
mipmaps,
pixelFormat,
rows;
MagickBooleanType
clusterFit,
fromlist,
status,
weightByAlpha;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) TransformImageColorspace(image,sRGBColorspace,exception);
/* Defaults: DXT5 for images with alpha, DXT1 otherwise; the magick name,
   image_info compression and dds:compression may override. */
pixelFormat=DDPF_FOURCC;
compression=FOURCC_DXT5;
if (image->alpha_trait == UndefinedPixelTrait)
compression=FOURCC_DXT1;
if (LocaleCompare(image_info->magick,"dxt1") == 0)
compression=FOURCC_DXT1;
if (image_info->compression == DXT1Compression)
compression=FOURCC_DXT1;
else if (image_info->compression == NoCompression)
pixelFormat=DDPF_RGB;
option=GetImageOption(image_info,"dds:compression");
if (option != (char *) NULL)
{
if (LocaleCompare(option,"dxt1") == 0)
compression=FOURCC_DXT1;
if (LocaleCompare(option,"none") == 0)
pixelFormat=DDPF_RGB;
}
clusterFit=MagickFalse;
weightByAlpha=MagickFalse;
if (pixelFormat == DDPF_FOURCC)
{
option=GetImageOption(image_info,"dds:cluster-fit");
if (IsStringTrue(option) != MagickFalse)
{
clusterFit=MagickTrue;
/* Alpha weighting only applies to the cluster fit of DXT5 output. */
if (compression != FOURCC_DXT1)
{
option=GetImageOption(image_info,"dds:weight-by-alpha");
if (IsStringTrue(option) != MagickFalse)
weightByAlpha=MagickTrue;
}
}
}
mipmaps=0;
fromlist=MagickFalse;
option=GetImageOption(image_info,"dds:mipmaps");
if (option != (char *) NULL)
{
/* "fromlist": the trailing images in the list are the mipmap levels. */
if (LocaleNCompare(option,"fromlist",8) == 0)
{
Image
*next;
fromlist=MagickTrue;
next=image->next;
while(next != (Image *) NULL)
{
mipmaps++;
next=next->next;
}
}
}
/* Derive the mipmap count for power-of-two dimensions, capped by an
   explicit numeric dds:mipmaps value when one was given. */
if ((mipmaps == 0) &&
((image->columns & (image->columns - 1)) == 0) &&
((image->rows & (image->rows - 1)) == 0))
{
maxMipmaps=SIZE_MAX;
if (option != (char *) NULL)
maxMipmaps=StringToUnsignedLong(option);
if (maxMipmaps != 0)
{
columns=image->columns;
rows=image->rows;
while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps)
{
columns=DIV2(columns);
rows=DIV2(rows);
mipmaps++;
}
}
}
/* dds:raw suppresses the header (and therefore the mipmap chain). */
option=GetImageOption(image_info,"dds:raw");
if (IsStringTrue(option) == MagickFalse)
WriteDDSInfo(image,pixelFormat,compression,mipmaps);
else
mipmaps=0;
WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
exception);
if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression,
mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse))
return(MagickFalse);
(void) CloseBlob(image);
return(MagickTrue);
}
|
GB_binop__bor_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bor_uint32
// A.*B function (eWiseMult): GB_AemultB__bor_uint32
// A*D function (colscale): GB_AxD__bor_uint32
// D*A function (rowscale): GB_DxB__bor_uint32
// C+=B function (dense accum): GB_Cdense_accumB__bor_uint32
// C+=b function (dense accum): GB_Cdense_accumb__bor_uint32
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bor_uint32
// C=scalar+B GB_bind1st__bor_uint32
// C=scalar+B' GB_bind1st_tran__bor_uint32
// C=A+scalar GB_bind2nd__bor_uint32
// C=A'+scalar GB_bind2nd_tran__bor_uint32
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij) | (bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x) | (y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BOR || GxB_NO_UINT32 || GxB_NO_BOR_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with BOR (cij = aij | bij) where C, A, and B are all dense;
// the loop body comes from the shared dense ewise3 template.
GrB_Info GB_Cdense_ewise3_noaccum__bor_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B with BOR, accumulating a sparse matrix B into a dense matrix C;
// the slice arrays partition B's entries across ntasks tasks.
GrB_Info GB_Cdense_accumB__bor_uint32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b with BOR, accumulating a scalar b into a dense matrix C.
GrB_Info GB_Cdense_accumb__bor_uint32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the inner block always returns above;
// kept as emitted by the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D using BOR.
GrB_Info GB_AxD__bor_uint32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
// the template writes C's values directly through Cx
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D using BOR.
GrB_Info GB_DxB__bor_uint32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
// the template writes C's values directly through Cx
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with BOR on the set union of patterns.
GrB_Info GB_AaddB__bor_uint32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B with BOR on the pattern intersection.
GrB_Info GB_AemultB__bor_uint32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x | Bx [p] for all p: apply BOR with the scalar bound to the
// first argument.  The operation is elementwise, so aliasing Cx with Bx
// is safe.
GrB_Info GB_bind1st__bor_uint32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *out = (uint32_t *) Cx_output ;
uint32_t *in = (uint32_t *) Bx_input ;
uint32_t scalar = (*((uint32_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
out [k] = scalar | in [k] ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] | y for all p: apply BOR with the scalar bound to the
// second argument.  The operation is elementwise, so aliasing Cx with Ax
// is safe.
GrB_Info GB_bind2nd__bor_uint32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *out = (uint32_t *) Cx_output ;
uint32_t *in = (uint32_t *) Ax_input ;
uint32_t scalar = (*((uint32_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
out [k] = in [k] | scalar ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x) | (aij) ; \
}
// C = op (x, A'): transpose A and apply BOR with the scalar as the first
// argument, via the GB_CAST_OP macro defined just above.
GrB_Info GB_bind1st_tran__bor_uint32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
// operator compiled out; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent code (generator boilerplate)
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij) | (y) ; \
}
// C = op (A', y): transpose A and apply BOR with the scalar as the second
// argument, via the GB_CAST_OP macro defined just above.
GrB_Info GB_bind2nd_tran__bor_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__iseq_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__iseq_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__iseq_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_fp64)
// A*D function (colscale): GB (_AxD__iseq_fp64)
// D*A function (rowscale): GB (_DxB__iseq_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_fp64)
// C=scalar+B GB (_bind1st__iseq_fp64)
// C=scalar+B' GB (_bind1st_tran__iseq_fp64)
// C=A+scalar GB (_bind2nd__iseq_fp64)
// C=A'+scalar GB (_bind2nd_tran__iseq_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_FP64 || GxB_NO_ISEQ_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B with ISEQ (cij = (aij == bij), stored as double) where C, A,
// and B are all dense; the loop body comes from the shared template.
GrB_Info GB (_Cdense_ewise3_noaccum__iseq_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B with ISEQ, accumulating a sparse matrix B into a dense matrix C;
// B_ek_slicing partitions B's entries across B_ntasks tasks.
GrB_Info GB (_Cdense_accumB__iseq_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b with ISEQ, accumulating a scalar b into a dense matrix C.
GrB_Info GB (_Cdense_accumb__iseq_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable -- the inner block always returns above;
// kept as emitted by the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D using ISEQ.
GrB_Info GB (_AxD__iseq_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
// the template writes C's values directly through Cx
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D using ISEQ.
GrB_Info GB (_DxB__iseq_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
// the template writes C's values directly through Cx
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with ISEQ on the set union of patterns.
GrB_Info GB (_AaddB__iseq_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
// workspaces the template may allocate; GB_FREE_WORK releases them
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult method 01: C = A.*B or C<M> = A.*B with ISEQ on the pattern
// intersection (general sparse case).
GrB_Info GB (_AemultB_01__iseq_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B with ISEQ, where A is sparse/hyper and
// B is bitmap/full.  GB_BINOP_FLIP selects whether a flipped variant of
// the operator must be emulated here (it is 0 for this operator).
GrB_Info GB (_AemultB_02__iseq_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
// operator compiled out; caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// C<M> = A.*B (eWiseMult, method 03), where M is sparse/hypersparse and
// both A and B are bitmap/full.  Generated wrapper around
// GB_emult_03_template.c; the M_ek_slicing arrays partition the mask's
// entries across M_ntasks tasks on M_nthreads threads.
GrB_Info GB (_AemultB_03__iseq_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// C = A.*B, C<M> = A.*B, or C<!M> = A.*B where the result C is held in
// bitmap form.  Generated wrapper around GB_bitmap_emult_template.c.
// Mask_comp selects the complemented mask (C<!M>); Mask_struct selects a
// structural (values-ignored) mask.
GrB_Info GB (_AemultB_bitmap__iseq_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x == Bx [p]) for every entry present in B: apply the ISEQ_FP64
// operator with the scalar bound as the first argument.  Bb is B's bitmap
// (NULL for full matrices; GBB handles both cases).  Cx and Bx may alias.
GrB_Info GB (_bind1st__iseq_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    const double x = (*((double *) x_input)) ;
    const double *Bx = (double *) Bx_input ;
    double *Cx = (double *) Cx_output ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < bnz ; p++)
    {
        // skip positions not present in the bitmap
        if (GBB (Bb, p))
        {
            double b_val = GBX (Bx, p, false) ;
            Cx [p] = (x == b_val) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] == y) for every entry present in A: apply the ISEQ_FP64
// operator with the scalar bound as the second argument.  Ab is A's bitmap
// (NULL for full matrices; GBB handles both cases).  Cx and Ax may alias.
GrB_Info GB (_bind2nd__iseq_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    const double *Ax = (double *) Ax_input ;
    const double y = (*((double *) y_input)) ;
    double *Cx = (double *) Cx_output ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        // skip positions not present in the bitmap
        if (GBB (Ab, p))
        {
            double a_val = GBX (Ax, p, false) ;
            Cx [p] = (a_val == y) ;
        }
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: with x bound first,
// cij = (x == aij).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar as
// the first argument.  Workspaces/A_slice partition A for the transpose
// template.
GrB_Info GB (_bind1st_tran__iseq_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the remainder of the file (preprocessor directives
// only; nothing executes after the returns above)
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: with y bound second,
// cij = (aij == y).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar as
// the second argument.  A's type already matches GB_ATYPE here, so no
// macro redefinition is needed (contrast with _bind1st_tran above).
GrB_Info GB (_bind2nd_tran__iseq_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
motif_base.h | // This code is modified from AutoMine and GraphZero
// Daniel Mawhirter and Bo Wu. SOSP 2019.
// AutoMine: Harmonizing High-Level Abstraction and High Performance for Graph Mining
// Please do not copy or distribute without permission of the author
// Count 3-vertex motifs of g, accumulating into per-thread counter vectors
// (one vector per OpenMP thread).  counter[0] accumulates via neighborhood
// differences (open wedges) and counter[1] via bounded neighborhood
// intersections (triangles) -- presumably matching AutoMine's 3-motif
// ordering; confirm against the codegen.
void automine_3motif(Graph &g, std::vector<std::vector<uint64_t>> &global_counters) {
#pragma omp parallel
  {
    auto &counter = global_counters.at(omp_get_thread_num());
#pragma omp for schedule(dynamic,1)
    for (vidType v0 = 0; v0 < g.V(); v0++) {
      VertexSet adj0 = g.N(v0);
      VertexSet adj0_lt = bounded(adj0, v0);  // neighbors with id < v0
      for (vidType i = 0; i < adj0.size(); i++) {
        vidType v1 = adj0.begin()[i];
        VertexSet adj1 = g.N(v1);
        counter[0] += difference_num(adj0, adj1, v1);
      }
      for (vidType i = 0; i < adj0_lt.size(); i++) {
        vidType v1 = adj0_lt.begin()[i];
        VertexSet adj1 = g.N(v1);
        counter[1] += intersection_num(adj0_lt, adj1, v1);
      }
    }
  }
}
// Count 4-vertex motifs of g into per-thread counter vectors.  Generated
// AutoMine code; set-name convention (inferred from the builder calls --
// confirm against the AutoMine codegen): yK = members of N(vK),
// nK = non-members of N(vK), fK suffix = restricted to ids below vK
// (symmetry breaking via bounded()/the extra bound argument).  Statement
// order matters: later sets reuse earlier ones.
void automine_4motif(Graph &g, std::vector<std::vector<uint64_t>> &global_counters) {
#pragma omp parallel
{
auto &counter = global_counters.at(omp_get_thread_num());
#pragma omp for schedule(dynamic,1)
for(vidType v0 = 0; v0 < g.V(); v0++) {
VertexSet y0 = g.N(v0);
VertexSet y0f0 = bounded(y0,v0);
// unordered v1 branch
for(vidType idx1 = 0; idx1 < y0.size(); idx1++) {
vidType v1 = y0.begin()[idx1];
VertexSet y1 = g.N(v1);
VertexSet y0n1f1 = difference_set(y0, y1, v1);
for(vidType idx2 = 0; idx2 < y0n1f1.size(); idx2++) {
vidType v2 = y0n1f1.begin()[idx2];
VertexSet y2 = g.N(v2);
counter[0] += difference_num(y0n1f1, y2, v2);
}
}
// ordered (v1 < v0) branch
for(vidType idx1 = 0; idx1 < y0f0.size(); idx1++) {
vidType v1 = y0f0.begin()[idx1];
VertexSet y1 = g.N(v1);
VertexSet y0y1 = intersection_set(y0, y1);
VertexSet y0f0y1f1 = intersection_set(y0f0, y1, v1);
VertexSet n0y1; difference_set(n0y1,y1, y0);
VertexSet n0f0y1; difference_set(n0f0y1,y1, y0);
VertexSet y0n1 = difference_set(y0, y1);
VertexSet y0f0n1f1 = difference_set(y0f0, y1, v1);
for(vidType idx2 = 0; idx2 < y0y1.size(); idx2++) {
vidType v2 = y0y1.begin()[idx2];
VertexSet y2 = g.N(v2);
counter[4] += difference_num(y0y1, y2, v2);
VertexSet n0n1y2; counter[2] += difference_num(difference_set(n0n1y2,y2, y0), y1);
}
for(vidType idx2 = 0; idx2 < y0f0y1f1.size(); idx2++) {
vidType v2 = y0f0y1f1.begin()[idx2];
VertexSet y2 = g.N(v2);
counter[5] += intersection_num(y0f0y1f1, y2, v2);
}
for(vidType idx2 = 0; idx2 < y0n1.size(); idx2++) {
vidType v2 = y0n1.begin()[idx2];
VertexSet y2 = g.N(v2);
counter[1] += difference_num(n0y1, y2);
}
for(vidType idx2 = 0; idx2 < y0f0n1f1.size(); idx2++) {
vidType v2 = y0f0n1f1.begin()[idx2];
VertexSet y2 = g.N(v2);
// NOTE(review): bound here is v0 (not v2) -- looks intentional in the
// generated plan, but verify against AutoMine's 4-motif schedule.
counter[3] += intersection_num(n0f0y1, y2, v0);
}
}
}
}
}
// Count 5-vertex motifs of g into per-thread counter vectors (21 pattern
// counters, indices 0-20).  Generated AutoMine code; same set-name
// convention as automine_4motif (yK/nK = in/out of N(vK), fK = bounded by
// vK for symmetry breaking -- inferred, confirm against the codegen).
// The exact statement order and reuse of intermediate sets is part of the
// generated matching plan; do not reorder.
void automine_5motif(Graph &g, std::vector<std::vector<uint64_t>> &global_counters) {
#pragma omp parallel
{
auto &counter = global_counters.at(omp_get_thread_num());
#pragma omp for schedule(dynamic,1)
for(vidType v0 = 0; v0 < g.V(); v0++) {
VertexSet y0 = g.N(v0);
VertexSet y0f0 = bounded(y0,v0);
// unordered v1 branch
for(vidType idx1 = 0; idx1 < y0.size(); idx1++) {
vidType v1 = y0.begin()[idx1];
VertexSet y1 = g.N(v1);
VertexSet y0y1 = intersection_set(y0, y1);
VertexSet n0y1; difference_set(n0y1,y1, y0);
VertexSet n0y1f0 = bounded(n0y1,v0);
VertexSet y0n1 = difference_set(y0, y1);
VertexSet y0n1f1 = bounded(y0n1,v1);
for(vidType idx2 = 0; idx2 < n0y1f0.size(); idx2++) {
vidType v2 = n0y1f0.begin()[idx2];
VertexSet y2 = g.N(v2);
VertexSet y0n1y2 = intersection_set(y0n1, y2);
VertexSet y0n1y2f1 = bounded(y0n1y2,v1);
VertexSet y0n1f1y2 = intersection_set(y0n1f1, y2);
for(vidType idx3 = 0; idx3 < y0n1y2.size(); idx3++) {
vidType v3 = y0n1y2.begin()[idx3];
VertexSet y3 = g.N(v3);
VertexSet n0n1n2y3; counter[3] += difference_num(difference_set(n0n1n2y3,difference_set(n0n1n2y3,y3, y0), y1), y2);
}
for(vidType idx3 = 0; idx3 < y0n1y2f1.size(); idx3++) {
vidType v3 = y0n1y2f1.begin()[idx3];
VertexSet y3 = g.N(v3);
counter[5] += difference_num(y0n1f1y2, y3, v3);
}
}
for(vidType idx2 = 0; idx2 < y0n1.size(); idx2++) {
vidType v2 = y0n1.begin()[idx2];
VertexSet y2 = g.N(v2);
VertexSet n0y1n2 = difference_set(n0y1, y2);
VertexSet y0n1n2f2 = difference_set(y0n1, y2, v2);
for(vidType idx3 = 0; idx3 < y0n1n2f2.size(); idx3++) {
vidType v3 = y0n1n2f2.begin()[idx3];
VertexSet y3 = g.N(v3);
counter[1] += difference_num(n0y1n2, y3);
}
}
for(vidType idx2 = 0; idx2 < y0n1f1.size(); idx2++) {
vidType v2 = y0n1f1.begin()[idx2];
VertexSet y2 = g.N(v2);
VertexSet y0y1y2 = intersection_set(y0y1, y2);
VertexSet n0n1y2; difference_set(n0n1y2,difference_set(n0n1y2,y2, y0), y1);
VertexSet n0y1n2 = difference_set(n0y1, y2);
VertexSet y0n1f1n2f2 = difference_set(y0n1f1, y2, v2);
for(vidType idx3 = 0; idx3 < y0y1y2.size(); idx3++) {
vidType v3 = y0y1y2.begin()[idx3];
VertexSet y3 = g.N(v3);
VertexSet n0n1n2y3; counter[4] += difference_num(difference_set(n0n1n2y3,difference_set(n0n1n2y3,y3, y0), y1), y2);
}
for(vidType idx3 = 0; idx3 < n0y1n2.size(); idx3++) {
vidType v3 = n0y1n2.begin()[idx3];
VertexSet y3 = g.N(v3);
counter[8] += difference_num(n0n1y2, y3);
}
for(vidType idx3 = 0; idx3 < y0n1f1n2f2.size(); idx3++) {
vidType v3 = y0n1f1n2f2.begin()[idx3];
VertexSet y3 = g.N(v3);
counter[0] += difference_num(y0n1f1n2f2, y3, v3);
}
}
}
// ordered (v1 < v0) branch
for(vidType idx1 = 0; idx1 < y0f0.size(); idx1++) {
vidType v1 = y0f0.begin()[idx1];
VertexSet y1 = g.N(v1);
VertexSet y0y1 = intersection_set(y0, y1);
VertexSet y0f0y1f1 = intersection_set(y0f0, y1, v1);
VertexSet n0y1; difference_set(n0y1,y1, y0);
VertexSet n0f0y1; difference_set(n0f0y1,y1, y0);
VertexSet y0n1 = difference_set(y0, y1);
VertexSet y0f0n1f1 = difference_set(y0f0, y1, v1);
for(vidType idx2 = 0; idx2 < y0y1.size(); idx2++) {
vidType v2 = y0y1.begin()[idx2];
VertexSet y2 = g.N(v2);
VertexSet n0y1y2 = intersection_set(n0y1, y2);
VertexSet n0f0y1y2 = intersection_set(n0f0y1, y2);
VertexSet y0n1y2 = intersection_set(y0n1, y2);
VertexSet y0f0n1y2f1 = intersection_set(y0f0n1f1, y2, v1);
VertexSet y0y1n2 = difference_set(y0y1, y2);
VertexSet y0y1n2f2 = bounded(y0y1n2,v2);
VertexSet n0n1y2; difference_set(n0n1y2,difference_set(n0n1y2,y2, y0), y1);
VertexSet n0n1y2f0 = bounded(n0n1y2,v0);
VertexSet n0f0n1y2; difference_set(n0f0n1y2,difference_set(n0f0n1y2,y2, y0), y1);
VertexSet n0y1n2 = difference_set(n0y1, y2);
VertexSet y0n1n2 = difference_set(y0n1, y2);
for(vidType idx3 = 0; idx3 < y0n1y2.size(); idx3++) {
vidType v3 = y0n1y2.begin()[idx3];
VertexSet y3 = g.N(v3);
counter[13] += difference_num(n0y1y2, y3);
}
for(vidType idx3 = 0; idx3 < y0f0n1y2f1.size(); idx3++) {
vidType v3 = y0f0n1y2f1.begin()[idx3];
VertexSet y3 = g.N(v3);
counter[18] += intersection_num(n0f0y1y2, y3, v0);
}
for(vidType idx3 = 0; idx3 < y0y1n2.size(); idx3++) {
vidType v3 = y0y1n2.begin()[idx3];
VertexSet y3 = g.N(v3);
counter[17] += intersection_num(y0y1n2, y3, v3);
VertexSet n0n1n2y3; counter[10] += difference_num(difference_set(n0n1n2y3,difference_set(n0n1n2y3,y3, y0), y1), y2);
}
for(vidType idx3 = 0; idx3 < y0y1n2f2.size(); idx3++) {
vidType v3 = y0y1n2f2.begin()[idx3];
VertexSet y3 = g.N(v3);
counter[16] += intersection_num(n0n1y2, y3);
counter[6] += difference_num(y0y1n2f2, y3, v3);
}
for(vidType idx3 = 0; idx3 < n0n1y2.size(); idx3++) {
vidType v3 = n0n1y2.begin()[idx3];
VertexSet y3 = g.N(v3);
VertexSet n0n1n2y3; counter[9] += difference_num(difference_set(n0n1n2y3,difference_set(n0n1n2y3,y3, y0), y1), y2);
counter[2] += difference_num(n0n1y2, y3, v3);
}
for(vidType idx3 = 0; idx3 < n0n1y2f0.size(); idx3++) {
vidType v3 = n0n1y2f0.begin()[idx3];
VertexSet y3 = g.N(v3);
counter[14] += intersection_num(n0f0n1y2, y3, v3);
}
for(vidType idx3 = 0; idx3 < y0n1n2.size(); idx3++) {
vidType v3 = y0n1n2.begin()[idx3];
VertexSet y3 = g.N(v3);
counter[12] += intersection_num(n0y1n2, y3);
counter[7] += difference_num(n0y1n2, y3);
}
}
for(vidType idx2 = 0; idx2 < y0f0y1f1.size(); idx2++) {
vidType v2 = y0f0y1f1.begin()[idx2];
VertexSet y2 = g.N(v2);
VertexSet y0y1y2 = intersection_set(y0y1, y2);
VertexSet y0f0y1f1y2f2 = intersection_set(y0f0y1f1, y2, v2);
for(vidType idx3 = 0; idx3 < y0y1y2.size(); idx3++) {
vidType v3 = y0y1y2.begin()[idx3];
VertexSet y3 = g.N(v3);
counter[19] += difference_num(y0y1y2, y3, v3);
VertexSet n0n1n2y3; counter[15] += difference_num(difference_set(n0n1n2y3,difference_set(n0n1n2y3,y3, y0), y1), y2);
}
for(vidType idx3 = 0; idx3 < y0f0y1f1y2f2.size(); idx3++) {
vidType v3 = y0f0y1f1y2f2.begin()[idx3];
VertexSet y3 = g.N(v3);
counter[20] += intersection_num(y0f0y1f1y2f2, y3, v3);
}
}
for(vidType idx2 = 0; idx2 < y0f0n1f1.size(); idx2++) {
vidType v2 = y0f0n1f1.begin()[idx2];
VertexSet y2 = g.N(v2);
VertexSet n0f0n1y2; difference_set(n0f0n1y2,difference_set(n0f0n1y2,y2, y0), y1);
VertexSet n0y1n2f0 = difference_set(n0f0y1, y2, v0);
for(vidType idx3 = 0; idx3 < n0y1n2f0.size(); idx3++) {
vidType v3 = n0y1n2f0.begin()[idx3];
VertexSet y3 = g.N(v3);
counter[11] += intersection_num(n0f0n1y2, y3, v0);
}
}
}
}
}
}
// Dispatch to the AutoMine k-motif kernel for k in {3, 4, 5}.
// g: input graph; counters: one counter vector per OpenMP thread, filled
// in by the selected kernel.
// Fix vs. original: an unsupported k used to print "Not implemented yet"
// to stdout and call exit(0), reporting success on failure; it now goes
// to stderr and exits with a nonzero status.
void automine_kmotif(Graph &g, unsigned k, std::vector<std::vector<uint64_t>> &counters) {
  std::cout << "Running AutoMine " << k << "-motif solver\n";
  switch (k) {
    case 3:
      automine_3motif(g, counters);
      break;
    case 4:
      automine_4motif(g, counters);
      break;
    case 5:
      automine_5motif(g, counters);
      break;
    default:
      std::cerr << "Not implemented yet\n";
      exit(1);
  }
}
|
threshold.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD %
% T H H R R E SS H H O O L D D %
% T HHHHH RRRR EEE SSS HHHHH O O L D D %
% T H H R R E SS H H O O L D D %
% T H H R R EEEEE SSSSS H H OOO LLLLL DDDD %
% %
% %
% MagickCore Image Threshold Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/property.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/shear.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
Define declarations.
*/
#define ThresholdsFilename "thresholds.xml"
/*
Typedef declarations.
*/
/*
  An ordered-dither threshold map, parsed from a <threshold> element of
  thresholds.xml (see MinimalThresholdMap below for the element layout).
*/
struct _ThresholdMap
{
char
*map_id,        /* "map" attribute naming this map, e.g. "checks" */
*description;   /* text of the <description> element */
size_t
width,          /* dimensions of the levels array, from <levels> */
height;
ssize_t
divisor,        /* "divisor" attribute of <levels> */
*levels;        /* width*height level values parsed from <levels> */
};
/*
Static declarations.
*/
/*
  Built-in XML defining the two simplest threshold maps; presumably used as
  a fallback when the external thresholds.xml cannot be loaded -- confirm
  in GetThresholdMapFile.  NOTE(review): the "checks" map is described and
  aliased as 2x1 but its <levels> element declares width=2 height=2;
  verify against the shipped thresholds.xml.
*/
static const char
*MinimalThresholdMap =
"<?xml version=\"1.0\"?>"
"<thresholds>"
" <threshold map=\"threshold\" alias=\"1x1\">"
" <description>Threshold 1x1 (non-dither)</description>"
" <levels width=\"1\" height=\"1\" divisor=\"2\">"
" 1"
" </levels>"
" </threshold>"
" <threshold map=\"checks\" alias=\"2x1\">"
" <description>Checkerboard 2x1 (dither)</description>"
" <levels width=\"2\" height=\"2\" divisor=\"3\">"
" 1 2"
" 2 1"
" </levels>"
" </threshold>"
"</thresholds>";
/*
Forward declarations.
*/
static ThresholdMap
*GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveThresholdImage() selects an individual threshold for each pixel
% based on the range of intensity values in its local neighborhood. This
% allows for thresholding of an image whose global intensity histogram
% doesn't contain distinctive peaks.
%
% The format of the AdaptiveThresholdImage method is:
%
% Image *AdaptiveThresholdImage(const Image *image,const size_t width,
% const size_t height,const double bias,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the local neighborhood.
%
% o height: the height of the local neighborhood.
%
% o bias: the mean bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveThresholdImage(const Image *image,
const size_t width,const size_t height,const double bias,
ExceptionInfo *exception)
{
#define AdaptiveThresholdImageTag "AdaptiveThreshold/Image"
CacheView
*image_view,
*threshold_view;
Image
*threshold_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickSizeType
number_pixels;
ssize_t
y;
/*
Initialize threshold image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
threshold_image=CloneImage(image,0,0,MagickTrue,exception);
if (threshold_image == (Image *) NULL)
return((Image *) NULL);
/* a degenerate 0-sized neighborhood: return the unthresholded clone */
if ((width == 0) || (height == 0))
return(threshold_image);
status=SetImageStorageClass(threshold_image,DirectClass,exception);
if (status == MagickFalse)
{
threshold_image=DestroyImage(threshold_image);
return((Image *) NULL);
}
/*
Threshold image.  Each row is processed independently; for each pixel the
local-window mean (plus bias) is the per-pixel threshold.
*/
status=MagickTrue;
progress=0;
number_pixels=(MagickSizeType) width*height;
image_view=AcquireVirtualCacheView(image,exception);
threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,threshold_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
channel_bias[MaxPixelChannels],
channel_sum[MaxPixelChannels];
register const Quantum
*magick_restrict p,
*magick_restrict pixels;
register Quantum
*magick_restrict q;
register ssize_t
i,
x;
ssize_t
center,
u,
v;
if (status == MagickFalse)
continue;
/* fetch the row plus a half-window border on every side (virtual pixels
   supply the out-of-image border) */
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
(height/2L),image->columns+width,height,exception);
q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,
1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/* offset of the window's center pixel within the bordered buffer */
center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+
GetPixelChannels(image)*(width/2);
/* seed the sliding-window sum for the first pixel of the row;
   channel_bias tracks the window's rightmost column so it can be
   dropped when the window slides -- TODO confirm against the slide
   step in the x loop below */
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(threshold_traits == UndefinedPixelTrait))
continue;
if ((threshold_traits & CopyPixelTrait) != 0)
{
/* channel is copied verbatim, not thresholded */
SetPixelChannel(threshold_image,channel,p[center+i],q);
continue;
}
pixels=p;
channel_bias[channel]=0.0;
channel_sum[channel]=0.0;
for (v=0; v < (ssize_t) height; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
if (u == (ssize_t) (width-1))
channel_bias[channel]+=pixels[i];
channel_sum[channel]+=pixels[i];
pixels+=GetPixelChannels(image);
}
pixels+=GetPixelChannels(image)*image->columns;
}
}
/* slide the window across the row, updating the sums incrementally */
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
mean;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(threshold_traits == UndefinedPixelTrait))
continue;
if ((threshold_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(threshold_image,channel,p[center+i],q);
continue;
}
/* drop the column leaving the window, add the column entering it */
channel_sum[channel]-=channel_bias[channel];
channel_bias[channel]=0.0;
pixels=p;
for (v=0; v < (ssize_t) height; v++)
{
channel_bias[channel]+=pixels[i];
pixels+=(width-1)*GetPixelChannels(image);
channel_sum[channel]+=pixels[i];
pixels+=GetPixelChannels(image)*(image->columns+1);
}
/* threshold against the biased local mean */
mean=(double) (channel_sum[channel]/number_pixels+bias);
SetPixelChannel(threshold_image,channel,(Quantum) ((double)
p[center+i] <= mean ? 0 : QuantumRange),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(threshold_image);
}
if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
threshold_image->type=image->type;
threshold_view=DestroyCacheView(threshold_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
threshold_image=DestroyImage(threshold_image);
return(threshold_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoThresholdImage() automatically performs image thresholding
% dependent on which method you specify.
%
% The format of the AutoThresholdImage method is:
%
% MagickBooleanType AutoThresholdImage(Image *image,
% const AutoThresholdMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-threshold.
%
% o method: choose from Kapur, OTSU, or Triangle.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Kapur maximum-entropy threshold: choose the histogram bin that maximizes
  the sum of the entropies of the "black" (<= bin) and "white" (> bin)
  partitions.  histogram must be normalized to sum to 1 (the caller,
  AutoThresholdImage, does this).  Returns the threshold as a percentage
  of MaxIntensity, or -1.0 on memory-allocation failure (reported via
  exception; image is used only for the error message).
*/
static double KapurThreshold(const Image *image,const double *histogram,
ExceptionInfo *exception)
{
#define MaxIntensity 255
double
*black_entropy,
*cumulative_histogram,
entropy,
epsilon,
maximum_entropy,
*white_entropy;
register ssize_t
i,
j;
size_t
threshold;
/*
Compute optimal threshold from the entropy of the histogram.
*/
cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*cumulative_histogram));
black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*black_entropy));
white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*white_entropy));
if ((cumulative_histogram == (double *) NULL) ||
(black_entropy == (double *) NULL) || (white_entropy == (double *) NULL))
{
/* release whichever allocations succeeded, then report the failure */
if (white_entropy != (double *) NULL)
white_entropy=(double *) RelinquishMagickMemory(white_entropy);
if (black_entropy != (double *) NULL)
black_entropy=(double *) RelinquishMagickMemory(black_entropy);
if (cumulative_histogram != (double *) NULL)
cumulative_histogram=(double *)
RelinquishMagickMemory(cumulative_histogram);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(-1.0);
}
/*
Entropy for black and white parts of the histogram.
*/
cumulative_histogram[0]=histogram[0];
for (i=1; i <= MaxIntensity; i++)
cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i];
/* epsilon guards the log() terms against empty partitions */
epsilon=MagickMinimumValue;
for (j=0; j <= MaxIntensity; j++)
{
/*
Black entropy.
*/
black_entropy[j]=0.0;
if (cumulative_histogram[j] > epsilon)
{
entropy=0.0;
for (i=0; i <= j; i++)
if (histogram[i] > epsilon)
entropy-=histogram[i]/cumulative_histogram[j]*
log(histogram[i]/cumulative_histogram[j]);
black_entropy[j]=entropy;
}
/*
White entropy.
*/
white_entropy[j]=0.0;
if ((1.0-cumulative_histogram[j]) > epsilon)
{
entropy=0.0;
for (i=j+1; i <= MaxIntensity; i++)
if (histogram[i] > epsilon)
entropy-=histogram[i]/(1.0-cumulative_histogram[j])*
log(histogram[i]/(1.0-cumulative_histogram[j]));
white_entropy[j]=entropy;
}
}
/*
Find histogram bin with maximum entropy.
*/
maximum_entropy=black_entropy[0]+white_entropy[0];
threshold=0;
for (j=1; j <= MaxIntensity; j++)
if ((black_entropy[j]+white_entropy[j]) > maximum_entropy)
{
maximum_entropy=black_entropy[j]+white_entropy[j];
threshold=(size_t) j;
}
/*
Free resources.
*/
white_entropy=(double *) RelinquishMagickMemory(white_entropy);
black_entropy=(double *) RelinquishMagickMemory(black_entropy);
cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram);
return(100.0*threshold/MaxIntensity);
}
/*
  Otsu threshold: choose the bin that maximizes the inter-class variance
  sigma between the two partitions of the (normalized) histogram.
  Returns the threshold as a percentage of MaxIntensity, or -1.0 on
  memory-allocation failure (reported via exception; image is used only
  for the error message).
*/
static double OTSUThreshold(const Image *image,const double *histogram,
ExceptionInfo *exception)
{
double
max_sigma,
*myu,
*omega,
*probability,
*sigma,
threshold;
register ssize_t
i;
/*
Compute optimal threshold from maximization of inter-class variance.
*/
myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu));
omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega));
probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*probability));
sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma));
if ((myu == (double *) NULL) || (omega == (double *) NULL) ||
(probability == (double *) NULL) || (sigma == (double *) NULL))
{
/* release whichever allocations succeeded, then report the failure */
if (sigma != (double *) NULL)
sigma=(double *) RelinquishMagickMemory(sigma);
if (probability != (double *) NULL)
probability=(double *) RelinquishMagickMemory(probability);
if (omega != (double *) NULL)
omega=(double *) RelinquishMagickMemory(omega);
if (myu != (double *) NULL)
myu=(double *) RelinquishMagickMemory(myu);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(-1.0);
}
/*
Calculate probability density.
*/
for (i=0; i <= (ssize_t) MaxIntensity; i++)
probability[i]=histogram[i];
/*
Generate probability of graylevels and mean value for separation.
omega[i] = cumulative probability up to bin i; myu[i] = cumulative mean.
*/
omega[0]=probability[0];
myu[0]=0.0;
for (i=1; i <= (ssize_t) MaxIntensity; i++)
{
omega[i]=omega[i-1]+probability[i];
myu[i]=myu[i-1]+i*probability[i];
}
/*
Sigma maximization: inter-class variance and compute optimal threshold.
NOTE(review): the loop stops one short of MaxIntensity, so the last bin
is never evaluated; with a normalized histogram omega there would be 1.0
and rejected anyway -- confirm.
*/
threshold=0;
max_sigma=0.0;
for (i=0; i < (ssize_t) MaxIntensity; i++)
{
sigma[i]=0.0;
if ((omega[i] != 0.0) && (omega[i] != 1.0))
sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0-
omega[i]));
if (sigma[i] > max_sigma)
{
max_sigma=sigma[i];
threshold=(double) i;
}
}
/*
Free resources.
*/
myu=(double *) RelinquishMagickMemory(myu);
omega=(double *) RelinquishMagickMemory(omega);
probability=(double *) RelinquishMagickMemory(probability);
sigma=(double *) RelinquishMagickMemory(sigma);
return(100.0*threshold/MaxIntensity);
}
/*
  Triangle threshold: draw a line from the histogram's peak to the far end
  of its occupied range (whichever tail is longer) and select the bin whose
  histogram point lies farthest from that line, on the expected side of it.
  Returns the threshold as a percentage of MaxIntensity.
*/
static double TriangleThreshold(const double *histogram)
{
  ssize_t lo, hi, peak, threshold;
  double peak_count, x1, x2, y1, y2, a, b, c, inverse_ratio, max_distance;
  /* first occupied bin */
  lo=0;
  for (ssize_t bin=0; bin <= (ssize_t) MaxIntensity; bin++)
    if (histogram[bin] > 0.0)
      {
        lo=bin;
        break;
      }
  /* last occupied bin */
  hi=0;
  for (ssize_t bin=(ssize_t) MaxIntensity; bin >= 0; bin--)
    if (histogram[bin] > 0.0)
      {
        hi=bin;
        break;
      }
  /* bin with the largest count (first such bin on ties) */
  peak=0;
  peak_count=0.0;
  for (ssize_t bin=0; bin <= (ssize_t) MaxIntensity; bin++)
    if (histogram[bin] > peak_count)
      {
        peak=bin;
        peak_count=histogram[bin];
      }
  /*
    Line through (x1,y1)=(peak,histogram[peak]) and (x2,0), where x2 is the
    end of the longer tail.
  */
  x1=(double) peak;
  y1=histogram[peak];
  x2=(double) hi;
  if ((peak-lo) >= (hi-peak))
    x2=(double) lo;
  y2=0.0;
  a=y1-y2;
  b=x2-x1;
  c=(-1.0)*(a*x1+b*y1);
  inverse_ratio=1.0/sqrt(a*a+b*b+c*c);
  /* scan the chosen tail for the point farthest from the line */
  threshold=0;
  max_distance=0.0;
  if (x2 == (double) lo)
    {
      for (ssize_t bin=lo; bin < peak; bin++)
        {
          double segment=inverse_ratio*(a*bin+b*histogram[bin]+c);
          double distance=sqrt(segment*segment);
          if ((distance > max_distance) && (segment > 0.0))
            {
              threshold=bin;
              max_distance=distance;
            }
        }
    }
  else
    {
      for (ssize_t bin=hi; bin > peak; bin--)
        {
          double segment=inverse_ratio*(a*bin+b*histogram[bin]+c);
          double distance=sqrt(segment*segment);
          if ((distance > max_distance) && (segment < 0.0))
            {
              threshold=bin;
              max_distance=distance;
            }
        }
    }
  return(100.0*threshold/MaxIntensity);
}
MagickExport MagickBooleanType AutoThresholdImage(Image *image,
const AutoThresholdMethod method,ExceptionInfo *exception)
{
CacheView
*image_view;
char
property[MagickPathExtent];
double
gamma,
*histogram,
sum,
threshold;
MagickBooleanType
status;
register ssize_t
i;
ssize_t
y;
/*
Form histogram of pixel intensities (256 bins).
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
sizeof(*histogram));
if (histogram == (double *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
status=MagickTrue;
(void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
/* NOTE(review): a NULL row silently truncates the histogram rather than
   failing; presumably acceptable best-effort behavior -- confirm */
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
double intensity = GetPixelIntensity(image,p);
histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
p+=GetPixelChannels(image);
}
}
image_view=DestroyCacheView(image_view);
/*
Normalize histogram so the bins sum to 1 (the method functions below
assume a probability distribution).
*/
sum=0.0;
for (i=0; i <= (ssize_t) MaxIntensity; i++)
sum+=histogram[i];
gamma=PerceptibleReciprocal(sum);
for (i=0; i <= (ssize_t) MaxIntensity; i++)
histogram[i]=gamma*histogram[i];
/*
Discover threshold from histogram.  Each method returns a percentage in
[0,100], or a negative value on failure.
*/
switch (method)
{
case KapurThresholdMethod:
{
threshold=KapurThreshold(image,histogram,exception);
break;
}
case OTSUThresholdMethod:
default:
{
threshold=OTSUThreshold(image,histogram,exception);
break;
}
case TriangleThresholdMethod:
{
threshold=TriangleThreshold(histogram);
break;
}
}
histogram=(double *) RelinquishMagickMemory(histogram);
if (threshold < 0.0)
status=MagickFalse;
if (status == MagickFalse)
return(MagickFalse);
/*
Threshold image; record the chosen threshold as an image property.
*/
(void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
(void) SetImageProperty(image,"auto-threshold:threshold",property,exception);
return(BilevelImage(image,QuantumRange*threshold/100.0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B i l e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BilevelImage() changes the value of individual pixels based on the
% intensity of each pixel channel. The result is a high-contrast image.
%
% More precisely each channel value of the image is 'thresholded' so that if
% it is equal to or less than the given value it is set to zero, while any
% value greater than that give is set to it maximum or QuantumRange.
%
% This function is what is used to implement the "-threshold" operator for
% the command line API.
%
% If the default channel setting is given the image is thresholded using just
% the gray 'intensity' of the image, rather than the individual channels.
%
% The format of the BilevelImage method is:
%
% MagickBooleanType BilevelImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold values.
%
% o exception: return any errors or warnings in this structure.
%
% Aside: You can get the same results as operator using LevelImages()
% with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Threshold every updatable channel of each pixel: values at or below
    'threshold' become 0, values above become QuantumRange.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Bilevel threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          With the default channel mask the gray intensity is thresholded;
          otherwise each channel is thresholded independently.
        */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        q[i]=(Quantum) (pixel <= threshold ? 0 : QuantumRange);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /*
          Bug fix: 'progress' was incremented a second time inside this call,
          double-counting rows and bypassing the atomic protection above.
          Pass the counter by value, as the sibling threshold functions do.
        */
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l a c k T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlackThresholdImage() is like ThresholdImage() but forces all pixels below
% the threshold into black while leaving all pixels at or above the threshold
% unchanged.
%
% The format of the BlackThresholdImage method is:
%
% MagickBooleanType BlackThresholdImage(Image *image,
% const char *thresholds,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
GeometryInfo
geometry_info;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
threshold;
MagickStatusType
flags;
ssize_t
y;
/*
  Force every channel value strictly below its per-channel threshold to
  black (0); values at or above the threshold are left unchanged.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/* A NULL threshold string is a no-op, not an error. */
if (thresholds == (const char *) NULL)
return(MagickTrue);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
/* NOTE(review): gray images are promoted to sRGB before thresholding here. */
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) SetImageColorspace(image,sRGBColorspace,exception);
GetPixelInfo(image,&threshold);
/*
  Geometry parse of the threshold string: rho applies to all channels,
  sigma/xi/psi override green/blue/alpha respectively (psi maps to black,
  and chi to alpha, for CMYK).  Alpha defaults to 100.
*/
flags=ParseGeometry(thresholds,&geometry_info);
threshold.red=geometry_info.rho;
threshold.green=geometry_info.rho;
threshold.blue=geometry_info.rho;
threshold.black=geometry_info.rho;
threshold.alpha=100.0;
if ((flags & SigmaValue) != 0)
threshold.green=geometry_info.sigma;
if ((flags & XiValue) != 0)
threshold.blue=geometry_info.xi;
if ((flags & PsiValue) != 0)
threshold.alpha=geometry_info.psi;
if (threshold.colorspace == CMYKColorspace)
{
if ((flags & PsiValue) != 0)
threshold.black=geometry_info.psi;
if ((flags & ChiValue) != 0)
threshold.alpha=geometry_info.chi;
}
/* A trailing '%' scales all thresholds from percentages to quantum range. */
if ((flags & PercentValue) != 0)
{
threshold.red*=(MagickRealType) (QuantumRange/100.0);
threshold.green*=(MagickRealType) (QuantumRange/100.0);
threshold.blue*=(MagickRealType) (QuantumRange/100.0);
threshold.black*=(MagickRealType) (QuantumRange/100.0);
threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
}
/*
Black threshold image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
pixel;
register ssize_t
i;
pixel=GetPixelIntensity(image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
/*
  Default channel mask compares gray intensity; a custom mask
  compares each channel value independently.
*/
if (image->channel_mask != DefaultChannels)
pixel=(double) q[i];
/* Strictly-below comparison: values equal to the threshold survive. */
if (pixel < GetPixelInfoChannel(&threshold,channel))
q[i]=(Quantum) 0;
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ThresholdImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l a m p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClampImage() sets each pixel whose value is below zero to zero and any
% pixel whose value is above the quantum range to the quantum range (e.g.
% 65535) otherwise the pixel value remains unchanged.
%
% The format of the ClampImage method is:
%
% MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
{
#define ClampImageTag "Clamp/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
  Clamp every channel value into the representable quantum range
  [0, QuantumRange]; in-range values are left unchanged.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
  PseudoClass fast path: clamp the colormap entries only, then sync the
  raster to the updated colormap.
*/
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
register PixelInfo
*magick_restrict q;
q=image->colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
q->red=(double) ClampPixel(q->red);
q->green=(double) ClampPixel(q->green);
q->blue=(double) ClampPixel(q->blue);
q->alpha=(double) ClampPixel(q->alpha);
q++;
}
return(SyncImage(image,exception));
}
/*
Clamp image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampPixel((MagickRealType) q[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ClampImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyThresholdMap() de-allocates the given ThresholdMap.
%
% The format of the DestroyThresholdMap method is:
%
% ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
%
% A description of each parameter follows.
%
% o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  /*
    Release a ThresholdMap together with its owned members (map_id,
    description, levels).  Always returns NULL so callers can write
    map=DestroyThresholdMap(map).
  */
  assert(map != (ThresholdMap *) NULL);
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  return((ThresholdMap *) RelinquishMagickMemory(map));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMap() loads and searches one or more threshold map files for the
% map matching the given name or alias.
%
% The format of the GetThresholdMap method is:
%
% ThresholdMap *GetThresholdMap(const char *map_id,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o map_id: ID of the map to look for.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  ThresholdMap
    *map;

  /*
    Try the built-in minimal map list first; if the id is not found there
    (and zero-configuration support is off), fall back to scanning every
    threshold XML file on the configure path.
  */
  map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception);
#if !defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT)
  if (map == (ThresholdMap *) NULL)
    {
      const StringInfo
        *option;

      LinkedListInfo
        *options;

      options=GetConfigureOptions(ThresholdsFilename,exception);
      for (option=(const StringInfo *) GetNextValueInLinkedList(options);
           option != (const StringInfo *) NULL;
           option=(const StringInfo *) GetNextValueInLinkedList(options))
      {
        map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
          GetStringInfoPath(option),map_id,exception);
        if (map != (ThresholdMap *) NULL)
          break;
      }
      options=DestroyConfigureOptions(options);
    }
#endif
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMapFile() look for a given threshold map name or alias in the
% given XML file data, and return the allocated the map when found.
%
% The format of the GetThresholdMapFile method is:
%
% ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
% const char *map_id,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o map_id: ID of the map to look for in XML list.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
const char *map_id,ExceptionInfo *exception)
{
char
*p;
const char
*attribute,
*content;
double
value;
register ssize_t
i;
ThresholdMap
*map;
XMLTreeInfo
*description,
*levels,
*threshold,
*thresholds;
/*
  Parse the given threshold XML data and return an allocated ThresholdMap
  for the <threshold> element whose "map" or "alias" attribute matches
  map_id, or NULL when no match is found or the entry is malformed.
  The caller owns the result and must free it with DestroyThresholdMap().
*/
(void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
"Loading threshold map file \"%s\" ...",filename);
map=(ThresholdMap *) NULL;
thresholds=NewXMLTree(xml,exception);
if (thresholds == (XMLTreeInfo *) NULL)
return(map);
/* Search <threshold> siblings for a matching "map" or "alias" attribute. */
for (threshold=GetXMLTreeChild(thresholds,"threshold");
threshold != (XMLTreeInfo *) NULL;
threshold=GetNextXMLTreeTag(threshold))
{
attribute=GetXMLTreeAttribute(threshold,"map");
if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
break;
attribute=GetXMLTreeAttribute(threshold,"alias");
if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
break;
}
/* Not found in this file: not an error, the caller may try other files. */
if (threshold == (XMLTreeInfo *) NULL)
{
thresholds=DestroyXMLTree(thresholds);
return(map);
}
/* Both <description> and <levels> children are mandatory. */
description=GetXMLTreeChild(threshold,"description");
if (description == (XMLTreeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement", "<description>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
return(map);
}
levels=GetXMLTreeChild(threshold,"levels");
if (levels == (XMLTreeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement", "<levels>, map \"%s\"", map_id);
thresholds=DestroyXMLTree(thresholds);
return(map);
}
/* Allocate the map; members are NULLed so DestroyThresholdMap is safe. */
map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map));
map->map_id=(char *) NULL;
map->description=(char *) NULL;
map->levels=(ssize_t *) NULL;
attribute=GetXMLTreeAttribute(threshold,"map");
if (attribute != (char *) NULL)
map->map_id=ConstantString(attribute);
content=GetXMLTreeContent(description);
if (content != (char *) NULL)
map->description=ConstantString(content);
/* The <levels> width/height/divisor attributes are mandatory and bounded. */
attribute=GetXMLTreeAttribute(levels,"width");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
map->width=StringToUnsignedLong(attribute);
if (map->width == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
attribute=GetXMLTreeAttribute(levels,"height");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute", "<levels height>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
map->height=StringToUnsignedLong(attribute);
if (map->height == 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
attribute=GetXMLTreeAttribute(levels,"divisor");
if (attribute == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
map->divisor=(ssize_t) StringToLong(attribute);
if (map->divisor < 2)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
content=GetXMLTreeContent(levels);
if (content == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingContent", "<levels>, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
/*
  NOTE(review): map->height*sizeof(*map->levels) is computed before
  AcquireQuantumMemory's own overflow check; confirm upstream bounds on
  width/height rule out overflow here.
*/
map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
sizeof(*map->levels));
if (map->levels == (ssize_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
/*
  Parse exactly width*height level values; each must lie in [0, divisor].
  strtol leaves p == content when no number could be read (too few values).
*/
for (i=0; i < (ssize_t) (map->width*map->height); i++)
{
map->levels[i]=(ssize_t) strtol(content,&p,10);
if (p == content)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidContent", "<level> too few values, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
(double) map->levels[i],map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
content=p;
}
/* A further parseable number means the file supplied too many values. */
value=(double) strtol(content,&p,10);
(void) value;
if (p != content)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlInvalidContent", "<level> too many values, map \"%s\"",map_id);
thresholds=DestroyXMLTree(thresholds);
map=DestroyThresholdMap(map);
return(map);
}
thresholds=DestroyXMLTree(thresholds);
return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L i s t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMapFile() lists the threshold maps and their descriptions
% in the given XML file data.
%
% The format of the ListThresholdMapFile method is:
%
% MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: A pointer to the output FILE.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
const char *filename,ExceptionInfo *exception)
{
const char
*alias,
*content,
*map;
XMLTreeInfo
*description,
*threshold,
*thresholds;
/*
  Print a "Map / Alias / Description" table for every <threshold> entry in
  the given XML data to 'file'; returns MagickFalse on malformed XML.
*/
assert( xml != (char *) NULL );
assert( file != (FILE *) NULL );
(void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
"Loading threshold map file \"%s\" ...",filename);
thresholds=NewXMLTree(xml,exception);
if ( thresholds == (XMLTreeInfo *) NULL )
return(MagickFalse);
(void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
(void) FormatLocaleFile(file,
"----------------------------------------------------\n");
threshold=GetXMLTreeChild(thresholds,"threshold");
for ( ; threshold != (XMLTreeInfo *) NULL;
threshold=GetNextXMLTreeTag(threshold))
{
/* "map" and <description> are required; "alias" is optional. */
map=GetXMLTreeAttribute(threshold,"map");
if (map == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingAttribute", "<map>");
thresholds=DestroyXMLTree(thresholds);
return(MagickFalse);
}
alias=GetXMLTreeAttribute(threshold,"alias");
description=GetXMLTreeChild(threshold,"description");
if (description == (XMLTreeInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingElement", "<description>, map \"%s\"",map);
thresholds=DestroyXMLTree(thresholds);
return(MagickFalse);
}
content=GetXMLTreeContent(description);
if (content == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"XmlMissingContent", "<description>, map \"%s\"", map);
thresholds=DestroyXMLTree(thresholds);
return(MagickFalse);
}
(void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
content);
}
thresholds=DestroyXMLTree(thresholds);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i s t T h r e s h o l d M a p s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMaps() lists the threshold maps and their descriptions
% as defined by "threshold.xml" to a file.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: A pointer to the output FILE.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  /*
    List every threshold map found on the configure path (threshold.xml) to
    the given file, defaulting to stdout; the result is MagickFalse if any
    per-file listing failed.
  */
  if (file == (FILE *) NULL)
    file=stdout;
  status=MagickTrue;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  for (option=(const StringInfo *) GetNextValueInLinkedList(options);
       option != (const StringInfo *) NULL;
       option=(const StringInfo *) GetNextValueInLinkedList(options))
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O r d e r e d D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OrderedDitherImage() performs an ordered dither based on a number
% of pre-defined dithering threshold maps, but over multiple intensity
% levels, which can be different for different channels, according to the
% input argument.
%
% The format of the OrderedDitherImage method is:
%
% MagickBooleanType OrderedDitherImage(Image *image,
% const char *threshold_map,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold_map: A string containing the name of the threshold dither
% map to use, followed by zero or more numbers representing the number
% of color levels tho dither between.
%
% Any level number less than 2 will be equivalent to 2, and means only
% binary dithering will be applied to each color channel.
%
% No numbers also means a 2 level (bitmap) dither will be applied to all
% channels, while a single number is the number of levels applied to each
% channel in sequence. More numbers will be applied in turn to each of
% the color channels.
%
% For example: "o3x3,6" will generate a 6 level posterization of the
% image with an ordered 3x3 diffused pixel dither being applied between
% each level. While checker,8,8,4 will produce a 332 colormapped image
% with only a single checkerboard hash pattern (50% gray) between each
% color level, to basically double the number of color levels with
% a bare minimum of dithering.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OrderedDitherImage(Image *image,
  const char *threshold_map,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  char
    token[MagickPathExtent];

  const char
    *p;

  double
    levels[CompositePixelChannel];

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  ThresholdMap
    *map;

  /*
    Ordered dither: posterize each channel to a per-channel number of
    levels, using a tiled threshold map to decide rounding at each pixel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (threshold_map == (const char *) NULL)
    return(MagickTrue);
  /*
    Extract the map name: skip leading whitespace/commas, then copy up to
    the next separator (bounded by MagickPathExtent).
  */
  p=(char *) threshold_map;
  while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) &&
         (*p != '\0'))
    p++;
  threshold_map=p;
  while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) &&
         (*p != '\0'))
  {
    if ((p-threshold_map) >= (MagickPathExtent-1))
      break;
    token[p-threshold_map]=(*p);
    p++;
  }
  token[p-threshold_map]='\0';
  map=GetThresholdMap(token,exception);
  if (map == (ThresholdMap *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "InvalidArgument","%s : '%s'","ordered-dither",threshold_map);
      return(MagickFalse);
    }
  /*
    Parse the optional per-channel level counts following the map name;
    a single number applies to all channels, further numbers apply in turn.
    The default of 2 yields a binary (bitmap) dither.
  */
  for (i=0; i < MaxPixelChannels; i++)
    levels[i]=2.0;
  p=strchr((char *) threshold_map,',');
  if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0))
    {
      (void) GetNextToken(p,&p,MagickPathExtent,token);
      for (i=0; (i < MaxPixelChannels); i++)
        levels[i]=StringToDouble(token,(char **) NULL);
      for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++)
      {
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        if (*token == ',')
          (void) GetNextToken(p,&p,MagickPathExtent,token);
        levels[i]=StringToDouble(token,(char **) NULL);
      }
    }
  for (i=0; i < MaxPixelChannels; i++)
    if (fabs(levels[i]) >= 1)
      levels[i]-=1.0;
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    {
      /* Bug fix: the map was leaked on this early-error path. */
      map=DestroyThresholdMap(map);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      ssize_t
        n;

      n=0;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        ssize_t
          level,
          threshold;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (fabs(levels[n]) < MagickEpsilon)
          {
            n++;
            continue;
          }
        /*
          Split the scaled value into a posterization level and a residual
          threshold; the tiled map entry decides whether to round up.
        */
        threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1));
        level=threshold/(map->divisor-1);
        threshold-=level*(map->divisor-1);
        q[i]=ClampToQuantum((double) (level+(threshold >=
          map->levels[(x % map->width)+map->width*(y % map->height)]))*
          QuantumRange/levels[n]);
        n++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,DitherImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  map=DestroyThresholdMap(map);
  /*
    Bug fix: this function returned MagickTrue unconditionally, hiding any
    row that failed in the loop above; report the accumulated status.
  */
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P e r c e p t i b l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PerceptibleImage() set each pixel whose value is less than |epsilon| to
% epsilon or -epsilon (whichever is closer) otherwise the pixel value remains
% unchanged.
%
% The format of the PerceptibleImage method is:
%
% MagickBooleanType PerceptibleImage(Image *image,const double epsilon,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
const double epsilon)
{
double
sign;
sign=(double) quantum < 0.0 ? -1.0 : 1.0;
if ((sign*quantum) >= epsilon)
return(quantum);
return((Quantum) (sign*epsilon));
}
MagickExport MagickBooleanType PerceptibleImage(Image *image,
const double epsilon,ExceptionInfo *exception)
{
#define PerceptibleImageTag "Perceptible/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
/*
  Replace every channel value whose magnitude is below epsilon with
  +/-epsilon (via PerceptibleThreshold); other values are unchanged.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
  PseudoClass fast path: threshold the colormap entries only, then sync
  the raster to the updated colormap.
*/
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
register PixelInfo
*magick_restrict q;
q=image->colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red),
epsilon);
q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green),
epsilon);
q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue),
epsilon);
q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha),
epsilon);
q++;
}
return(SyncImage(image,exception));
}
/*
Perceptible image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
/*
  NOTE(review): unlike the sibling threshold loops, this skips only
  undefined channels (not non-updatable ones) — confirm intentional.
*/
if (traits == UndefinedPixelTrait)
continue;
q[i]=PerceptibleThreshold(q[i],epsilon);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,PerceptibleImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n d o m T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RandomThresholdImage() changes the value of individual pixels based on the
% intensity of each pixel compared to a random threshold. The result is a
% low-contrast, two color image.
%
% The format of the RandomThresholdImage method is:
%
% MagickBooleanType RandomThresholdImage(Image *image,
% const double min_threshold,const double max_threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o low,high: Specify the high and low thresholds. These values range from
% 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const double min_threshold, const double max_threshold,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Threshold each channel against a per-pixel random value in
    [min_threshold,max_threshold]; values at or below it become 0, values
    above become QuantumRange.  (The unused function-scope PixelInfo
    'threshold' and its GetPixelInfo() call were removed: it was shadowed
    by the loop-local double and never read.)
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Random threshold image.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* Only parallelize when the RNG is unseeded (key == ~0UL) so seeded runs
     stay reproducible. */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          threshold;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          Values outside [min_threshold,max_threshold] are compared against
          the nearest bound; values inside get a random threshold.
        */
        if ((double) q[i] < min_threshold)
          threshold=min_threshold;
        else
          if ((double) q[i] > max_threshold)
            threshold=max_threshold;
          else
            threshold=(double) (QuantumRange*
              GetPseudoRandomValue(random_info[id]));
        q[i]=(double) q[i] <= threshold ? 0 : QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n g e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RangeThresholdImage() applies soft and hard thresholding.
%
% The format of the RangeThresholdImage method is:
%
% MagickBooleanType RangeThresholdImage(Image *image,
% const double low_black,const double low_white,const double high_white,
% const double high_black,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o low_black: Define the minimum black threshold value.
%
% o low_white: Define the minimum white threshold value.
%
% o high_white: Define the maximum white threshold value.
%
% o high_black: Define the maximum black threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RangeThresholdImage(Image *image,
const double low_black,const double low_white,const double high_white,
const double high_black,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) TransformImageColorspace(image,sRGBColorspace,exception);
/*
Range threshold image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
pixel;
register ssize_t
i;
pixel=GetPixelIntensity(image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (image->channel_mask != DefaultChannels)
pixel=(double) q[i];
if (pixel < low_black)
q[i]=0;
else
if ((pixel >= low_black) && (pixel < low_white))
q[i]=ClampToQuantum(QuantumRange*
PerceptibleReciprocal(low_white-low_black)*(pixel-low_black));
else
if ((pixel >= low_white) && (pixel <= high_white))
q[i]=QuantumRange;
else
if ((pixel > high_white) && (pixel <= high_black))
q[i]=ClampToQuantum(QuantumRange*PerceptibleReciprocal(
high_black-high_white)*(high_black-pixel));
else
if (pixel > high_black)
q[i]=0;
else
q[i]=0;
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ThresholdImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W h i t e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
% the threshold into white while leaving all pixels at or below the threshold
% unchanged.
%
% The format of the WhiteThresholdImage method is:
%
% MagickBooleanType WhiteThresholdImage(Image *image,
% const char *thresholds,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o thresholds: Define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"
CacheView
*image_view;
GeometryInfo
geometry_info;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelInfo
threshold;
MagickStatusType
flags;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (thresholds == (const char *) NULL)
return(MagickTrue);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if (IsGrayColorspace(image->colorspace) != MagickFalse)
(void) TransformImageColorspace(image,sRGBColorspace,exception);
GetPixelInfo(image,&threshold);
flags=ParseGeometry(thresholds,&geometry_info);
threshold.red=geometry_info.rho;
threshold.green=geometry_info.rho;
threshold.blue=geometry_info.rho;
threshold.black=geometry_info.rho;
threshold.alpha=100.0;
if ((flags & SigmaValue) != 0)
threshold.green=geometry_info.sigma;
if ((flags & XiValue) != 0)
threshold.blue=geometry_info.xi;
if ((flags & PsiValue) != 0)
threshold.alpha=geometry_info.psi;
if (threshold.colorspace == CMYKColorspace)
{
if ((flags & PsiValue) != 0)
threshold.black=geometry_info.psi;
if ((flags & ChiValue) != 0)
threshold.alpha=geometry_info.chi;
}
if ((flags & PercentValue) != 0)
{
threshold.red*=(MagickRealType) (QuantumRange/100.0);
threshold.green*=(MagickRealType) (QuantumRange/100.0);
threshold.blue*=(MagickRealType) (QuantumRange/100.0);
threshold.black*=(MagickRealType) (QuantumRange/100.0);
threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
}
/*
White threshold image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
pixel;
register ssize_t
i;
pixel=GetPixelIntensity(image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
if (image->channel_mask != DefaultChannels)
pixel=(double) q[i];
if (pixel > GetPixelInfoChannel(&threshold,channel))
q[i]=QuantumRange;
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
|
c7f75db0cd39a618fa39b010b313075df98415a3.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "omp.h"
struct dataobj
{
void *restrict data;
int * size;
int * npsize;
int * dsize;
int * hsize;
int * hofs;
int * oofs;
} ;
struct profiler
{
double section0;
} ;
int norm2(const float h_x, const float h_y, const float h_z, struct dataobj *restrict n_vec, const float o_x, const float o_y, const float o_z, struct dataobj *restrict rec_vec, struct dataobj *restrict rec_coords_vec, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int p_rec_M, const int p_rec_m, const int time_M, const int time_m, struct profiler * timers)
{
float (*restrict n) __attribute__ ((aligned (64))) = (float (*)) n_vec->data;
float (*restrict rec)[rec_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_vec->size[1]]) rec_vec->data;
float (*restrict rec_coords)[rec_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_coords_vec->size[1]]) rec_coords_vec->data;
#pragma omp target enter data map(to: rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
#pragma omp target enter data map(to: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]])
float sum = 0.0F;
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
/* Begin section0 */
#pragma omp target teams distribute parallel for collapse(2) reduction(+:sum)
for (int time = time_m; time <= time_M; time += 1)
{
for (int p_rec = p_rec_m; p_rec <= p_rec_M; p_rec += 1)
{
int ii_rec_0 = (int)(floor((-o_x + rec_coords[p_rec][0])/h_x));
int ii_rec_1 = (int)(floor((-o_y + rec_coords[p_rec][1])/h_y));
int ii_rec_2 = (int)(floor((-o_z + rec_coords[p_rec][2])/h_z));
if (x_M >= ii_rec_0 && y_M >= ii_rec_1 && z_M >= ii_rec_2 && x_m <= ii_rec_0 && y_m <= ii_rec_1 && z_m <= ii_rec_2)
{
sum += fabs(pow(rec[time][p_rec], 2));
}
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;
n[0] = sum;
#pragma omp target exit data map(delete: rec[0:rec_vec->size[0]][0:rec_vec->size[1]])
#pragma omp target exit data map(delete: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]])
return 0;
}
|
par_build_table.c | /* Generated by Cython 0.20.1 on Tue Apr 22 20:29:04 2014 */
#define PY_SSIZE_T_CLEAN
#ifndef CYTHON_USE_PYLONG_INTERNALS
#ifdef PYLONG_BITS_IN_DIGIT
#define CYTHON_USE_PYLONG_INTERNALS 0
#else
#include "pyconfig.h"
#ifdef PYLONG_BITS_IN_DIGIT
#define CYTHON_USE_PYLONG_INTERNALS 1
#else
#define CYTHON_USE_PYLONG_INTERNALS 0
#endif
#endif
#endif
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02040000
#error Cython requires Python 2.4+.
#else
#define CYTHON_ABI "0_20_1"
#include <stddef.h> /* For offsetof */
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#endif
#if CYTHON_COMPILING_IN_PYPY
#define Py_OptimizeFlag 0
#endif
#if PY_VERSION_HEX < 0x02050000
typedef int Py_ssize_t;
#define PY_SSIZE_T_MAX INT_MAX
#define PY_SSIZE_T_MIN INT_MIN
#define PY_FORMAT_SIZE_T ""
#define CYTHON_FORMAT_SSIZE_T ""
#define PyInt_FromSsize_t(z) PyInt_FromLong(z)
#define PyInt_AsSsize_t(o) __Pyx_PyInt_As_int(o)
#define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? PyNumber_Int(o) : \
(PyErr_Format(PyExc_TypeError, \
"expected index value, got %.200s", Py_TYPE(o)->tp_name), \
(PyObject*)0))
#define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \
!PyComplex_Check(o))
#define PyIndex_Check __Pyx_PyIndex_Check
#define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message)
#define __PYX_BUILD_PY_SSIZE_T "i"
#else
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#define __Pyx_PyIndex_Check PyIndex_Check
#endif
#if PY_VERSION_HEX < 0x02060000
#define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt)
#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
#define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size)
#define PyVarObject_HEAD_INIT(type, size) \
PyObject_HEAD_INIT(type) size,
#define PyType_Modified(t)
typedef struct {
void *buf;
PyObject *obj;
Py_ssize_t len;
Py_ssize_t itemsize;
int readonly;
int ndim;
char *format;
Py_ssize_t *shape;
Py_ssize_t *strides;
Py_ssize_t *suboffsets;
void *internal;
} Py_buffer;
#define PyBUF_SIMPLE 0
#define PyBUF_WRITABLE 0x0001
#define PyBUF_FORMAT 0x0004
#define PyBUF_ND 0x0008
#define PyBUF_STRIDES (0x0010 | PyBUF_ND)
#define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES)
#define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES)
#define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES)
#define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES)
#define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE)
#define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE)
typedef int (*getbufferproc)(PyObject *, Py_buffer *, int);
typedef void (*releasebufferproc)(PyObject *, Py_buffer *);
#endif
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyType_Type
#endif
#if PY_VERSION_HEX < 0x02060000
#define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict")
#endif
#if PY_MAJOR_VERSION >= 3
#define Py_TPFLAGS_CHECKTYPES 0
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3)
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#if PY_VERSION_HEX < 0x02060000
#define Py_TPFLAGS_HAVE_VERSION_TAG 0
#endif
#if PY_VERSION_HEX < 0x02060000 && !defined(Py_TPFLAGS_IS_ABSTRACT)
#define Py_TPFLAGS_IS_ABSTRACT 0
#endif
#if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE)
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#else
#define CYTHON_PEP393_ENABLED 0
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#endif
#if PY_VERSION_HEX < 0x02060000
#define PyBytesObject PyStringObject
#define PyBytes_Type PyString_Type
#define PyBytes_Check PyString_Check
#define PyBytes_CheckExact PyString_CheckExact
#define PyBytes_FromString PyString_FromString
#define PyBytes_FromStringAndSize PyString_FromStringAndSize
#define PyBytes_FromFormat PyString_FromFormat
#define PyBytes_DecodeEscape PyString_DecodeEscape
#define PyBytes_AsString PyString_AsString
#define PyBytes_AsStringAndSize PyString_AsStringAndSize
#define PyBytes_Size PyString_Size
#define PyBytes_AS_STRING PyString_AS_STRING
#define PyBytes_GET_SIZE PyString_GET_SIZE
#define PyBytes_Repr PyString_Repr
#define PyBytes_Concat PyString_Concat
#define PyBytes_ConcatAndDel PyString_ConcatAndDel
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj) || \
PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#if PY_VERSION_HEX < 0x02060000
#define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type)
#define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type)
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300)
#define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b)
#define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value)
#define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b)
#else
#define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \
(PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \
(likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \
(PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0)))
#define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \
(PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
(likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \
(PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1)))
#define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \
(PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
(likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \
(PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1)))
#endif
#if PY_MAJOR_VERSION >= 3
#define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
#endif
#if PY_VERSION_HEX < 0x02050000
#define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n)))
#define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a))
#define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n)))
#else
#define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n))
#define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a))
#define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n))
#endif
#if PY_VERSION_HEX < 0x02050000
#define __Pyx_NAMESTR(n) ((char *)(n))
#define __Pyx_DOCSTR(n) ((char *)(n))
#else
#define __Pyx_NAMESTR(n) (n)
#define __Pyx_DOCSTR(n) (n)
#endif
#ifndef CYTHON_INLINE
#if defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
/* Fallback NaN when the NAN macro is unavailable: an all-ones bit pattern
   has every exponent bit set and a nonzero mantissa, which IEEE-754 treats
   as a (quiet) NaN; the sign bit is irrelevant. */
static CYTHON_INLINE float __PYX_NAN() {
float result;
memset(&result, 0xFF, sizeof result);
return result;
}
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#define __PYX_HAVE__radiotool__algorithms__par_build_table
#define __PYX_HAVE_API__radiotool__algorithms__par_build_table
#include "string.h"
#include "stdio.h"
#include "pythread.h"
#include "stdlib.h"
#include "pystate.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#ifdef PYREX_WITHOUT_ASSERTIONS
#define CYTHON_WITHOUT_ASSERTIONS
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) ( \
(sizeof(type) < sizeof(Py_ssize_t)) || \
(sizeof(type) > sizeof(Py_ssize_t) && \
likely(v < (type)PY_SSIZE_T_MAX || \
v == (type)PY_SSIZE_T_MAX) && \
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN || \
v == (type)PY_SSIZE_T_MIN))) || \
(sizeof(type) == sizeof(Py_ssize_t) && \
(is_signed || likely(v < (type)PY_SSIZE_T_MAX || \
v == (type)PY_SSIZE_T_MAX))) )
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromUString(s) __Pyx_PyObject_FromString((char*)s)
#define __Pyx_PyBytes_FromUString(s) __Pyx_PyBytes_FromString((char*)s)
#define __Pyx_PyByteArray_FromUString(s) __Pyx_PyByteArray_FromString((char*)s)
#define __Pyx_PyStr_FromUString(s) __Pyx_PyStr_FromString((char*)s)
#define __Pyx_PyUnicode_FromUString(s) __Pyx_PyUnicode_FromString((char*)s)
#if PY_MAJOR_VERSION < 3
/* Length of a NUL-terminated Py_UNICODE string (Python 2 fallback for the
   Py_UNICODE_strlen API): walk to the terminator and return the distance. */
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
{
    const Py_UNICODE *p = u;
    while (*p)
        ++p;
    return (size_t)(p - u);
}
#else
#define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen
#endif
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None)
#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_COMPILING_IN_CPYTHON
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
/*
  Module-init check that the interpreter's default string encoding is a
  superset of ASCII: decode bytes 0..127 as ASCII, re-encode them with the
  default encoding, and fail if the round trip changes anything.  Sets
  __Pyx_sys_getdefaultencoding_not_ascii as a side effect.  Returns 0 on
  success, -1 with a Python exception set on failure.
*/
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys = NULL;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
sys = PyImport_ImportModule("sys");
if (sys == NULL) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
if (default_encoding == NULL) goto bad;
if (strcmp(PyBytes_AsString(default_encoding), "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
const char* default_encoding_c = PyBytes_AS_STRING(default_encoding);
/* Build the 128 ASCII code points and round-trip them through the
   default encoding. */
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (ascii_chars_u == NULL) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (ascii_chars_b == NULL || strncmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
}
/* Shared success/failure cleanup: every owned reference is released on
   both paths (XDECREF tolerates the NULLs of early exits). */
Py_XDECREF(sys);
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return 0;
bad:
Py_XDECREF(sys);
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
/*
  Cache the interpreter's default string encoding name into the static
  __PYX_DEFAULT_STRING_ENCODING buffer at module init.  Returns 0 on
  success, -1 on failure (with a Python exception set when a Python call
  failed).
*/
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys = NULL;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (sys == NULL) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
if (default_encoding == NULL) goto bad;
default_encoding_c = PyBytes_AS_STRING(default_encoding);
/* BUG FIX: the allocation must include room for the terminating NUL --
   the original malloc(strlen(...)) made the strcpy() below overflow the
   buffer by one byte.  Also check the allocation before writing to it. */
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
if (__PYX_DEFAULT_STRING_ENCODING == NULL) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(sys);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(sys);
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
#ifdef __GNUC__
/* Test for GCC > 2.95 */
#if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* __GNUC__ > 2 ... */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ > 2 ... */
#else /* __GNUC__ */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static PyObject *__pyx_m;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
"par_build_table.pyx",
"array.pxd",
"stringsource",
};
struct __pyx_memoryview_obj;
/* C-level slice of a memoryview: the owning memoryview object, a base data
   pointer, and per-dimension shape/strides/suboffsets for up to 8 dims. */
typedef struct {
struct __pyx_memoryview_obj *memview;
char *data;
Py_ssize_t shape[8];
Py_ssize_t strides[8];
Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name; /* for error messages only */
struct __Pyx_StructField_* fields;
size_t size; /* sizeof(type) */
size_t arraysize[8]; /* length of array in each dimension */
int ndim;
char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject, c_H_ar */
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
#include <pythread.h>
/*--- Atomic refcount support for memoryview slices ----------------------
 * Selects an atomic increment/decrement implementation for the slice
 * acquisition counter; falls back to lock-based counting (CYTHON_ATOMICS
 * == 0) when no compiler intrinsic is available.  Define
 * __PYX_DEBUG_ATOMICS to get a compile-time report of the chosen path.
 *
 * Fixes vs. the generated original (all fixed upstream in Cython 3):
 *  - `MSC_VER` was misspelled (always undefined), silently disabling the
 *    MSVC Interlocked* path -> use defined(_MSC_VER);
 *  - `__GNUC_PATCHLEVEL` lacked trailing underscores (always 0), so GCC
 *    4.1.x never qualified -> __GNUC_PATCHLEVEL__;
 *  - the GCC test required __GNUC_MINOR__ > 1 even for GCC >= 5, which
 *    wrongly disabled atomics on e.g. GCC 5.0/10.1 -> proper major/minor
 *    comparison;
 *  - redefining __pyx_atomic_int_type to LONG without #undef would warn
 *    once the MSVC branch can actually be taken;
 *  - MSVC does not support #warning -> #pragma message.
 */
#ifndef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
/* GCC __sync builtins are available since GCC 4.1.2; not usable on bare
 * i386, whose base ISA lacks the required instructions. */
#if CYTHON_ATOMICS && defined(__GNUC__) && \
        (__GNUC__ > 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ > 1 || \
            (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)))) && \
        !defined(__i386__)
    #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
    #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using GNU atomics"
    #endif
#elif CYTHON_ATOMICS && defined(_MSC_VER)
    #include <Windows.h>
    #undef __pyx_atomic_int_type
    #define __pyx_atomic_int_type LONG
    #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
    #ifdef __PYX_DEBUG_ATOMICS
        #pragma message ("Using MSVC atomics")
    #endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
    /* Intel intrinsics path: deliberately disabled upstream (`&& 0`). */
    #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using Intel atomics"
    #endif
#else
    #undef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 0
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Not using atomics"
    #endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview) \
__pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview) \
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview) \
__pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview) \
__pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
/*--- Type declarations ---*/
#ifndef _ARRAYARRAY_H
struct arrayobject;
typedef struct arrayobject arrayobject;
#endif
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params;
struct __pyx_opt_args_9radiotool_10algorithms_15par_build_table_build_table;
/* "radiotool/algorithms/par_build_table.pyx":11
* from cython cimport parallel
*
* cdef struct Params: # <<<<<<<<<<<<<<
* double pen_val
* int p0
*/
struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params {
double pen_val;
int p0;
int p0_full;
int n_beats;
int n_pauses;
int min_beats;
int max_beats;
int max_beats_with_padding;
int all_full;
};
/* "radiotool/algorithms/par_build_table.pyx":481
*
*
* cpdef int[:] build_table(double[:, :] trans_cost, double[:, :] penalty, # <<<<<<<<<<<<<<
* int min_beats=-1, int max_beats=-1, int first_pause=-1):
*
*/
struct __pyx_opt_args_9radiotool_10algorithms_15par_build_table_build_table {
int __pyx_n;
int min_beats;
int max_beats;
int first_pause;
};
/* "View.MemoryView":96
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_array_obj {
PyObject_HEAD
char *data;
Py_ssize_t len;
char *format;
int ndim;
Py_ssize_t *_shape;
Py_ssize_t *_strides;
Py_ssize_t itemsize;
PyObject *mode;
PyObject *_format;
void (*callback_free_data)(void *);
int free_data;
int dtype_is_object;
};
/* "View.MemoryView":275
*
* @cname('__pyx_MemviewEnum')
* cdef class Enum(object): # <<<<<<<<<<<<<<
* cdef object name
* def __init__(self, name):
*/
struct __pyx_MemviewEnum_obj {
PyObject_HEAD
PyObject *name;
};
/* "View.MemoryView":308
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_memoryview_obj {
PyObject_HEAD
struct __pyx_vtabstruct_memoryview *__pyx_vtab;
PyObject *obj;
PyObject *_size;
PyObject *_array_interface;
PyThread_type_lock lock;
__pyx_atomic_int acquisition_count[2];
__pyx_atomic_int *acquisition_count_aligned_p;
Py_buffer view;
int flags;
int dtype_is_object;
__Pyx_TypeInfo *typeinfo;
};
/* "View.MemoryView":930
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_memoryviewslice_obj {
struct __pyx_memoryview_obj __pyx_base;
__Pyx_memviewslice from_slice;
PyObject *from_object;
PyObject *(*to_object_func)(char *);
int (*to_dtype_func)(char *, PyObject *);
};
/* "View.MemoryView":308
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_vtabstruct_memoryview {
char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
/* "View.MemoryView":930
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_vtabstruct__memoryviewslice {
struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil) \
if (acquire_gil) { \
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
PyGILState_Release(__pyx_gilstate_save); \
} else { \
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil) \
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext() \
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif /* CYTHON_REFNANNY */
#define __Pyx_XDECREF_SET(r, v) do { \
PyObject *tmp = (PyObject *) r; \
r = v; __Pyx_XDECREF(tmp); \
} while (0)
#define __Pyx_DECREF_SET(r, v) do { \
PyObject *tmp = (PyObject *) r; \
r = v; __Pyx_DECREF(tmp); \
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
#if CYTHON_COMPILING_IN_CPYTHON
/* Fast attribute lookup for an interned attribute-name object.
 * Dispatches straight to the type's tp_getattro slot (the common CPython
 * case), then the legacy Py2-only tp_getattr slot, and only falls back to
 * the generic PyObject_GetAttr call.  Same contract as PyObject_GetAttr:
 * returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
    PyTypeObject* tp = Py_TYPE(obj);
    if (likely(tp->tp_getattro))
        return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
    /* Py2 only: tp_getattr takes a C string, so unpack the str object. */
    if (likely(tp->tp_getattr))
        return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
    return PyObject_GetAttr(obj, attr_name);
}
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
static PyObject *__Pyx_GetBuiltinName(PyObject *name); /*proto*/
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d
#define __Pyx_MEMVIEW_DIRECT 1
#define __Pyx_MEMVIEW_PTR 2
#define __Pyx_MEMVIEW_FULL 4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED 16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
static void __Pyx_WriteUnraisable(const char *name, int clineno,
int lineno, const char *filename,
int full_traceback); /*proto*/
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); /*proto*/
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \
const char* function_name); /*proto*/
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact); /*proto*/
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /*proto*/
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /*proto*/
#include <string.h>
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /*proto*/
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /*proto*/
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
#define UNARY_NEG_WOULD_OVERFLOW(x) (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *get_memview(PyObject *__pyx_v_self); /*proto*/
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/
static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) : \
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) : \
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self); /*proto*/
#if CYTHON_COMPILING_IN_CPYTHON
/* Append x to a list known to be freshly built by this module (a list-
 * comprehension result): when spare capacity exists, store the item
 * directly into ob_item and bump the size, skipping PyList_Append's
 * call and resize overhead.  Returns 0 on success, -1 on error (only the
 * PyList_Append fallback can fail).
 * NOTE(review): `Py_SIZE(list) = len+1` uses Py_SIZE as an lvalue, a
 * pre-CPython-3.9 detail (3.9+ requires Py_SET_SIZE); acceptable for the
 * Python versions this generated file targets -- confirm before building
 * against newer CPython. */
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
    PyListObject* L = (PyListObject*) list;
    Py_ssize_t len = Py_SIZE(list);
    if (likely(L->allocated > len)) {
        Py_INCREF(x);  /* PyList_SET_ITEM steals this new reference */
        PyList_SET_ITEM(list, len, x);
        Py_SIZE(list) = len+1;
        return 0;
    }
    return PyList_Append(list, x);
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self); /*proto*/
/* list.extend(v) from C: returns 0 on success, -1 with an exception set
 * on failure. */
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
    /* CPython fast path: the private _PyList_Extend returns None (a new
     * reference) on success, NULL on error. */
    PyObject* none = _PyList_Extend((PyListObject*)L, v);
    if (unlikely(!none))
        return -1;
    Py_DECREF(none);
    return 0;
#else
    /* Portable fallback: assigning into an empty slice at the very end of
     * the list is equivalent to extend. */
    return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
#if CYTHON_COMPILING_IN_CPYTHON
/* list.append(x) from C with an inline fast path: append in place when
 * there is spare capacity AND the list is more than half full (the second
 * test keeps behavior close to PyList_Append's over-allocation policy for
 * sparsely filled lists).  Returns 0 on success, -1 on error (only the
 * PyList_Append fallback can fail).
 * NOTE(review): the single `&` (not `&&`) is intentional -- it combines
 * two cheap comparisons without an extra branch.
 * NOTE(review): `Py_SIZE(list) = len+1` relies on Py_SIZE being an lvalue
 * (pre-CPython-3.9); confirm before building against newer CPython. */
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
    PyListObject* L = (PyListObject*) list;
    Py_ssize_t len = Py_SIZE(list);
    if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
        Py_INCREF(x);  /* PyList_SET_ITEM steals this new reference */
        PyList_SET_ITEM(list, len, x);
        Py_SIZE(list) = len+1;
        return 0;
    }
    return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self); /*proto*/
static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/
#ifndef _ARRAYARRAY_H
#define _ARRAYARRAY_H
typedef struct arraydescr {
int typecode;
int itemsize;
PyObject * (*getitem)(struct arrayobject *, Py_ssize_t);
int (*setitem)(struct arrayobject *, Py_ssize_t, PyObject *);
#if PY_VERSION_HEX >= 0x03000000
char *formats;
#endif
} arraydescr;
struct arrayobject {
PyObject_HEAD
Py_ssize_t ob_size;
union {
char *ob_item;
float *as_floats;
double *as_doubles;
int *as_ints;
unsigned int *as_uints;
unsigned char *as_uchars;
signed char *as_schars;
char *as_chars;
unsigned long *as_ulongs;
long *as_longs;
short *as_shorts;
unsigned short *as_ushorts;
Py_UNICODE *as_pyunicodes;
void *as_voidptr;
} data;
Py_ssize_t allocated;
struct arraydescr *ob_descr;
PyObject *weakreflist; /* List of weak references */
#if PY_VERSION_HEX >= 0x03000000
int ob_exports; /* Number of exported buffers */
#endif
};
#ifndef NO_NEWARRAY_INLINE
/* Allocate a new array object holding `size` elements described by
 * `descr` (mirrors CPython's Modules/arraymodule.c newarrayobject).
 * Validates the size, guards against overflow in the byte-count
 * computation, and allocates the item buffer only for non-empty arrays.
 * Returns a new reference, or NULL with an exception set.
 * Assumes descr->itemsize > 0 (true for every registered descriptor). */
static CYTHON_INLINE PyObject * newarrayobject(PyTypeObject *type, Py_ssize_t size,
    struct arraydescr *descr) {
    arrayobject *op;
    size_t nbytes;
    if (size < 0) {
        PyErr_BadInternalCall();
        return NULL;
    }
    /* Multiply in size_t: the original computed `size * descr->itemsize`
     * in signed arithmetic, making the very wrap the next check is meant
     * to detect undefined behavior.  Unsigned arithmetic wraps safely,
     * and the division check then catches it. */
    nbytes = (size_t)size * (size_t)descr->itemsize;
    if (nbytes / (size_t)descr->itemsize != (size_t)size) {
        return PyErr_NoMemory();
    }
    op = (arrayobject *) type->tp_alloc(type, 0);
    if (op == NULL) {
        return NULL;
    }
    op->ob_descr = descr;
    op->allocated = size;
    op->weakreflist = NULL;
    op->ob_size = size;
    if (size <= 0) {
        op->data.ob_item = NULL;  /* empty array owns no buffer */
    }
    else {
        op->data.ob_item = PyMem_NEW(char, nbytes);
        if (op->data.ob_item == NULL) {
            Py_DECREF(op);
            return PyErr_NoMemory();
        }
    }
    return (PyObject *) op;
}
#else
PyObject* newarrayobject(PyTypeObject *type, Py_ssize_t size,
struct arraydescr *descr);
#endif /* ifndef NO_NEWARRAY_INLINE */
/* Resize the array's backing buffer to hold exactly n items, setting both
 * the logical size and the capacity to n.  Returns 0 on success; on
 * allocation failure sets MemoryError and returns -1, leaving the array
 * untouched (self->data.ob_item still owns the old buffer, since
 * PyMem_Resize only NULLs the local variable on failure). */
static CYTHON_INLINE int resize(arrayobject *self, Py_ssize_t n) {
    void *grown = (void*) self->data.ob_item;
    PyMem_Resize(grown, char, (size_t)(n * self->ob_descr->itemsize));
    if (grown != NULL) {
        self->data.ob_item = (char*) grown;
        self->allocated = n;
        self->ob_size = n;
        return 0;
    }
    PyErr_NoMemory();
    return -1;
}
/* Resize with amortized-growth semantics (mirrors CPython's array
 * module):
 *  - shrinking while still occupying more than 1/4 of the current
 *    allocation only adjusts ob_size and keeps the buffer (avoids
 *    realloc churn);
 *  - otherwise reallocates to n * 3/2 + 1 items, leaving headroom so a
 *    run of appends does not reallocate every time.
 * Returns 0 on success; on allocation failure sets MemoryError, returns
 * -1, and leaves the array unchanged (self->data.ob_item still owns the
 * old buffer).
 * NOTE(review): `n * 3 / 2 + 1` and the itemsize multiplications are done
 * in signed Py_ssize_t and could overflow for pathologically large n --
 * callers are presumed to pass sane sizes; confirm if ever exposed to
 * untrusted input. */
static CYTHON_INLINE int resize_smart(arrayobject *self, Py_ssize_t n) {
    void *items = (void*) self->data.ob_item;
    Py_ssize_t newsize;
    if (n < self->allocated) {
        if (n*4 > self->allocated) {
            /* still using more than 1/4 of capacity: logical shrink only */
            self->ob_size = n;
            return 0;
        }
    }
    newsize = n * 3 / 2 + 1;
    PyMem_Resize(items, char, (size_t)(newsize * self->ob_descr->itemsize));
    if (items == NULL) {
        PyErr_NoMemory();
        return -1;
    }
    self->data.ob_item = (char*) items;
    self->ob_size = n;
    self->allocated = newsize;
    return 0;
}
#endif
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0};
static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj);
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *);
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
static PyObject *__pyx_memview_get_int(const char *itemp); /* proto */
static int __pyx_memview_set_int(const char *itemp, PyObject *obj); /* proto */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice *mvs,
char order, int ndim);
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize);
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object);
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /*proto*/
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *);
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_int(PyObject *);
static int __Pyx_check_binary_version(void);
#if !defined(__Pyx_PyIdentifier_FromString)
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s)
#else
#define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s)
#endif
#endif
static PyObject *__Pyx_ImportModule(const char *name); /*proto*/
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/
typedef struct {
int code_line;
PyCodeObject* code_object;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename); /*proto*/
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
/* Module declarations from 'cython.view' */
/* Module declarations from 'cython' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'cpython.exc' */
/* Module declarations from 'cpython.mem' */
/* Module declarations from 'array' */
/* Module declarations from 'cpython.array' */
static PyTypeObject *__pyx_ptype_7cpython_5array_array = 0;
static CYTHON_INLINE arrayobject *__pyx_f_7cpython_5array_clone(arrayobject *, Py_ssize_t, int); /*proto*/
static CYTHON_INLINE int __pyx_f_7cpython_5array_extend_buffer(arrayobject *, char *, Py_ssize_t); /*proto*/
/* Module declarations from 'radiotool.algorithms.par_build_table' */
static PyTypeObject *__pyx_array_type = 0;
static PyTypeObject *__pyx_MemviewEnum_type = 0;
static PyTypeObject *__pyx_memoryview_type = 0;
static PyTypeObject *__pyx_memoryviewslice_type = 0;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static void __pyx_f_9radiotool_10algorithms_15par_build_table_get_tc_column(__Pyx_memviewslice, int, __Pyx_memviewslice, int, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params); /*proto*/
static double __pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_value(__Pyx_memviewslice, int, int, int, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params); /*proto*/
static void __pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_column(__Pyx_memviewslice, int, __Pyx_memviewslice, int, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params); /*proto*/
static void __pyx_f_9radiotool_10algorithms_15par_build_table_space_efficient_cost_with_duration_constraint(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice); /*proto*/
static void __pyx_f_9radiotool_10algorithms_15par_build_table_backward_space_efficient_cost_with_duration_constraint(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice); /*proto*/
static void __pyx_f_9radiotool_10algorithms_15par_build_table_divide_and_conquer_cost_and_path(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int, __Pyx_memviewslice, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice); /*proto*/
static __Pyx_memviewslice __pyx_f_9radiotool_10algorithms_15par_build_table_build_table(__Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch, struct __pyx_opt_args_9radiotool_10algorithms_15par_build_table_build_table *__pyx_optional_args); /*proto*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
static void *__pyx_align_pointer(void *, size_t); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/
static int __pyx_memoryview_err(PyObject *, char *); /*proto*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 'U' : 'I', IS_UNSIGNED(int), 0 };
#define __Pyx_MODULE_NAME "radiotool.algorithms.par_build_table"
int __pyx_module_is_main_radiotool__algorithms__par_build_table = 0;
/* Implementation of 'radiotool.algorithms.par_build_table' */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_xrange;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin_Ellipsis;
static PyObject *__pyx_builtin_TypeError;
static PyObject *__pyx_builtin_id;
static PyObject *__pyx_builtin_IndexError;
static PyObject *__pyx_pf_9radiotool_10algorithms_15par_build_table_build_table(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_trans_cost, __Pyx_memviewslice __pyx_v_penalty, int __pyx_v_min_beats, int __pyx_v_max_beats, int __pyx_v_first_pause); /* proto */
static int __pyx_pf_7cpython_5array_5array___getbuffer__(arrayobject *__pyx_v_self, Py_buffer *__pyx_v_info, CYTHON_UNUSED int __pyx_v_flags); /* proto */
/*
 * Forward prototypes emitted by Cython for the bundled memoryview/array
 * support types (cython.array, Enum, memoryview, _memoryviewslice) and for
 * their tp_new slot functions.  Definitions appear later in the generated
 * module; do not edit by hand — regenerate from the .pyx instead.
 */
static void __pyx_pf_7cpython_5array_5array_2__releasebuffer__(CYTHON_UNUSED arrayobject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
static int __pyx_array_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
static int __pyx_array_getbuffer_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_array_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *get_memview_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_array_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
static PyObject *__pyx_array_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
static int __pyx_array_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
static int __pyx_MemviewEnum_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
static PyObject *__pyx_MemviewEnum_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static int __pyx_memoryview_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
static void __pyx_memoryview_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
static int __pyx_memoryview_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
static int __pyx_memoryview_getbuffer_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static PyObject *__pyx_memoryview_transpose_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview__get__base_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_get_shape_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_get_strides_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_get_suboffsets_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_get_ndim_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_get_itemsize_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_get_nbytes_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_get_size_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_memoryview_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static void __pyx_memoryviewslice_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryviewslice__get__base_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
/* tp_new slots for the support types above */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
/*
 * Cython string table: raw byte arrays (__pyx_k_*) holding every literal
 * the module interns at import time, followed by the PyObject* slots
 * (__pyx_n_s_* identifiers, __pyx_n_u_* unicode, __pyx_n_b_* bytes,
 * __pyx_kp_s_* string constants) that __Pyx_InitStrings fills in, plus
 * cached small ints and argument tuples.  Generated code — do not edit.
 */
static char __pyx_k_O[] = "O";
static char __pyx_k_c[] = "c";
static char __pyx_k_d[] = "d";
static char __pyx_k_i[] = "i";
static char __pyx_k_id[] = "id";
static char __pyx_k_obj[] = "obj";
static char __pyx_k_base[] = "base";
static char __pyx_k_main[] = "__main__";
static char __pyx_k_mode[] = "mode";
static char __pyx_k_name[] = "name";
static char __pyx_k_ndim[] = "ndim";
static char __pyx_k_pack[] = "pack";
static char __pyx_k_size[] = "size";
static char __pyx_k_step[] = "step";
static char __pyx_k_stop[] = "stop";
static char __pyx_k_test[] = "__test__";
static char __pyx_k_ASCII[] = "ASCII";
static char __pyx_k_class[] = "__class__";
static char __pyx_k_error[] = "error";
static char __pyx_k_flags[] = "flags";
static char __pyx_k_range[] = "range";
static char __pyx_k_shape[] = "shape";
static char __pyx_k_start[] = "start";
static char __pyx_k_decode[] = "decode";
static char __pyx_k_encode[] = "encode";
static char __pyx_k_format[] = "format";
static char __pyx_k_import[] = "__import__";
static char __pyx_k_name_2[] = "__name__";
static char __pyx_k_struct[] = "struct";
static char __pyx_k_unpack[] = "unpack";
static char __pyx_k_xrange[] = "xrange";
static char __pyx_k_fortran[] = "fortran";
static char __pyx_k_memview[] = "memview";
static char __pyx_k_penalty[] = "penalty";
static char __pyx_k_Ellipsis[] = "Ellipsis";
static char __pyx_k_itemsize[] = "itemsize";
static char __pyx_k_TypeError[] = "TypeError";
static char __pyx_k_enumerate[] = "enumerate";
static char __pyx_k_max_beats[] = "max_beats";
static char __pyx_k_min_beats[] = "min_beats";
static char __pyx_k_IndexError[] = "IndexError";
static char __pyx_k_ValueError[] = "ValueError";
static char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
static char __pyx_k_trans_cost[] = "trans_cost";
static char __pyx_k_MemoryError[] = "MemoryError";
static char __pyx_k_first_pause[] = "first_pause";
static char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
static char __pyx_k_allocate_buffer[] = "allocate_buffer";
static char __pyx_k_dtype_is_object[] = "dtype_is_object";
static char __pyx_k_pyx_releasebuffer[] = "__pyx_releasebuffer";
static char __pyx_k_strided_and_direct[] = "<strided and direct>";
static char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
static char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
static char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
static char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
static char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
static char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
static char __pyx_k_getbuffer_obj_view_flags[] = "getbuffer(obj, view, flags)";
static char __pyx_k_Dimension_d_is_not_direct[] = "Dimension %d is not direct";
static char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
static char __pyx_k_Index_out_of_bounds_axis_d[] = "Index out of bounds (axis %d)";
static char __pyx_k_Step_may_not_be_zero_axis_d[] = "Step may not be zero (axis %d)";
static char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
static char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
static char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
static char __pyx_k_All_dimensions_preceding_dimensi[] = "All dimensions preceding dimension %d must be indexed and not sliced";
static char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
static char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
static char __pyx_k_Cannot_transpose_memoryview_with[] = "Cannot transpose memoryview with indirect dimensions";
static char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
static char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
static char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
static char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
static char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
static char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
static char __pyx_k_unable_to_allocate_shape_or_stri[] = "unable to allocate shape or strides.";
/* interned-object slots, populated at module init from the byte arrays above */
static PyObject *__pyx_n_s_ASCII;
static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
static PyObject *__pyx_n_s_Ellipsis;
static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
static PyObject *__pyx_n_s_IndexError;
static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
static PyObject *__pyx_kp_s_MemoryView_of_r_object;
static PyObject *__pyx_n_b_O;
static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_allocate_buffer;
static PyObject *__pyx_n_s_base;
static PyObject *__pyx_n_b_c;
static PyObject *__pyx_n_s_c;
static PyObject *__pyx_n_u_c;
static PyObject *__pyx_n_s_class;
static PyObject *__pyx_kp_s_contiguous_and_direct;
static PyObject *__pyx_kp_s_contiguous_and_indirect;
static PyObject *__pyx_n_s_d;
static PyObject *__pyx_n_s_decode;
static PyObject *__pyx_n_s_dtype_is_object;
static PyObject *__pyx_n_s_encode;
static PyObject *__pyx_n_s_enumerate;
static PyObject *__pyx_n_s_error;
static PyObject *__pyx_n_s_first_pause;
static PyObject *__pyx_n_s_flags;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_b_fortran;
static PyObject *__pyx_n_s_fortran;
static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi;
static PyObject *__pyx_n_s_i;
static PyObject *__pyx_n_s_id;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_itemsize;
static PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_max_beats;
static PyObject *__pyx_n_s_memview;
static PyObject *__pyx_n_s_min_beats;
static PyObject *__pyx_n_s_mode;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_name_2;
static PyObject *__pyx_n_s_ndim;
static PyObject *__pyx_n_s_obj;
static PyObject *__pyx_n_s_pack;
static PyObject *__pyx_n_s_penalty;
static PyObject *__pyx_n_s_pyx_getbuffer;
static PyObject *__pyx_n_s_pyx_releasebuffer;
static PyObject *__pyx_n_s_pyx_vtable;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_size;
static PyObject *__pyx_n_s_start;
static PyObject *__pyx_n_s_step;
static PyObject *__pyx_n_s_stop;
static PyObject *__pyx_kp_s_strided_and_direct;
static PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
static PyObject *__pyx_kp_s_strided_and_indirect;
static PyObject *__pyx_n_s_struct;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_n_s_trans_cost;
static PyObject *__pyx_kp_s_unable_to_allocate_array_data;
static PyObject *__pyx_kp_s_unable_to_allocate_shape_or_stri;
static PyObject *__pyx_n_s_unpack;
static PyObject *__pyx_n_s_xrange;
/* cached small integers and constant tuples, created at module init */
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_neg_1;
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__15;
static PyObject *__pyx_tuple__16;
static PyObject *__pyx_tuple__17;
static PyObject *__pyx_tuple__18;
static PyObject *__pyx_tuple__19;
static PyObject *__pyx_tuple__20;
/* "radiotool/algorithms/par_build_table.pyx":23
 *
 *
 * cdef void get_tc_column(double[:, :] tc, int column, double[:] tc_column, int backward, Params p) nogil: # <<<<<<<<<<<<<<
 * cdef int tc_index = 0
 * cdef int beat_seg_i = 0
 */
/*
 * Cython-generated body of get_tc_column (par_build_table.pyx, line 23).
 *
 * Fills the flat vector `tc_column` with one column (forward pass) or one
 * row (backward pass, `backward` != 0) of the 2-D transition-cost
 * memoryview `tc`, then adds/subtracts the penalty value `p.pen_val` to
 * enforce the musical constraints quoted in the embedded .pyx comments
 * below (minimum/maximum segment length around the pause state, pause
 * entry/exit rules, and segment-index ordering).
 *
 * Layout (from the index arithmetic in this function): indices
 * [0, p.p0_full) of tc_column are p.max_beats groups of p.n_beats music
 * beats; indices [p.p0_full, p.all_full) map back to tc rows/cols starting
 * at p.p0.  NOTE(review): exact meaning of p.p0 vs p.p0_full is defined in
 * the .pyx/Params struct outside this chunk — confirm there.
 *
 * `nogil` function: pure C, no CPython API calls; memoryview element
 * access is raw pointer arithmetic via data + index * strides[...].
 * `beat_seg_i = column / p.n_beats` uses C truncating integer division
 * (both operands are ints).  Generated code — edit the .pyx, not this.
 */
static void __pyx_f_9radiotool_10algorithms_15par_build_table_get_tc_column(__Pyx_memviewslice __pyx_v_tc, int __pyx_v_column, __Pyx_memviewslice __pyx_v_tc_column, int __pyx_v_backward, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params __pyx_v_p) {
int __pyx_v_tc_index;
int __pyx_v_beat_seg_i;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
int __pyx_t_12;
int __pyx_t_13;
int __pyx_t_14;
int __pyx_t_15;
int __pyx_t_16;
int __pyx_t_17;
int __pyx_t_18;
int __pyx_t_19;
int __pyx_t_20;
int __pyx_t_21;
int __pyx_t_22;
int __pyx_t_23;
int __pyx_t_24;
int __pyx_t_25;
long __pyx_t_26;
int __pyx_t_27;
  /* --- setup: resolve which tc row/column this output column maps to --- */
/* "radiotool/algorithms/par_build_table.pyx":24
 *
 * cdef void get_tc_column(double[:, :] tc, int column, double[:] tc_column, int backward, Params p) nogil:
 * cdef int tc_index = 0 # <<<<<<<<<<<<<<
 * cdef int beat_seg_i = 0
 * cdef int i, j
 */
__pyx_v_tc_index = 0;
/* "radiotool/algorithms/par_build_table.pyx":25
 * cdef void get_tc_column(double[:, :] tc, int column, double[:] tc_column, int backward, Params p) nogil:
 * cdef int tc_index = 0
 * cdef int beat_seg_i = 0 # <<<<<<<<<<<<<<
 * cdef int i, j
 *
 */
__pyx_v_beat_seg_i = 0;
/* "radiotool/algorithms/par_build_table.pyx":28
 * cdef int i, j
 *
 * if column >= p.p0_full: # <<<<<<<<<<<<<<
 * tc_index = p.p0 + (column - p.p0_full)
 * else:
 */
__pyx_t_1 = ((__pyx_v_column >= __pyx_v_p.p0_full) != 0);
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":29
 *
 * if column >= p.p0_full:
 * tc_index = p.p0 + (column - p.p0_full) # <<<<<<<<<<<<<<
 * else:
 * tc_index = column % p.n_beats
 */
__pyx_v_tc_index = (__pyx_v_p.p0 + (__pyx_v_column - __pyx_v_p.p0_full));
goto __pyx_L3;
}
/*else*/ {
/* "radiotool/algorithms/par_build_table.pyx":31
 * tc_index = p.p0 + (column - p.p0_full)
 * else:
 * tc_index = column % p.n_beats # <<<<<<<<<<<<<<
 *
 * if not backward:
 */
__pyx_v_tc_index = (__pyx_v_column % __pyx_v_p.n_beats);
}
__pyx_L3:;
  /* --- copy phase: gather tc[*, tc_index] (forward) or tc[tc_index, *]
   * (backward) into the flat tc_column vector --- */
/* "radiotool/algorithms/par_build_table.pyx":33
 * tc_index = column % p.n_beats
 *
 * if not backward: # <<<<<<<<<<<<<<
 * for i in range(p.max_beats):
 * for j in range(p.p0):
 */
__pyx_t_1 = ((!(__pyx_v_backward != 0)) != 0);
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":34
 *
 * if not backward:
 * for i in range(p.max_beats): # <<<<<<<<<<<<<<
 * for j in range(p.p0):
 * tc_column[i * p.n_beats + j] = tc[j, tc_index]
 */
__pyx_t_2 = __pyx_v_p.max_beats;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "radiotool/algorithms/par_build_table.pyx":35
 * if not backward:
 * for i in range(p.max_beats):
 * for j in range(p.p0): # <<<<<<<<<<<<<<
 * tc_column[i * p.n_beats + j] = tc[j, tc_index]
 *
 */
__pyx_t_4 = __pyx_v_p.p0;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_j = __pyx_t_5;
/* "radiotool/algorithms/par_build_table.pyx":36
 * for i in range(p.max_beats):
 * for j in range(p.p0):
 * tc_column[i * p.n_beats + j] = tc[j, tc_index] # <<<<<<<<<<<<<<
 *
 * for i in range(p.p0_full, p.all_full):
 */
__pyx_t_6 = __pyx_v_j;
__pyx_t_7 = __pyx_v_tc_index;
__pyx_t_8 = ((__pyx_v_i * __pyx_v_p.n_beats) + __pyx_v_j);
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_8 * __pyx_v_tc_column.strides[0]) )) = (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_6 * __pyx_v_tc.strides[0]) ) + __pyx_t_7 * __pyx_v_tc.strides[1]) )));
}
}
/* "radiotool/algorithms/par_build_table.pyx":38
 * tc_column[i * p.n_beats + j] = tc[j, tc_index]
 *
 * for i in range(p.p0_full, p.all_full): # <<<<<<<<<<<<<<
 * tc_column[i] = tc[p.p0 + i - p.p0_full, tc_index]
 * else:
 */
__pyx_t_2 = __pyx_v_p.all_full;
for (__pyx_t_3 = __pyx_v_p.p0_full; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "radiotool/algorithms/par_build_table.pyx":39
 *
 * for i in range(p.p0_full, p.all_full):
 * tc_column[i] = tc[p.p0 + i - p.p0_full, tc_index] # <<<<<<<<<<<<<<
 * else:
 * for i in range(p.max_beats):
 */
__pyx_t_4 = ((__pyx_v_p.p0 + __pyx_v_i) - __pyx_v_p.p0_full);
__pyx_t_5 = __pyx_v_tc_index;
__pyx_t_9 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_9 * __pyx_v_tc_column.strides[0]) )) = (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_4 * __pyx_v_tc.strides[0]) ) + __pyx_t_5 * __pyx_v_tc.strides[1]) )));
}
goto __pyx_L4;
}
/*else*/ {
/* "radiotool/algorithms/par_build_table.pyx":41
 * tc_column[i] = tc[p.p0 + i - p.p0_full, tc_index]
 * else:
 * for i in range(p.max_beats): # <<<<<<<<<<<<<<
 * for j in range(p.p0):
 * tc_column[i * p.n_beats + j] = tc[tc_index, j]
 */
__pyx_t_2 = __pyx_v_p.max_beats;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "radiotool/algorithms/par_build_table.pyx":42
 * else:
 * for i in range(p.max_beats):
 * for j in range(p.p0): # <<<<<<<<<<<<<<
 * tc_column[i * p.n_beats + j] = tc[tc_index, j]
 *
 */
__pyx_t_10 = __pyx_v_p.p0;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
__pyx_v_j = __pyx_t_11;
/* "radiotool/algorithms/par_build_table.pyx":43
 * for i in range(p.max_beats):
 * for j in range(p.p0):
 * tc_column[i * p.n_beats + j] = tc[tc_index, j] # <<<<<<<<<<<<<<
 *
 * for i in range(p.p0_full, p.all_full):
 */
__pyx_t_12 = __pyx_v_tc_index;
__pyx_t_13 = __pyx_v_j;
__pyx_t_14 = ((__pyx_v_i * __pyx_v_p.n_beats) + __pyx_v_j);
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_14 * __pyx_v_tc_column.strides[0]) )) = (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_12 * __pyx_v_tc.strides[0]) ) + __pyx_t_13 * __pyx_v_tc.strides[1]) )));
}
}
/* "radiotool/algorithms/par_build_table.pyx":45
 * tc_column[i * p.n_beats + j] = tc[tc_index, j]
 *
 * for i in range(p.p0_full, p.all_full): # <<<<<<<<<<<<<<
 * tc_column[i] = tc[tc_index, p.p0 + i - p.p0_full]
 *
 */
__pyx_t_2 = __pyx_v_p.all_full;
for (__pyx_t_3 = __pyx_v_p.p0_full; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "radiotool/algorithms/par_build_table.pyx":46
 *
 * for i in range(p.p0_full, p.all_full):
 * tc_column[i] = tc[tc_index, p.p0 + i - p.p0_full] # <<<<<<<<<<<<<<
 *
 * #--- CONSTRAINTS ---#
 */
__pyx_t_10 = __pyx_v_tc_index;
__pyx_t_11 = ((__pyx_v_p.p0 + __pyx_v_i) - __pyx_v_p.p0_full);
__pyx_t_15 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_15 * __pyx_v_tc_column.strides[0]) )) = (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_10 * __pyx_v_tc.strides[0]) ) + __pyx_t_11 * __pyx_v_tc.strides[1]) )));
}
}
__pyx_L4:;
  /* --- constraint phase: add/subtract p.pen_val per the rules quoted in
   * the .pyx comments below; forward and backward passes mirror each
   * other --- */
/* "radiotool/algorithms/par_build_table.pyx":50
 * #--- CONSTRAINTS ---#
 * # * don't go to pause before minimum length music segment
 * if (column == p.p0_full) and (not backward): # <<<<<<<<<<<<<<
 * for i in range(p.n_beats * p.min_beats):
 * tc_column[i] += p.pen_val
 */
__pyx_t_1 = ((__pyx_v_column == __pyx_v_p.p0_full) != 0);
if (__pyx_t_1) {
__pyx_t_16 = ((!(__pyx_v_backward != 0)) != 0);
__pyx_t_17 = __pyx_t_16;
} else {
__pyx_t_17 = __pyx_t_1;
}
if (__pyx_t_17) {
/* "radiotool/algorithms/par_build_table.pyx":51
 * # * don't go to pause before minimum length music segment
 * if (column == p.p0_full) and (not backward):
 * for i in range(p.n_beats * p.min_beats): # <<<<<<<<<<<<<<
 * tc_column[i] += p.pen_val
 * elif (column < p.n_beats * p.min_beats) and backward:
 */
__pyx_t_2 = (__pyx_v_p.n_beats * __pyx_v_p.min_beats);
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "radiotool/algorithms/par_build_table.pyx":52
 * if (column == p.p0_full) and (not backward):
 * for i in range(p.n_beats * p.min_beats):
 * tc_column[i] += p.pen_val # <<<<<<<<<<<<<<
 * elif (column < p.n_beats * p.min_beats) and backward:
 * tc_column[p.p0_full] += p.pen_val
 */
__pyx_t_18 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_18 * __pyx_v_tc_column.strides[0]) )) += __pyx_v_p.pen_val;
}
goto __pyx_L17;
}
/* "radiotool/algorithms/par_build_table.pyx":53
 * for i in range(p.n_beats * p.min_beats):
 * tc_column[i] += p.pen_val
 * elif (column < p.n_beats * p.min_beats) and backward: # <<<<<<<<<<<<<<
 * tc_column[p.p0_full] += p.pen_val
 *
 */
__pyx_t_17 = (__pyx_v_column < (__pyx_v_p.n_beats * __pyx_v_p.min_beats));
if (__pyx_t_17) {
__pyx_t_1 = (__pyx_v_backward != 0);
} else {
__pyx_t_1 = __pyx_t_17;
}
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":54
 * tc_column[i] += p.pen_val
 * elif (column < p.n_beats * p.min_beats) and backward:
 * tc_column[p.p0_full] += p.pen_val # <<<<<<<<<<<<<<
 *
 * # * don't go to pause after maximum length music segment
 */
__pyx_t_2 = __pyx_v_p.p0_full;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_2 * __pyx_v_tc_column.strides[0]) )) += __pyx_v_p.pen_val;
goto __pyx_L17;
}
__pyx_L17:;
/* "radiotool/algorithms/par_build_table.pyx":57
 *
 * # * don't go to pause after maximum length music segment
 * if (column == p.p0_full) and (not backward): # <<<<<<<<<<<<<<
 * for i in range(p.n_beats * p.max_beats, p.p0_full):
 * tc_column[i] += p.pen_val
 */
__pyx_t_1 = ((__pyx_v_column == __pyx_v_p.p0_full) != 0);
if (__pyx_t_1) {
__pyx_t_17 = ((!(__pyx_v_backward != 0)) != 0);
__pyx_t_16 = __pyx_t_17;
} else {
__pyx_t_16 = __pyx_t_1;
}
if (__pyx_t_16) {
/* "radiotool/algorithms/par_build_table.pyx":58
 * # * don't go to pause after maximum length music segment
 * if (column == p.p0_full) and (not backward):
 * for i in range(p.n_beats * p.max_beats, p.p0_full): # <<<<<<<<<<<<<<
 * tc_column[i] += p.pen_val
 * elif (p.p0_full > column >= p.n_beats * p.max_beats) and backward:
 */
__pyx_t_3 = __pyx_v_p.p0_full;
for (__pyx_t_19 = (__pyx_v_p.n_beats * __pyx_v_p.max_beats); __pyx_t_19 < __pyx_t_3; __pyx_t_19+=1) {
__pyx_v_i = __pyx_t_19;
/* "radiotool/algorithms/par_build_table.pyx":59
 * if (column == p.p0_full) and (not backward):
 * for i in range(p.n_beats * p.max_beats, p.p0_full):
 * tc_column[i] += p.pen_val # <<<<<<<<<<<<<<
 * elif (p.p0_full > column >= p.n_beats * p.max_beats) and backward:
 * tc_column[p.p0_full] += p.pen_val
 */
__pyx_t_20 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_20 * __pyx_v_tc_column.strides[0]) )) += __pyx_v_p.pen_val;
}
goto __pyx_L20;
}
/* "radiotool/algorithms/par_build_table.pyx":60
 * for i in range(p.n_beats * p.max_beats, p.p0_full):
 * tc_column[i] += p.pen_val
 * elif (p.p0_full > column >= p.n_beats * p.max_beats) and backward: # <<<<<<<<<<<<<<
 * tc_column[p.p0_full] += p.pen_val
 *
 */
__pyx_t_16 = (__pyx_v_p.p0_full > __pyx_v_column);
if (__pyx_t_16) {
__pyx_t_16 = (__pyx_v_column >= (__pyx_v_p.n_beats * __pyx_v_p.max_beats));
}
if (__pyx_t_16) {
__pyx_t_1 = (__pyx_v_backward != 0);
} else {
__pyx_t_1 = __pyx_t_16;
}
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":61
 * tc_column[i] += p.pen_val
 * elif (p.p0_full > column >= p.n_beats * p.max_beats) and backward:
 * tc_column[p.p0_full] += p.pen_val # <<<<<<<<<<<<<<
 *
 * # * after pause, don't go to non-first segment beat
 */
__pyx_t_3 = __pyx_v_p.p0_full;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_3 * __pyx_v_tc_column.strides[0]) )) += __pyx_v_p.pen_val;
goto __pyx_L20;
}
__pyx_L20:;
/* "radiotool/algorithms/par_build_table.pyx":64
 *
 * # * after pause, don't go to non-first segment beat
 * if (p.n_beats <= column < p.p0_full) and (not backward): # <<<<<<<<<<<<<<
 * for i in range(p.p0_full, p.all_full):
 * tc_column[i] += p.pen_val
 */
__pyx_t_1 = (__pyx_v_p.n_beats <= __pyx_v_column);
if (__pyx_t_1) {
__pyx_t_1 = (__pyx_v_column < __pyx_v_p.p0_full);
}
if ((__pyx_t_1 != 0)) {
__pyx_t_16 = ((!(__pyx_v_backward != 0)) != 0);
__pyx_t_17 = __pyx_t_16;
} else {
__pyx_t_17 = (__pyx_t_1 != 0);
}
if (__pyx_t_17) {
/* "radiotool/algorithms/par_build_table.pyx":65
 * # * after pause, don't go to non-first segment beat
 * if (p.n_beats <= column < p.p0_full) and (not backward):
 * for i in range(p.p0_full, p.all_full): # <<<<<<<<<<<<<<
 * tc_column[i] += p.pen_val
 * elif (column >= p.p0_full) and backward:
 */
__pyx_t_19 = __pyx_v_p.all_full;
for (__pyx_t_21 = __pyx_v_p.p0_full; __pyx_t_21 < __pyx_t_19; __pyx_t_21+=1) {
__pyx_v_i = __pyx_t_21;
/* "radiotool/algorithms/par_build_table.pyx":66
 * if (p.n_beats <= column < p.p0_full) and (not backward):
 * for i in range(p.p0_full, p.all_full):
 * tc_column[i] += p.pen_val # <<<<<<<<<<<<<<
 * elif (column >= p.p0_full) and backward:
 * for i in range(p.n_beats, p.p0_full):
 */
__pyx_t_22 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_22 * __pyx_v_tc_column.strides[0]) )) += __pyx_v_p.pen_val;
}
goto __pyx_L23;
}
/* "radiotool/algorithms/par_build_table.pyx":67
 * for i in range(p.p0_full, p.all_full):
 * tc_column[i] += p.pen_val
 * elif (column >= p.p0_full) and backward: # <<<<<<<<<<<<<<
 * for i in range(p.n_beats, p.p0_full):
 * tc_column[i] += p.pen_val
 */
__pyx_t_17 = (__pyx_v_column >= __pyx_v_p.p0_full);
if (__pyx_t_17) {
__pyx_t_1 = (__pyx_v_backward != 0);
} else {
__pyx_t_1 = __pyx_t_17;
}
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":68
 * tc_column[i] += p.pen_val
 * elif (column >= p.p0_full) and backward:
 * for i in range(p.n_beats, p.p0_full): # <<<<<<<<<<<<<<
 * tc_column[i] += p.pen_val
 *
 */
__pyx_t_19 = __pyx_v_p.p0_full;
for (__pyx_t_21 = __pyx_v_p.n_beats; __pyx_t_21 < __pyx_t_19; __pyx_t_21+=1) {
__pyx_v_i = __pyx_t_21;
/* "radiotool/algorithms/par_build_table.pyx":69
 * elif (column >= p.p0_full) and backward:
 * for i in range(p.n_beats, p.p0_full):
 * tc_column[i] += p.pen_val # <<<<<<<<<<<<<<
 *
 * # * don't move between beats the don't follow segment index
 */
__pyx_t_23 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_23 * __pyx_v_tc_column.strides[0]) )) += __pyx_v_p.pen_val;
}
goto __pyx_L23;
}
__pyx_L23:;
/* "radiotool/algorithms/par_build_table.pyx":72
 *
 * # * don't move between beats the don't follow segment index
 * if column < p.p0_full: # <<<<<<<<<<<<<<
 * for i in range(p.p0_full):
 * tc_column[i] += p.pen_val
 */
__pyx_t_1 = ((__pyx_v_column < __pyx_v_p.p0_full) != 0);
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":73
 * # * don't move between beats the don't follow segment index
 * if column < p.p0_full:
 * for i in range(p.p0_full): # <<<<<<<<<<<<<<
 * tc_column[i] += p.pen_val
 *
 */
__pyx_t_19 = __pyx_v_p.p0_full;
for (__pyx_t_21 = 0; __pyx_t_21 < __pyx_t_19; __pyx_t_21+=1) {
__pyx_v_i = __pyx_t_21;
/* "radiotool/algorithms/par_build_table.pyx":74
 * if column < p.p0_full:
 * for i in range(p.p0_full):
 * tc_column[i] += p.pen_val # <<<<<<<<<<<<<<
 *
 * beat_seg_i = column / p.n_beats
 */
__pyx_t_24 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_24 * __pyx_v_tc_column.strides[0]) )) += __pyx_v_p.pen_val;
}
/* "radiotool/algorithms/par_build_table.pyx":76
 * tc_column[i] += p.pen_val
 *
 * beat_seg_i = column / p.n_beats # <<<<<<<<<<<<<<
 *
 * if (beat_seg_i > 0) and (not backward):
 */
/* C truncating int division: segment-repetition index of `column` */
__pyx_v_beat_seg_i = (__pyx_v_column / __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":78
 * beat_seg_i = column / p.n_beats
 *
 * if (beat_seg_i > 0) and (not backward): # <<<<<<<<<<<<<<
 * for i in range((beat_seg_i - 1) * p.n_beats, beat_seg_i * p.n_beats):
 * tc_column[i] -= p.pen_val
 */
__pyx_t_1 = ((__pyx_v_beat_seg_i > 0) != 0);
if (__pyx_t_1) {
__pyx_t_17 = ((!(__pyx_v_backward != 0)) != 0);
__pyx_t_16 = __pyx_t_17;
} else {
__pyx_t_16 = __pyx_t_1;
}
if (__pyx_t_16) {
/* "radiotool/algorithms/par_build_table.pyx":79
 *
 * if (beat_seg_i > 0) and (not backward):
 * for i in range((beat_seg_i - 1) * p.n_beats, beat_seg_i * p.n_beats): # <<<<<<<<<<<<<<
 * tc_column[i] -= p.pen_val
 *
 */
__pyx_t_19 = (__pyx_v_beat_seg_i * __pyx_v_p.n_beats);
for (__pyx_t_21 = ((__pyx_v_beat_seg_i - 1) * __pyx_v_p.n_beats); __pyx_t_21 < __pyx_t_19; __pyx_t_21+=1) {
__pyx_v_i = __pyx_t_21;
/* "radiotool/algorithms/par_build_table.pyx":80
 * if (beat_seg_i > 0) and (not backward):
 * for i in range((beat_seg_i - 1) * p.n_beats, beat_seg_i * p.n_beats):
 * tc_column[i] -= p.pen_val # <<<<<<<<<<<<<<
 *
 * elif (beat_seg_i < p.max_beats - 1) and backward:
 */
__pyx_t_25 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_25 * __pyx_v_tc_column.strides[0]) )) -= __pyx_v_p.pen_val;
}
goto __pyx_L31;
}
/* "radiotool/algorithms/par_build_table.pyx":82
 * tc_column[i] -= p.pen_val
 *
 * elif (beat_seg_i < p.max_beats - 1) and backward: # <<<<<<<<<<<<<<
 * for i in range((beat_seg_i + 1) * p.n_beats, (beat_seg_i + 2) * p.n_beats):
 * tc_column[i] -= p.pen_val
 */
__pyx_t_16 = (__pyx_v_beat_seg_i < (__pyx_v_p.max_beats - 1));
if (__pyx_t_16) {
__pyx_t_1 = (__pyx_v_backward != 0);
} else {
__pyx_t_1 = __pyx_t_16;
}
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":83
 *
 * elif (beat_seg_i < p.max_beats - 1) and backward:
 * for i in range((beat_seg_i + 1) * p.n_beats, (beat_seg_i + 2) * p.n_beats): # <<<<<<<<<<<<<<
 * tc_column[i] -= p.pen_val
 *
 */
__pyx_t_26 = ((__pyx_v_beat_seg_i + 2) * __pyx_v_p.n_beats);
for (__pyx_t_19 = ((__pyx_v_beat_seg_i + 1) * __pyx_v_p.n_beats); __pyx_t_19 < __pyx_t_26; __pyx_t_19+=1) {
__pyx_v_i = __pyx_t_19;
/* "radiotool/algorithms/par_build_table.pyx":84
 * elif (beat_seg_i < p.max_beats - 1) and backward:
 * for i in range((beat_seg_i + 1) * p.n_beats, (beat_seg_i + 2) * p.n_beats):
 * tc_column[i] -= p.pen_val # <<<<<<<<<<<<<<
 *
 * # you're also allowed to move infinitely among the
 */
__pyx_t_21 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_21 * __pyx_v_tc_column.strides[0]) )) -= __pyx_v_p.pen_val;
}
goto __pyx_L31;
}
__pyx_L31:;
/* "radiotool/algorithms/par_build_table.pyx":88
 * # you're also allowed to move infinitely among the
 * # last beat if max_beats is not set (== -1)
 * if p.max_beats == -1 and (beat_seg_i == p.min_beats): # <<<<<<<<<<<<<<
 * for i in range(beat_seg_i * p.n_beats, (beat_seg_i + 1) * p.n_beats):
 * tc_column[i] -= p.pen_val
 */
__pyx_t_1 = ((__pyx_v_p.max_beats == -1) != 0);
if (__pyx_t_1) {
__pyx_t_16 = ((__pyx_v_beat_seg_i == __pyx_v_p.min_beats) != 0);
__pyx_t_17 = __pyx_t_16;
} else {
__pyx_t_17 = __pyx_t_1;
}
if (__pyx_t_17) {
/* "radiotool/algorithms/par_build_table.pyx":89
 * # last beat if max_beats is not set (== -1)
 * if p.max_beats == -1 and (beat_seg_i == p.min_beats):
 * for i in range(beat_seg_i * p.n_beats, (beat_seg_i + 1) * p.n_beats): # <<<<<<<<<<<<<<
 * tc_column[i] -= p.pen_val
 *
 */
__pyx_t_26 = ((__pyx_v_beat_seg_i + 1) * __pyx_v_p.n_beats);
for (__pyx_t_19 = (__pyx_v_beat_seg_i * __pyx_v_p.n_beats); __pyx_t_19 < __pyx_t_26; __pyx_t_19+=1) {
__pyx_v_i = __pyx_t_19;
/* "radiotool/algorithms/par_build_table.pyx":90
 * if p.max_beats == -1 and (beat_seg_i == p.min_beats):
 * for i in range(beat_seg_i * p.n_beats, (beat_seg_i + 1) * p.n_beats):
 * tc_column[i] -= p.pen_val # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_27 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_27 * __pyx_v_tc_column.strides[0]) )) -= __pyx_v_p.pen_val;
}
goto __pyx_L36;
}
__pyx_L36:;
goto __pyx_L28;
}
__pyx_L28:;
/* "radiotool/algorithms/par_build_table.pyx":23
 *
 *
 * cdef void get_tc_column(double[:, :] tc, int column, double[:] tc_column, int backward, Params p) nogil: # <<<<<<<<<<<<<<
 * cdef int tc_index = 0
 * cdef int beat_seg_i = 0
 */
/* function exit code */
}
/* "radiotool/algorithms/par_build_table.pyx":93
*
*
* cdef double get_pen_value(double[:, :] pen, int i, int l, int global_start_l, Params p) nogil: # <<<<<<<<<<<<<<
* cdef int pen_index = 0
* if i >= p.p0_full:
*/
/*
 * NOTE(review): machine-generated by Cython from par_build_table.pyx
 * (cdef get_pen_value). Do NOT hand-edit; regenerate from the .pyx.
 *
 * Looks up the penalty value for "extended" beat index i at column l:
 *  - i >= p.p0_full: i is a pause beat, mapped to row
 *    p.n_beats + (i - p.p0_full) of pen;
 *  - otherwise: i is a music beat, mapped to row i % p.n_beats.
 * Constraint: when global_start_l == 0 and i lies in
 * [p.n_beats, p.p0_full) (a non-first segment beat at song start),
 * p.pen_val is added to discourage starting there.
 * Runs without the GIL (nogil); no Python objects are touched.
 */
static double __pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_value(__Pyx_memviewslice __pyx_v_pen, int __pyx_v_i, int __pyx_v_l, int __pyx_v_global_start_l, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params __pyx_v_p) {
int __pyx_v_pen_index;
double __pyx_v_new_pen;
double __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
/* "radiotool/algorithms/par_build_table.pyx":94
*
* cdef double get_pen_value(double[:, :] pen, int i, int l, int global_start_l, Params p) nogil:
* cdef int pen_index = 0 # <<<<<<<<<<<<<<
* if i >= p.p0_full:
* pen_index = p.n_beats + (i - p.p0_full)
*/
__pyx_v_pen_index = 0;
/* "radiotool/algorithms/par_build_table.pyx":95
* cdef double get_pen_value(double[:, :] pen, int i, int l, int global_start_l, Params p) nogil:
* cdef int pen_index = 0
* if i >= p.p0_full: # <<<<<<<<<<<<<<
* pen_index = p.n_beats + (i - p.p0_full)
* else:
*/
__pyx_t_1 = ((__pyx_v_i >= __pyx_v_p.p0_full) != 0);
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":96
* cdef int pen_index = 0
* if i >= p.p0_full:
* pen_index = p.n_beats + (i - p.p0_full) # <<<<<<<<<<<<<<
* else:
* pen_index = i % p.n_beats
*/
__pyx_v_pen_index = (__pyx_v_p.n_beats + (__pyx_v_i - __pyx_v_p.p0_full));
goto __pyx_L3;
}
/*else*/ {
/* "radiotool/algorithms/par_build_table.pyx":98
* pen_index = p.n_beats + (i - p.p0_full)
* else:
* pen_index = i % p.n_beats # <<<<<<<<<<<<<<
* cdef double new_pen = pen[pen_index, l]
*
*/
__pyx_v_pen_index = (__pyx_v_i % __pyx_v_p.n_beats);
}
__pyx_L3:;
/* "radiotool/algorithms/par_build_table.pyx":99
* else:
* pen_index = i % p.n_beats
* cdef double new_pen = pen[pen_index, l] # <<<<<<<<<<<<<<
*
* #--- CONSTRAINTS ---#
*/
/* Raw strided memoryview read: pen[pen_index, l] (no bounds check in nogil). */
__pyx_t_2 = __pyx_v_pen_index;
__pyx_t_3 = __pyx_v_l;
__pyx_v_new_pen = (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_pen.data + __pyx_t_2 * __pyx_v_pen.strides[0]) ) + __pyx_t_3 * __pyx_v_pen.strides[1]) )));
/* "radiotool/algorithms/par_build_table.pyx":103
* #--- CONSTRAINTS ---#
* # * don't start song in segment beat other than first
* if global_start_l == 0 and (p.n_beats <= i < p.p0_full): # <<<<<<<<<<<<<<
* new_pen += p.pen_val
*
*/
/* Chained comparison p.n_beats <= i < p.p0_full expanded to two tests. */
__pyx_t_1 = ((__pyx_v_global_start_l == 0) != 0);
if (__pyx_t_1) {
__pyx_t_4 = (__pyx_v_p.n_beats <= __pyx_v_i);
if (__pyx_t_4) {
__pyx_t_4 = (__pyx_v_i < __pyx_v_p.p0_full);
}
__pyx_t_5 = (__pyx_t_4 != 0);
} else {
__pyx_t_5 = __pyx_t_1;
}
if (__pyx_t_5) {
/* "radiotool/algorithms/par_build_table.pyx":104
* # * don't start song in segment beat other than first
* if global_start_l == 0 and (p.n_beats <= i < p.p0_full):
* new_pen += p.pen_val # <<<<<<<<<<<<<<
*
* return new_pen
*/
__pyx_v_new_pen = (__pyx_v_new_pen + __pyx_v_p.pen_val);
goto __pyx_L4;
}
__pyx_L4:;
/* "radiotool/algorithms/par_build_table.pyx":106
* new_pen += p.pen_val
*
* return new_pen # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_new_pen;
goto __pyx_L0;
/* "radiotool/algorithms/par_build_table.pyx":93
*
*
* cdef double get_pen_value(double[:, :] pen, int i, int l, int global_start_l, Params p) nogil: # <<<<<<<<<<<<<<
* cdef int pen_index = 0
* if i >= p.p0_full:
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "radiotool/algorithms/par_build_table.pyx":109
*
*
* cdef void get_pen_column(double[:, :] pen, int column, double[:] new_pen, int global_start_l, Params p) nogil: # <<<<<<<<<<<<<<
* cdef int i, j
*
*/
/*
 * NOTE(review): machine-generated by Cython from par_build_table.pyx
 * (cdef get_pen_column). Do NOT hand-edit; regenerate from the .pyx.
 *
 * Expands one column of pen into the "extended" beat space new_pen:
 *  - for each of p.max_beats beat segments, copies pen[0..p.p0) into
 *    new_pen[i*p.n_beats + j];
 *  - copies the pause-beat rows pen[p.p0 + (i - p.p0_full)] into
 *    new_pen[p.p0_full..p.all_full);
 *  - when global_start_l == 0, adds p.pen_val to new_pen entries in
 *    [p.n_beats, p.p0_full) so the song cannot start mid-segment.
 * Caller must size new_pen to at least p.all_full elements — presumed
 * from the indexing; TODO confirm against callers. nogil, no bounds
 * checks.
 */
static void __pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_column(__Pyx_memviewslice __pyx_v_pen, int __pyx_v_column, __Pyx_memviewslice __pyx_v_new_pen, int __pyx_v_global_start_l, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params __pyx_v_p) {
int __pyx_v_i;
int __pyx_v_j;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
/* "radiotool/algorithms/par_build_table.pyx":112
* cdef int i, j
*
* for i in range(p.max_beats): # <<<<<<<<<<<<<<
* for j in range(p.p0):
* new_pen[i * p.n_beats + j] = pen[j, column]
*/
__pyx_t_1 = __pyx_v_p.max_beats;
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
__pyx_v_i = __pyx_t_2;
/* "radiotool/algorithms/par_build_table.pyx":113
*
* for i in range(p.max_beats):
* for j in range(p.p0): # <<<<<<<<<<<<<<
* new_pen[i * p.n_beats + j] = pen[j, column]
*
*/
__pyx_t_3 = __pyx_v_p.p0;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_j = __pyx_t_4;
/* "radiotool/algorithms/par_build_table.pyx":114
* for i in range(p.max_beats):
* for j in range(p.p0):
* new_pen[i * p.n_beats + j] = pen[j, column] # <<<<<<<<<<<<<<
*
* for i in range(p.p0_full, p.all_full):
*/
__pyx_t_5 = __pyx_v_j;
__pyx_t_6 = __pyx_v_column;
__pyx_t_7 = ((__pyx_v_i * __pyx_v_p.n_beats) + __pyx_v_j);
*((double *) ( /* dim=0 */ (__pyx_v_new_pen.data + __pyx_t_7 * __pyx_v_new_pen.strides[0]) )) = (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_pen.data + __pyx_t_5 * __pyx_v_pen.strides[0]) ) + __pyx_t_6 * __pyx_v_pen.strides[1]) )));
}
}
/* "radiotool/algorithms/par_build_table.pyx":116
* new_pen[i * p.n_beats + j] = pen[j, column]
*
* for i in range(p.p0_full, p.all_full): # <<<<<<<<<<<<<<
* new_pen[i] = pen[p.p0 + i - p.p0_full, column]
*
*/
__pyx_t_1 = __pyx_v_p.all_full;
for (__pyx_t_2 = __pyx_v_p.p0_full; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
__pyx_v_i = __pyx_t_2;
/* "radiotool/algorithms/par_build_table.pyx":117
*
* for i in range(p.p0_full, p.all_full):
* new_pen[i] = pen[p.p0 + i - p.p0_full, column] # <<<<<<<<<<<<<<
*
* #--- CONSTRAINTS ---#
*/
__pyx_t_3 = ((__pyx_v_p.p0 + __pyx_v_i) - __pyx_v_p.p0_full);
__pyx_t_4 = __pyx_v_column;
__pyx_t_8 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_new_pen.data + __pyx_t_8 * __pyx_v_new_pen.strides[0]) )) = (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_pen.data + __pyx_t_3 * __pyx_v_pen.strides[0]) ) + __pyx_t_4 * __pyx_v_pen.strides[1]) )));
}
/* "radiotool/algorithms/par_build_table.pyx":121
* #--- CONSTRAINTS ---#
* # * don't start song in segment beat other than first
* if global_start_l == 0: # <<<<<<<<<<<<<<
* for i in range(p.n_beats, p.p0_full):
* new_pen[i] += p.pen_val
*/
__pyx_t_9 = ((__pyx_v_global_start_l == 0) != 0);
if (__pyx_t_9) {
/* "radiotool/algorithms/par_build_table.pyx":122
* # * don't start song in segment beat other than first
* if global_start_l == 0:
* for i in range(p.n_beats, p.p0_full): # <<<<<<<<<<<<<<
* new_pen[i] += p.pen_val
*
*/
__pyx_t_1 = __pyx_v_p.p0_full;
for (__pyx_t_2 = __pyx_v_p.n_beats; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
__pyx_v_i = __pyx_t_2;
/* "radiotool/algorithms/par_build_table.pyx":123
* if global_start_l == 0:
* for i in range(p.n_beats, p.p0_full):
* new_pen[i] += p.pen_val # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_10 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_new_pen.data + __pyx_t_10 * __pyx_v_new_pen.strides[0]) )) += __pyx_v_p.pen_val;
}
goto __pyx_L9;
}
__pyx_L9:;
/* "radiotool/algorithms/par_build_table.pyx":109
*
*
* cdef void get_pen_column(double[:, :] pen, int column, double[:] new_pen, int global_start_l, Params p) nogil: # <<<<<<<<<<<<<<
* cdef int i, j
*
*/
/* function exit code */
}
/* "radiotool/algorithms/par_build_table.pyx":126
*
*
* cdef void space_efficient_cost_with_duration_constraint( # <<<<<<<<<<<<<<
* double[:, :] tc, double[:, :] pen, int start_beat, int end_beat, int global_start_l, Params p,
* double[:] cost, double[:] pen_val, double[:] vals_col, double[:] min_vals) nogil:
*/
/*
 * NOTE(review): machine-generated by Cython from par_build_table.pyx
 * (cdef space_efficient_cost_with_duration_constraint). Do NOT
 * hand-edit; regenerate from the .pyx source instead.
 *
 * Forward dynamic program over the columns of pen, keeping only one
 * cost column in memory ("space efficient"): cost is seeded from
 * column 0 (pinned to start_beat when start_beat != -1, else a full
 * penalty column), then for each subsequent column l the minimum
 * transition cost into every extended beat index is computed into
 * min_vals and copied back into cost. The literal 99999999 stands in
 * for infinity (see "# N.inf" in the .pyx). The final column with
 * end_beat set is handled specially via get_tc_column/vals_col.
 * nogil; errors (only possible from the memoryview copy at the end)
 * are reported through __Pyx_WriteUnraisable since the function
 * returns void.
 */
static void __pyx_f_9radiotool_10algorithms_15par_build_table_space_efficient_cost_with_duration_constraint(__Pyx_memviewslice __pyx_v_tc, __Pyx_memviewslice __pyx_v_pen, int __pyx_v_start_beat, int __pyx_v_end_beat, int __pyx_v_global_start_l, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params __pyx_v_p, __Pyx_memviewslice __pyx_v_cost, __Pyx_memviewslice __pyx_v_pen_val, __Pyx_memviewslice __pyx_v_vals_col, __Pyx_memviewslice __pyx_v_min_vals) {
int __pyx_v_l;
int __pyx_v_idx;
int __pyx_v_i;
int __pyx_v_beat_seg_i;
int __pyx_v_seg_start_beat;
int __pyx_v_j;
int __pyx_v_full_j;
int __pyx_v_orig_beat_j;
double __pyx_v_minval;
double __pyx_v_tmpval;
double __pyx_v_end_pen;
int __pyx_v_orig_beat_i;
int __pyx_t_1;
__Pyx_memviewslice __pyx_t_2 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
__Pyx_memviewslice __pyx_t_8 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_9;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
int __pyx_t_12;
int __pyx_t_13;
int __pyx_t_14;
int __pyx_t_15;
int __pyx_t_16;
long __pyx_t_17;
int __pyx_t_18;
int __pyx_t_19;
long __pyx_t_20;
int __pyx_t_21;
int __pyx_t_22;
int __pyx_t_23;
int __pyx_t_24;
int __pyx_t_25;
int __pyx_t_26;
int __pyx_t_27;
int __pyx_t_28;
int __pyx_t_29;
int __pyx_t_30;
int __pyx_t_31;
int __pyx_t_32;
int __pyx_t_33;
int __pyx_t_34;
int __pyx_t_35;
int __pyx_t_36;
int __pyx_t_37;
__Pyx_memviewslice __pyx_t_38 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "radiotool/algorithms/par_build_table.pyx":134
*
* # generate initial cost
* if start_beat != -1: # <<<<<<<<<<<<<<
* cost[:] = 99999999 # N.inf
* cost[start_beat] = get_pen_value(pen, start_beat, 0, global_start_l, p)
*/
__pyx_t_1 = ((__pyx_v_start_beat != -1) != 0);
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":135
* # generate initial cost
* if start_beat != -1:
* cost[:] = 99999999 # N.inf # <<<<<<<<<<<<<<
* cost[start_beat] = get_pen_value(pen, start_beat, 0, global_start_l, p)
* else:
*/
/* Inlined scalar broadcast: fill the 1-D cost slice with 99999999.0. */
__pyx_t_3 = -1;
__pyx_t_2.data = __pyx_v_cost.data;
__pyx_t_2.memview = __pyx_v_cost.memview;
__PYX_INC_MEMVIEW(&__pyx_t_2, 0);
__pyx_t_2.shape[0] = __pyx_v_cost.shape[0];
__pyx_t_2.strides[0] = __pyx_v_cost.strides[0];
__pyx_t_2.suboffsets[0] = -1;
{
double __pyx_temp_scalar = 99999999.0;
{
Py_ssize_t __pyx_temp_extent_0 = __pyx_t_2.shape[0];
Py_ssize_t __pyx_temp_stride_0 = __pyx_t_2.strides[0];
char *__pyx_temp_pointer_0;
Py_ssize_t __pyx_temp_idx_0;
__pyx_temp_pointer_0 = __pyx_t_2.data;
for (__pyx_temp_idx_0 = 0; __pyx_temp_idx_0 < __pyx_temp_extent_0; __pyx_temp_idx_0++) {
*((double *) __pyx_temp_pointer_0) = __pyx_temp_scalar;
__pyx_temp_pointer_0 += __pyx_temp_stride_0;
}
}
}
__PYX_XDEC_MEMVIEW(&__pyx_t_2, 0);
/* "radiotool/algorithms/par_build_table.pyx":136
* if start_beat != -1:
* cost[:] = 99999999 # N.inf
* cost[start_beat] = get_pen_value(pen, start_beat, 0, global_start_l, p) # <<<<<<<<<<<<<<
* else:
* get_pen_column(pen, 0, cost, global_start_l, p)
*/
__pyx_t_3 = __pyx_v_start_beat;
*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_3 * __pyx_v_cost.strides[0]) )) = __pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_value(__pyx_v_pen, __pyx_v_start_beat, 0, __pyx_v_global_start_l, __pyx_v_p);
goto __pyx_L3;
}
/*else*/ {
/* "radiotool/algorithms/par_build_table.pyx":138
* cost[start_beat] = get_pen_value(pen, start_beat, 0, global_start_l, p)
* else:
* get_pen_column(pen, 0, cost, global_start_l, p) # <<<<<<<<<<<<<<
*
* # optimize
*/
__pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_column(__pyx_v_pen, 0, __pyx_v_cost, __pyx_v_global_start_l, __pyx_v_p);
}
__pyx_L3:;
/* "radiotool/algorithms/par_build_table.pyx":141
*
* # optimize
* for l in range(1, pen.shape[1]): # <<<<<<<<<<<<<<
* if l == pen.shape[1] - 1 and end_beat != -1:
* # handle end beat set
*/
__pyx_t_4 = (__pyx_v_pen.shape[1]);
for (__pyx_t_5 = 1; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_l = __pyx_t_5;
/* "radiotool/algorithms/par_build_table.pyx":142
* # optimize
* for l in range(1, pen.shape[1]):
* if l == pen.shape[1] - 1 and end_beat != -1: # <<<<<<<<<<<<<<
* # handle end beat set
* end_pen = get_pen_value(pen, end_beat, l, global_start_l + l, p)
*/
__pyx_t_1 = ((__pyx_v_l == ((__pyx_v_pen.shape[1]) - 1)) != 0);
if (__pyx_t_1) {
__pyx_t_6 = ((__pyx_v_end_beat != -1) != 0);
__pyx_t_7 = __pyx_t_6;
} else {
__pyx_t_7 = __pyx_t_1;
}
if (__pyx_t_7) {
/* "radiotool/algorithms/par_build_table.pyx":144
* if l == pen.shape[1] - 1 and end_beat != -1:
* # handle end beat set
* end_pen = get_pen_value(pen, end_beat, l, global_start_l + l, p) # <<<<<<<<<<<<<<
* get_tc_column(tc, end_beat, vals_col, 0, p)
*
*/
__pyx_v_end_pen = __pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_value(__pyx_v_pen, __pyx_v_end_beat, __pyx_v_l, (__pyx_v_global_start_l + __pyx_v_l), __pyx_v_p);
/* "radiotool/algorithms/par_build_table.pyx":145
* # handle end beat set
* end_pen = get_pen_value(pen, end_beat, l, global_start_l + l, p)
* get_tc_column(tc, end_beat, vals_col, 0, p) # <<<<<<<<<<<<<<
*
* min_vals[:] = 99999999 # N.inf
*/
__pyx_f_9radiotool_10algorithms_15par_build_table_get_tc_column(__pyx_v_tc, __pyx_v_end_beat, __pyx_v_vals_col, 0, __pyx_v_p);
/* "radiotool/algorithms/par_build_table.pyx":147
* get_tc_column(tc, end_beat, vals_col, 0, p)
*
* min_vals[:] = 99999999 # N.inf # <<<<<<<<<<<<<<
* minval = -1
* for i in range(vals_col.shape[0]):
*/
/* Inlined scalar broadcast: fill the 1-D min_vals slice with 99999999.0. */
__pyx_t_9 = -1;
__pyx_t_8.data = __pyx_v_min_vals.data;
__pyx_t_8.memview = __pyx_v_min_vals.memview;
__PYX_INC_MEMVIEW(&__pyx_t_8, 0);
__pyx_t_8.shape[0] = __pyx_v_min_vals.shape[0];
__pyx_t_8.strides[0] = __pyx_v_min_vals.strides[0];
__pyx_t_8.suboffsets[0] = -1;
{
double __pyx_temp_scalar = 99999999.0;
{
Py_ssize_t __pyx_temp_extent_0 = __pyx_t_8.shape[0];
Py_ssize_t __pyx_temp_stride_0 = __pyx_t_8.strides[0];
char *__pyx_temp_pointer_0;
Py_ssize_t __pyx_temp_idx_0;
__pyx_temp_pointer_0 = __pyx_t_8.data;
for (__pyx_temp_idx_0 = 0; __pyx_temp_idx_0 < __pyx_temp_extent_0; __pyx_temp_idx_0++) {
*((double *) __pyx_temp_pointer_0) = __pyx_temp_scalar;
__pyx_temp_pointer_0 += __pyx_temp_stride_0;
}
}
}
__PYX_XDEC_MEMVIEW(&__pyx_t_8, 0);
/* "radiotool/algorithms/par_build_table.pyx":148
*
* min_vals[:] = 99999999 # N.inf
* minval = -1 # <<<<<<<<<<<<<<
* for i in range(vals_col.shape[0]):
* if minval == -1 or vals_col[i] + cost[i] + end_pen < minval:
*/
/* -1 is the "no minimum yet" sentinel used throughout this function. */
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":149
* min_vals[:] = 99999999 # N.inf
* minval = -1
* for i in range(vals_col.shape[0]): # <<<<<<<<<<<<<<
* if minval == -1 or vals_col[i] + cost[i] + end_pen < minval:
* minval = vals_col[i] + cost[i] + end_pen
*/
__pyx_t_10 = (__pyx_v_vals_col.shape[0]);
for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_10; __pyx_t_9+=1) {
__pyx_v_i = __pyx_t_9;
/* "radiotool/algorithms/par_build_table.pyx":150
* minval = -1
* for i in range(vals_col.shape[0]):
* if minval == -1 or vals_col[i] + cost[i] + end_pen < minval: # <<<<<<<<<<<<<<
* minval = vals_col[i] + cost[i] + end_pen
*
*/
__pyx_t_7 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_7) {
__pyx_t_11 = __pyx_v_i;
__pyx_t_12 = __pyx_v_i;
__pyx_t_1 = (((((*((double *) ( /* dim=0 */ (__pyx_v_vals_col.data + __pyx_t_11 * __pyx_v_vals_col.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_12 * __pyx_v_cost.strides[0]) )))) + __pyx_v_end_pen) < __pyx_v_minval) != 0);
__pyx_t_6 = __pyx_t_1;
} else {
__pyx_t_6 = __pyx_t_7;
}
if (__pyx_t_6) {
/* "radiotool/algorithms/par_build_table.pyx":151
* for i in range(vals_col.shape[0]):
* if minval == -1 or vals_col[i] + cost[i] + end_pen < minval:
* minval = vals_col[i] + cost[i] + end_pen # <<<<<<<<<<<<<<
*
* min_vals[end_beat] = minval
*/
__pyx_t_13 = __pyx_v_i;
__pyx_t_14 = __pyx_v_i;
__pyx_v_minval = (((*((double *) ( /* dim=0 */ (__pyx_v_vals_col.data + __pyx_t_13 * __pyx_v_vals_col.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_14 * __pyx_v_cost.strides[0]) )))) + __pyx_v_end_pen);
goto __pyx_L9;
}
__pyx_L9:;
}
/* "radiotool/algorithms/par_build_table.pyx":153
* minval = vals_col[i] + cost[i] + end_pen
*
* min_vals[end_beat] = minval # <<<<<<<<<<<<<<
*
* else:
*/
__pyx_t_9 = __pyx_v_end_beat;
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_9 * __pyx_v_min_vals.strides[0]) )) = __pyx_v_minval;
goto __pyx_L6;
}
/*else*/ {
/* "radiotool/algorithms/par_build_table.pyx":156
*
* else:
* get_pen_column(pen, l, pen_val, global_start_l + l, p) # <<<<<<<<<<<<<<
*
* # Based on the nature of our problem
*/
__pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_column(__pyx_v_pen, __pyx_v_l, __pyx_v_pen_val, (__pyx_v_global_start_l + __pyx_v_l), __pyx_v_p);
/* "radiotool/algorithms/par_build_table.pyx":171
*
* # first beat segment
* for idx in range(p.n_beats): # <<<<<<<<<<<<<<
* # could only get here from the last pause beat
* min_vals[idx] = tc[p.n_beats + p.n_pauses - 1, idx] + pen_val[idx] + cost[p.all_full - 1]
*/
__pyx_t_15 = __pyx_v_p.n_beats;
for (__pyx_t_16 = 0; __pyx_t_16 < __pyx_t_15; __pyx_t_16+=1) {
__pyx_v_idx = __pyx_t_16;
/* "radiotool/algorithms/par_build_table.pyx":173
* for idx in range(p.n_beats):
* # could only get here from the last pause beat
* min_vals[idx] = tc[p.n_beats + p.n_pauses - 1, idx] + pen_val[idx] + cost[p.all_full - 1] # <<<<<<<<<<<<<<
*
* # all other music beat segments
*/
__pyx_t_17 = ((__pyx_v_p.n_beats + __pyx_v_p.n_pauses) - 1);
__pyx_t_18 = __pyx_v_idx;
__pyx_t_19 = __pyx_v_idx;
__pyx_t_20 = (__pyx_v_p.all_full - 1);
__pyx_t_21 = __pyx_v_idx;
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_21 * __pyx_v_min_vals.strides[0]) )) = (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_17 * __pyx_v_tc.strides[0]) ) + __pyx_t_18 * __pyx_v_tc.strides[1]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_pen_val.data + __pyx_t_19 * __pyx_v_pen_val.strides[0]) )))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_20 * __pyx_v_cost.strides[0]) ))));
}
/* "radiotool/algorithms/par_build_table.pyx":176
*
* # all other music beat segments
* for idx in range(p.n_beats, p.p0_full): # <<<<<<<<<<<<<<
* beat_seg_i = idx / p.n_beats
* orig_beat_i = idx % p.n_beats
*/
__pyx_t_15 = __pyx_v_p.p0_full;
for (__pyx_t_16 = __pyx_v_p.n_beats; __pyx_t_16 < __pyx_t_15; __pyx_t_16+=1) {
__pyx_v_idx = __pyx_t_16;
/* "radiotool/algorithms/par_build_table.pyx":177
* # all other music beat segments
* for idx in range(p.n_beats, p.p0_full):
* beat_seg_i = idx / p.n_beats # <<<<<<<<<<<<<<
* orig_beat_i = idx % p.n_beats
*
*/
/* NOTE(review): C integer division; idx >= p.n_beats > 0 here, so
 * truncation matches Python floor division for these operands. */
__pyx_v_beat_seg_i = (__pyx_v_idx / __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":178
* for idx in range(p.n_beats, p.p0_full):
* beat_seg_i = idx / p.n_beats
* orig_beat_i = idx % p.n_beats # <<<<<<<<<<<<<<
*
* # must have gotten here from beat_seg_i - 1
*/
__pyx_v_orig_beat_i = (__pyx_v_idx % __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":183
* # and minimum value will be min cost from
* # another music beat
* seg_start_beat = (beat_seg_i - 1) * p.n_beats # <<<<<<<<<<<<<<
* minval = -1
* for j in range(p.n_beats):
*/
__pyx_v_seg_start_beat = ((__pyx_v_beat_seg_i - 1) * __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":184
* # another music beat
* seg_start_beat = (beat_seg_i - 1) * p.n_beats
* minval = -1 # <<<<<<<<<<<<<<
* for j in range(p.n_beats):
* tmpval = tc[j, orig_beat_i] + pen_val[idx] + cost[seg_start_beat + j]
*/
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":185
* seg_start_beat = (beat_seg_i - 1) * p.n_beats
* minval = -1
* for j in range(p.n_beats): # <<<<<<<<<<<<<<
* tmpval = tc[j, orig_beat_i] + pen_val[idx] + cost[seg_start_beat + j]
* if minval == -1 or tmpval < minval:
*/
__pyx_t_22 = __pyx_v_p.n_beats;
for (__pyx_t_23 = 0; __pyx_t_23 < __pyx_t_22; __pyx_t_23+=1) {
__pyx_v_j = __pyx_t_23;
/* "radiotool/algorithms/par_build_table.pyx":186
* minval = -1
* for j in range(p.n_beats):
* tmpval = tc[j, orig_beat_i] + pen_val[idx] + cost[seg_start_beat + j] # <<<<<<<<<<<<<<
* if minval == -1 or tmpval < minval:
* minval = tmpval
*/
__pyx_t_24 = __pyx_v_j;
__pyx_t_25 = __pyx_v_orig_beat_i;
__pyx_t_26 = __pyx_v_idx;
__pyx_t_27 = (__pyx_v_seg_start_beat + __pyx_v_j);
__pyx_v_tmpval = (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_24 * __pyx_v_tc.strides[0]) ) + __pyx_t_25 * __pyx_v_tc.strides[1]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_pen_val.data + __pyx_t_26 * __pyx_v_pen_val.strides[0]) )))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_27 * __pyx_v_cost.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":187
* for j in range(p.n_beats):
* tmpval = tc[j, orig_beat_i] + pen_val[idx] + cost[seg_start_beat + j]
* if minval == -1 or tmpval < minval: # <<<<<<<<<<<<<<
* minval = tmpval
*
*/
__pyx_t_6 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_6) {
__pyx_t_7 = ((__pyx_v_tmpval < __pyx_v_minval) != 0);
__pyx_t_1 = __pyx_t_7;
} else {
__pyx_t_1 = __pyx_t_6;
}
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":188
* tmpval = tc[j, orig_beat_i] + pen_val[idx] + cost[seg_start_beat + j]
* if minval == -1 or tmpval < minval:
* minval = tmpval # <<<<<<<<<<<<<<
*
* min_vals[idx] = minval
*/
__pyx_v_minval = __pyx_v_tmpval;
goto __pyx_L16;
}
__pyx_L16:;
}
/* "radiotool/algorithms/par_build_table.pyx":190
* minval = tmpval
*
* min_vals[idx] = minval # <<<<<<<<<<<<<<
*
* # first pause beat:
*/
__pyx_t_22 = __pyx_v_idx;
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_22 * __pyx_v_min_vals.strides[0]) )) = __pyx_v_minval;
}
/* "radiotool/algorithms/par_build_table.pyx":195
* # must have gotten here from
* # min beat <= beat seg <= max beat
* minval = -1 # <<<<<<<<<<<<<<
* for full_j in range(p.n_beats * (p.min_beats - 1), p.n_beats * p.max_beats):
* orig_beat_j = full_j % p.n_beats
*/
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":196
* # min beat <= beat seg <= max beat
* minval = -1
* for full_j in range(p.n_beats * (p.min_beats - 1), p.n_beats * p.max_beats): # <<<<<<<<<<<<<<
* orig_beat_j = full_j % p.n_beats
* tmpval = tc[orig_beat_j, p.p0] + pen_val[p.p0_full] + cost[full_j]
*/
__pyx_t_15 = (__pyx_v_p.n_beats * __pyx_v_p.max_beats);
for (__pyx_t_16 = (__pyx_v_p.n_beats * (__pyx_v_p.min_beats - 1)); __pyx_t_16 < __pyx_t_15; __pyx_t_16+=1) {
__pyx_v_full_j = __pyx_t_16;
/* "radiotool/algorithms/par_build_table.pyx":197
* minval = -1
* for full_j in range(p.n_beats * (p.min_beats - 1), p.n_beats * p.max_beats):
* orig_beat_j = full_j % p.n_beats # <<<<<<<<<<<<<<
* tmpval = tc[orig_beat_j, p.p0] + pen_val[p.p0_full] + cost[full_j]
* if minval == -1 or tmpval < minval:
*/
__pyx_v_orig_beat_j = (__pyx_v_full_j % __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":198
* for full_j in range(p.n_beats * (p.min_beats - 1), p.n_beats * p.max_beats):
* orig_beat_j = full_j % p.n_beats
* tmpval = tc[orig_beat_j, p.p0] + pen_val[p.p0_full] + cost[full_j] # <<<<<<<<<<<<<<
* if minval == -1 or tmpval < minval:
* minval = tmpval
*/
__pyx_t_23 = __pyx_v_orig_beat_j;
__pyx_t_28 = __pyx_v_p.p0;
__pyx_t_29 = __pyx_v_p.p0_full;
__pyx_t_30 = __pyx_v_full_j;
__pyx_v_tmpval = (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_23 * __pyx_v_tc.strides[0]) ) + __pyx_t_28 * __pyx_v_tc.strides[1]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_pen_val.data + __pyx_t_29 * __pyx_v_pen_val.strides[0]) )))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_30 * __pyx_v_cost.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":199
* orig_beat_j = full_j % p.n_beats
* tmpval = tc[orig_beat_j, p.p0] + pen_val[p.p0_full] + cost[full_j]
* if minval == -1 or tmpval < minval: # <<<<<<<<<<<<<<
* minval = tmpval
* min_vals[p.p0_full] = minval
*/
__pyx_t_1 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_1) {
__pyx_t_6 = ((__pyx_v_tmpval < __pyx_v_minval) != 0);
__pyx_t_7 = __pyx_t_6;
} else {
__pyx_t_7 = __pyx_t_1;
}
if (__pyx_t_7) {
/* "radiotool/algorithms/par_build_table.pyx":200
* tmpval = tc[orig_beat_j, p.p0] + pen_val[p.p0_full] + cost[full_j]
* if minval == -1 or tmpval < minval:
* minval = tmpval # <<<<<<<<<<<<<<
* min_vals[p.p0_full] = minval
*
*/
__pyx_v_minval = __pyx_v_tmpval;
goto __pyx_L19;
}
__pyx_L19:;
}
/* "radiotool/algorithms/par_build_table.pyx":201
* if minval == -1 or tmpval < minval:
* minval = tmpval
* min_vals[p.p0_full] = minval # <<<<<<<<<<<<<<
*
* # other pause beat
*/
__pyx_t_15 = __pyx_v_p.p0_full;
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_15 * __pyx_v_min_vals.strides[0]) )) = __pyx_v_minval;
/* "radiotool/algorithms/par_build_table.pyx":204
*
* # other pause beat
* for idx in range(p.p0_full + 1, p.all_full): # <<<<<<<<<<<<<<
* orig_beat_i = p.p0 + (idx - p.p0_full)
*
*/
__pyx_t_16 = __pyx_v_p.all_full;
for (__pyx_t_31 = (__pyx_v_p.p0_full + 1); __pyx_t_31 < __pyx_t_16; __pyx_t_31+=1) {
__pyx_v_idx = __pyx_t_31;
/* "radiotool/algorithms/par_build_table.pyx":205
* # other pause beat
* for idx in range(p.p0_full + 1, p.all_full):
* orig_beat_i = p.p0 + (idx - p.p0_full) # <<<<<<<<<<<<<<
*
* # must have gotten here from another pause beat
*/
__pyx_v_orig_beat_i = (__pyx_v_p.p0 + (__pyx_v_idx - __pyx_v_p.p0_full));
/* "radiotool/algorithms/par_build_table.pyx":208
*
* # must have gotten here from another pause beat
* minval = -1 # <<<<<<<<<<<<<<
* for j in range(p.n_pauses):
* tmpval = tc[p.p0 + j, orig_beat_i] + pen_val[idx] + cost[p.p0_full + j]
*/
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":209
* # must have gotten here from another pause beat
* minval = -1
* for j in range(p.n_pauses): # <<<<<<<<<<<<<<
* tmpval = tc[p.p0 + j, orig_beat_i] + pen_val[idx] + cost[p.p0_full + j]
* if minval == -1 or tmpval < minval:
*/
__pyx_t_32 = __pyx_v_p.n_pauses;
for (__pyx_t_33 = 0; __pyx_t_33 < __pyx_t_32; __pyx_t_33+=1) {
__pyx_v_j = __pyx_t_33;
/* "radiotool/algorithms/par_build_table.pyx":210
* minval = -1
* for j in range(p.n_pauses):
* tmpval = tc[p.p0 + j, orig_beat_i] + pen_val[idx] + cost[p.p0_full + j] # <<<<<<<<<<<<<<
* if minval == -1 or tmpval < minval:
* minval = tmpval
*/
__pyx_t_34 = (__pyx_v_p.p0 + __pyx_v_j);
__pyx_t_35 = __pyx_v_orig_beat_i;
__pyx_t_36 = __pyx_v_idx;
__pyx_t_37 = (__pyx_v_p.p0_full + __pyx_v_j);
__pyx_v_tmpval = (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_34 * __pyx_v_tc.strides[0]) ) + __pyx_t_35 * __pyx_v_tc.strides[1]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_pen_val.data + __pyx_t_36 * __pyx_v_pen_val.strides[0]) )))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_37 * __pyx_v_cost.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":211
* for j in range(p.n_pauses):
* tmpval = tc[p.p0 + j, orig_beat_i] + pen_val[idx] + cost[p.p0_full + j]
* if minval == -1 or tmpval < minval: # <<<<<<<<<<<<<<
* minval = tmpval
* min_vals[idx] = minval
*/
__pyx_t_7 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_7) {
__pyx_t_1 = ((__pyx_v_tmpval < __pyx_v_minval) != 0);
__pyx_t_6 = __pyx_t_1;
} else {
__pyx_t_6 = __pyx_t_7;
}
if (__pyx_t_6) {
/* "radiotool/algorithms/par_build_table.pyx":212
* tmpval = tc[p.p0 + j, orig_beat_i] + pen_val[idx] + cost[p.p0_full + j]
* if minval == -1 or tmpval < minval:
* minval = tmpval # <<<<<<<<<<<<<<
* min_vals[idx] = minval
*
*/
__pyx_v_minval = __pyx_v_tmpval;
goto __pyx_L24;
}
__pyx_L24:;
}
/* "radiotool/algorithms/par_build_table.pyx":213
* if minval == -1 or tmpval < minval:
* minval = tmpval
* min_vals[idx] = minval # <<<<<<<<<<<<<<
*
* cost[:] = min_vals
*/
__pyx_t_32 = __pyx_v_idx;
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_32 * __pyx_v_min_vals.strides[0]) )) = __pyx_v_minval;
}
}
__pyx_L6:;
/* "radiotool/algorithms/par_build_table.pyx":215
* min_vals[idx] = minval
*
* cost[:] = min_vals # <<<<<<<<<<<<<<
*
*
*/
/* Memoryview slice-assign (the only fallible operation in this nogil
 * function); failure jumps to __pyx_L1_error. */
__pyx_t_16 = -1;
__pyx_t_38.data = __pyx_v_cost.data;
__pyx_t_38.memview = __pyx_v_cost.memview;
__PYX_INC_MEMVIEW(&__pyx_t_38, 0);
__pyx_t_38.shape[0] = __pyx_v_cost.shape[0];
__pyx_t_38.strides[0] = __pyx_v_cost.strides[0];
__pyx_t_38.suboffsets[0] = -1;
if (unlikely(__pyx_memoryview_copy_contents(__pyx_v_min_vals, __pyx_t_38, 1, 1, 0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__PYX_XDEC_MEMVIEW(&__pyx_t_38, 0);
}
/* "radiotool/algorithms/par_build_table.pyx":126
*
*
* cdef void space_efficient_cost_with_duration_constraint( # <<<<<<<<<<<<<<
* double[:, :] tc, double[:, :] pen, int start_beat, int end_beat, int global_start_l, Params p,
* double[:] cost, double[:] pen_val, double[:] vals_col, double[:] min_vals) nogil:
*/
/* function exit code */
goto __pyx_L0;
/* Void nogil function: errors cannot propagate, so they are written
 * out as "unraisable" instead of being raised to the caller. */
__pyx_L1_error:;
__PYX_XDEC_MEMVIEW(&__pyx_t_2, 0);
__PYX_XDEC_MEMVIEW(&__pyx_t_8, 0);
__PYX_XDEC_MEMVIEW(&__pyx_t_38, 0);
__Pyx_WriteUnraisable("radiotool.algorithms.par_build_table.space_efficient_cost_with_duration_constraint", __pyx_clineno, __pyx_lineno, __pyx_filename, 0);
__pyx_L0:;
}
/* "radiotool/algorithms/par_build_table.pyx":218
*
*
* cdef void backward_space_efficient_cost_with_duration_constraint( # <<<<<<<<<<<<<<
* double[:, :] tc, double[:, :] pen, int start_beat, int end_beat, int global_start_l, Params p,
* double[:] cost, double[:] pen_val, double[:] vals_col, double[:] min_vals) nogil:
*/
static void __pyx_f_9radiotool_10algorithms_15par_build_table_backward_space_efficient_cost_with_duration_constraint(__Pyx_memviewslice __pyx_v_tc, __Pyx_memviewslice __pyx_v_pen, int __pyx_v_start_beat, int __pyx_v_end_beat, int __pyx_v_global_start_l, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params __pyx_v_p, __Pyx_memviewslice __pyx_v_cost, __Pyx_memviewslice __pyx_v_pen_val, __Pyx_memviewslice __pyx_v_vals_col, __Pyx_memviewslice __pyx_v_min_vals) {
int __pyx_v_l;
int __pyx_v_idx;
int __pyx_v_i;
int __pyx_v_beat_seg_i;
int __pyx_v_seg_start_beat;
int __pyx_v_j;
double __pyx_v_minval;
double __pyx_v_tmpval;
double __pyx_v_start_pen;
int __pyx_v_orig_beat_i;
int __pyx_t_1;
__Pyx_memviewslice __pyx_t_2 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
__Pyx_memviewslice __pyx_t_7 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_8;
Py_ssize_t __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
int __pyx_t_12;
int __pyx_t_13;
long __pyx_t_14;
int __pyx_t_15;
int __pyx_t_16;
int __pyx_t_17;
int __pyx_t_18;
int __pyx_t_19;
int __pyx_t_20;
int __pyx_t_21;
int __pyx_t_22;
int __pyx_t_23;
int __pyx_t_24;
int __pyx_t_25;
int __pyx_t_26;
int __pyx_t_27;
int __pyx_t_28;
int __pyx_t_29;
int __pyx_t_30;
int __pyx_t_31;
int __pyx_t_32;
int __pyx_t_33;
int __pyx_t_34;
int __pyx_t_35;
int __pyx_t_36;
int __pyx_t_37;
int __pyx_t_38;
int __pyx_t_39;
int __pyx_t_40;
int __pyx_t_41;
long __pyx_t_42;
int __pyx_t_43;
long __pyx_t_44;
__Pyx_memviewslice __pyx_t_45 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "radiotool/algorithms/par_build_table.pyx":226
*
* # generate initial cost
* if end_beat != -1: # <<<<<<<<<<<<<<
* cost[:] = 99999999 # N.inf
* cost[end_beat] = get_pen_value(pen, end_beat, pen.shape[1] - 1, global_start_l + pen.shape[1] - 1, p)
*/
__pyx_t_1 = ((__pyx_v_end_beat != -1) != 0);
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":227
* # generate initial cost
* if end_beat != -1:
* cost[:] = 99999999 # N.inf # <<<<<<<<<<<<<<
* cost[end_beat] = get_pen_value(pen, end_beat, pen.shape[1] - 1, global_start_l + pen.shape[1] - 1, p)
* else:
*/
__pyx_t_3 = -1;
__pyx_t_2.data = __pyx_v_cost.data;
__pyx_t_2.memview = __pyx_v_cost.memview;
__PYX_INC_MEMVIEW(&__pyx_t_2, 0);
__pyx_t_2.shape[0] = __pyx_v_cost.shape[0];
__pyx_t_2.strides[0] = __pyx_v_cost.strides[0];
__pyx_t_2.suboffsets[0] = -1;
{
double __pyx_temp_scalar = 99999999.0;
{
Py_ssize_t __pyx_temp_extent_0 = __pyx_t_2.shape[0];
Py_ssize_t __pyx_temp_stride_0 = __pyx_t_2.strides[0];
char *__pyx_temp_pointer_0;
Py_ssize_t __pyx_temp_idx_0;
__pyx_temp_pointer_0 = __pyx_t_2.data;
for (__pyx_temp_idx_0 = 0; __pyx_temp_idx_0 < __pyx_temp_extent_0; __pyx_temp_idx_0++) {
*((double *) __pyx_temp_pointer_0) = __pyx_temp_scalar;
__pyx_temp_pointer_0 += __pyx_temp_stride_0;
}
}
}
__PYX_XDEC_MEMVIEW(&__pyx_t_2, 0);
/* "radiotool/algorithms/par_build_table.pyx":228
* if end_beat != -1:
* cost[:] = 99999999 # N.inf
* cost[end_beat] = get_pen_value(pen, end_beat, pen.shape[1] - 1, global_start_l + pen.shape[1] - 1, p) # <<<<<<<<<<<<<<
* else:
* get_pen_column(pen, pen.shape[1] - 1, cost, global_start_l + pen.shape[1] - 1, p)
*/
__pyx_t_3 = __pyx_v_end_beat;
*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_3 * __pyx_v_cost.strides[0]) )) = __pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_value(__pyx_v_pen, __pyx_v_end_beat, ((__pyx_v_pen.shape[1]) - 1), ((__pyx_v_global_start_l + (__pyx_v_pen.shape[1])) - 1), __pyx_v_p);
goto __pyx_L3;
}
/*else*/ {
/* "radiotool/algorithms/par_build_table.pyx":230
* cost[end_beat] = get_pen_value(pen, end_beat, pen.shape[1] - 1, global_start_l + pen.shape[1] - 1, p)
* else:
* get_pen_column(pen, pen.shape[1] - 1, cost, global_start_l + pen.shape[1] - 1, p) # <<<<<<<<<<<<<<
*
* # optimize
*/
__pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_column(__pyx_v_pen, ((__pyx_v_pen.shape[1]) - 1), __pyx_v_cost, ((__pyx_v_global_start_l + (__pyx_v_pen.shape[1])) - 1), __pyx_v_p);
}
__pyx_L3:;
/* "radiotool/algorithms/par_build_table.pyx":233
*
* # optimize
* for l in xrange(pen.shape[1] - 2, -1, -1): # <<<<<<<<<<<<<<
* if l == 0 and start_beat != -1:
* # handle start beat set
*/
for (__pyx_t_4 = ((__pyx_v_pen.shape[1]) - 2); __pyx_t_4 > -1; __pyx_t_4-=1) {
__pyx_v_l = __pyx_t_4;
/* "radiotool/algorithms/par_build_table.pyx":234
* # optimize
* for l in xrange(pen.shape[1] - 2, -1, -1):
* if l == 0 and start_beat != -1: # <<<<<<<<<<<<<<
* # handle start beat set
* start_pen = get_pen_value(pen, start_beat, l, global_start_l + l, p)
*/
__pyx_t_1 = ((__pyx_v_l == 0) != 0);
if (__pyx_t_1) {
__pyx_t_5 = ((__pyx_v_start_beat != -1) != 0);
__pyx_t_6 = __pyx_t_5;
} else {
__pyx_t_6 = __pyx_t_1;
}
if (__pyx_t_6) {
/* "radiotool/algorithms/par_build_table.pyx":236
* if l == 0 and start_beat != -1:
* # handle start beat set
* start_pen = get_pen_value(pen, start_beat, l, global_start_l + l, p) # <<<<<<<<<<<<<<
* get_tc_column(tc, start_beat, vals_col, 1, p)
*
*/
__pyx_v_start_pen = __pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_value(__pyx_v_pen, __pyx_v_start_beat, __pyx_v_l, (__pyx_v_global_start_l + __pyx_v_l), __pyx_v_p);
/* "radiotool/algorithms/par_build_table.pyx":237
* # handle start beat set
* start_pen = get_pen_value(pen, start_beat, l, global_start_l + l, p)
* get_tc_column(tc, start_beat, vals_col, 1, p) # <<<<<<<<<<<<<<
*
* min_vals[:] = 99999999 # N.inf
*/
__pyx_f_9radiotool_10algorithms_15par_build_table_get_tc_column(__pyx_v_tc, __pyx_v_start_beat, __pyx_v_vals_col, 1, __pyx_v_p);
/* "radiotool/algorithms/par_build_table.pyx":239
* get_tc_column(tc, start_beat, vals_col, 1, p)
*
* min_vals[:] = 99999999 # N.inf # <<<<<<<<<<<<<<
* minval = -1
* for i in range(vals_col.shape[0]):
*/
__pyx_t_8 = -1;
__pyx_t_7.data = __pyx_v_min_vals.data;
__pyx_t_7.memview = __pyx_v_min_vals.memview;
__PYX_INC_MEMVIEW(&__pyx_t_7, 0);
__pyx_t_7.shape[0] = __pyx_v_min_vals.shape[0];
__pyx_t_7.strides[0] = __pyx_v_min_vals.strides[0];
__pyx_t_7.suboffsets[0] = -1;
{
double __pyx_temp_scalar = 99999999.0;
{
Py_ssize_t __pyx_temp_extent_0 = __pyx_t_7.shape[0];
Py_ssize_t __pyx_temp_stride_0 = __pyx_t_7.strides[0];
char *__pyx_temp_pointer_0;
Py_ssize_t __pyx_temp_idx_0;
__pyx_temp_pointer_0 = __pyx_t_7.data;
for (__pyx_temp_idx_0 = 0; __pyx_temp_idx_0 < __pyx_temp_extent_0; __pyx_temp_idx_0++) {
*((double *) __pyx_temp_pointer_0) = __pyx_temp_scalar;
__pyx_temp_pointer_0 += __pyx_temp_stride_0;
}
}
}
__PYX_XDEC_MEMVIEW(&__pyx_t_7, 0);
/* "radiotool/algorithms/par_build_table.pyx":240
*
* min_vals[:] = 99999999 # N.inf
* minval = -1 # <<<<<<<<<<<<<<
* for i in range(vals_col.shape[0]):
* if minval == -1 or vals_col[i] + cost[i] + start_pen < minval:
*/
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":241
* min_vals[:] = 99999999 # N.inf
* minval = -1
* for i in range(vals_col.shape[0]): # <<<<<<<<<<<<<<
* if minval == -1 or vals_col[i] + cost[i] + start_pen < minval:
* minval = vals_col[i] + cost[i] + start_pen
*/
__pyx_t_9 = (__pyx_v_vals_col.shape[0]);
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8+=1) {
__pyx_v_i = __pyx_t_8;
/* "radiotool/algorithms/par_build_table.pyx":242
* minval = -1
* for i in range(vals_col.shape[0]):
* if minval == -1 or vals_col[i] + cost[i] + start_pen < minval: # <<<<<<<<<<<<<<
* minval = vals_col[i] + cost[i] + start_pen
*
*/
__pyx_t_6 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_6) {
__pyx_t_10 = __pyx_v_i;
__pyx_t_11 = __pyx_v_i;
__pyx_t_1 = (((((*((double *) ( /* dim=0 */ (__pyx_v_vals_col.data + __pyx_t_10 * __pyx_v_vals_col.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_11 * __pyx_v_cost.strides[0]) )))) + __pyx_v_start_pen) < __pyx_v_minval) != 0);
__pyx_t_5 = __pyx_t_1;
} else {
__pyx_t_5 = __pyx_t_6;
}
if (__pyx_t_5) {
/* "radiotool/algorithms/par_build_table.pyx":243
* for i in range(vals_col.shape[0]):
* if minval == -1 or vals_col[i] + cost[i] + start_pen < minval:
* minval = vals_col[i] + cost[i] + start_pen # <<<<<<<<<<<<<<
*
* min_vals[start_beat] = minval
*/
__pyx_t_12 = __pyx_v_i;
__pyx_t_13 = __pyx_v_i;
__pyx_v_minval = (((*((double *) ( /* dim=0 */ (__pyx_v_vals_col.data + __pyx_t_12 * __pyx_v_vals_col.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_13 * __pyx_v_cost.strides[0]) )))) + __pyx_v_start_pen);
goto __pyx_L9;
}
__pyx_L9:;
}
/* "radiotool/algorithms/par_build_table.pyx":245
* minval = vals_col[i] + cost[i] + start_pen
*
* min_vals[start_beat] = minval # <<<<<<<<<<<<<<
*
* else:
*/
__pyx_t_8 = __pyx_v_start_beat;
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_8 * __pyx_v_min_vals.strides[0]) )) = __pyx_v_minval;
goto __pyx_L6;
}
/*else*/ {
/* "radiotool/algorithms/par_build_table.pyx":248
*
* else:
* get_pen_column(pen, l, pen_val, global_start_l + l, p) # <<<<<<<<<<<<<<
*
* # categories of beats we could be at before this one
*/
__pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_column(__pyx_v_pen, __pyx_v_l, __pyx_v_pen_val, (__pyx_v_global_start_l + __pyx_v_l), __pyx_v_p);
/* "radiotool/algorithms/par_build_table.pyx":253
*
* # beat segment before min_beat
* for idx in range(p.n_beats * (p.min_beats - 1)): # <<<<<<<<<<<<<<
* beat_seg_i = idx / p.n_beats
* orig_beat_i = idx % p.n_beats
*/
__pyx_t_14 = (__pyx_v_p.n_beats * (__pyx_v_p.min_beats - 1));
for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) {
__pyx_v_idx = __pyx_t_15;
/* "radiotool/algorithms/par_build_table.pyx":254
* # beat segment before min_beat
* for idx in range(p.n_beats * (p.min_beats - 1)):
* beat_seg_i = idx / p.n_beats # <<<<<<<<<<<<<<
* orig_beat_i = idx % p.n_beats
*
*/
__pyx_v_beat_seg_i = (__pyx_v_idx / __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":255
* for idx in range(p.n_beats * (p.min_beats - 1)):
* beat_seg_i = idx / p.n_beats
* orig_beat_i = idx % p.n_beats # <<<<<<<<<<<<<<
*
* # could only be going to beat_seg_i + 1
*/
__pyx_v_orig_beat_i = (__pyx_v_idx % __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":258
*
* # could only be going to beat_seg_i + 1
* seg_start_beat = (beat_seg_i + 1) * p.n_beats # <<<<<<<<<<<<<<
* minval = -1
* for j in range(p.n_beats):
*/
__pyx_v_seg_start_beat = ((__pyx_v_beat_seg_i + 1) * __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":259
* # could only be going to beat_seg_i + 1
* seg_start_beat = (beat_seg_i + 1) * p.n_beats
* minval = -1 # <<<<<<<<<<<<<<
* for j in range(p.n_beats):
* tmpval = tc[orig_beat_i, j] + pen_val[idx] + cost[seg_start_beat + j]
*/
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":260
* seg_start_beat = (beat_seg_i + 1) * p.n_beats
* minval = -1
* for j in range(p.n_beats): # <<<<<<<<<<<<<<
* tmpval = tc[orig_beat_i, j] + pen_val[idx] + cost[seg_start_beat + j]
* if minval == -1 or tmpval < minval:
*/
__pyx_t_16 = __pyx_v_p.n_beats;
for (__pyx_t_17 = 0; __pyx_t_17 < __pyx_t_16; __pyx_t_17+=1) {
__pyx_v_j = __pyx_t_17;
/* "radiotool/algorithms/par_build_table.pyx":261
* minval = -1
* for j in range(p.n_beats):
* tmpval = tc[orig_beat_i, j] + pen_val[idx] + cost[seg_start_beat + j] # <<<<<<<<<<<<<<
* if minval == -1 or tmpval < minval:
* minval = tmpval
*/
__pyx_t_18 = __pyx_v_orig_beat_i;
__pyx_t_19 = __pyx_v_j;
__pyx_t_20 = __pyx_v_idx;
__pyx_t_21 = (__pyx_v_seg_start_beat + __pyx_v_j);
__pyx_v_tmpval = (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_18 * __pyx_v_tc.strides[0]) ) + __pyx_t_19 * __pyx_v_tc.strides[1]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_pen_val.data + __pyx_t_20 * __pyx_v_pen_val.strides[0]) )))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_21 * __pyx_v_cost.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":262
* for j in range(p.n_beats):
* tmpval = tc[orig_beat_i, j] + pen_val[idx] + cost[seg_start_beat + j]
* if minval == -1 or tmpval < minval: # <<<<<<<<<<<<<<
* minval = tmpval
*
*/
__pyx_t_5 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_5) {
__pyx_t_6 = ((__pyx_v_tmpval < __pyx_v_minval) != 0);
__pyx_t_1 = __pyx_t_6;
} else {
__pyx_t_1 = __pyx_t_5;
}
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":263
* tmpval = tc[orig_beat_i, j] + pen_val[idx] + cost[seg_start_beat + j]
* if minval == -1 or tmpval < minval:
* minval = tmpval # <<<<<<<<<<<<<<
*
* min_vals[idx] = minval
*/
__pyx_v_minval = __pyx_v_tmpval;
goto __pyx_L14;
}
__pyx_L14:;
}
/* "radiotool/algorithms/par_build_table.pyx":265
* minval = tmpval
*
* min_vals[idx] = minval # <<<<<<<<<<<<<<
*
* # beat segment between min beat and max beat
*/
__pyx_t_16 = __pyx_v_idx;
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_16 * __pyx_v_min_vals.strides[0]) )) = __pyx_v_minval;
}
/* "radiotool/algorithms/par_build_table.pyx":268
*
* # beat segment between min beat and max beat
* for idx in range(p.n_beats * (p.min_beats - 1), p.n_beats * (p.max_beats - 1)): # <<<<<<<<<<<<<<
* beat_seg_i = idx / p.n_beats
* orig_beat_i = idx % p.n_beats
*/
__pyx_t_14 = (__pyx_v_p.n_beats * (__pyx_v_p.max_beats - 1));
for (__pyx_t_15 = (__pyx_v_p.n_beats * (__pyx_v_p.min_beats - 1)); __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) {
__pyx_v_idx = __pyx_t_15;
/* "radiotool/algorithms/par_build_table.pyx":269
* # beat segment between min beat and max beat
* for idx in range(p.n_beats * (p.min_beats - 1), p.n_beats * (p.max_beats - 1)):
* beat_seg_i = idx / p.n_beats # <<<<<<<<<<<<<<
* orig_beat_i = idx % p.n_beats
*
*/
__pyx_v_beat_seg_i = (__pyx_v_idx / __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":270
* for idx in range(p.n_beats * (p.min_beats - 1), p.n_beats * (p.max_beats - 1)):
* beat_seg_i = idx / p.n_beats
* orig_beat_i = idx % p.n_beats # <<<<<<<<<<<<<<
*
* # could be going to beat_seg_i + 1
*/
__pyx_v_orig_beat_i = (__pyx_v_idx % __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":273
*
* # could be going to beat_seg_i + 1
* seg_start_beat = (beat_seg_i + 1) * p.n_beats # <<<<<<<<<<<<<<
* minval = -1
* for j in range(p.n_beats):
*/
__pyx_v_seg_start_beat = ((__pyx_v_beat_seg_i + 1) * __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":274
* # could be going to beat_seg_i + 1
* seg_start_beat = (beat_seg_i + 1) * p.n_beats
* minval = -1 # <<<<<<<<<<<<<<
* for j in range(p.n_beats):
* tmpval = tc[orig_beat_i, j] + pen_val[idx] + cost[seg_start_beat + j]
*/
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":275
* seg_start_beat = (beat_seg_i + 1) * p.n_beats
* minval = -1
* for j in range(p.n_beats): # <<<<<<<<<<<<<<
* tmpval = tc[orig_beat_i, j] + pen_val[idx] + cost[seg_start_beat + j]
* if minval == -1 or tmpval < minval:
*/
__pyx_t_17 = __pyx_v_p.n_beats;
for (__pyx_t_22 = 0; __pyx_t_22 < __pyx_t_17; __pyx_t_22+=1) {
__pyx_v_j = __pyx_t_22;
/* "radiotool/algorithms/par_build_table.pyx":276
* minval = -1
* for j in range(p.n_beats):
* tmpval = tc[orig_beat_i, j] + pen_val[idx] + cost[seg_start_beat + j] # <<<<<<<<<<<<<<
* if minval == -1 or tmpval < minval:
* minval = tmpval
*/
__pyx_t_23 = __pyx_v_orig_beat_i;
__pyx_t_24 = __pyx_v_j;
__pyx_t_25 = __pyx_v_idx;
__pyx_t_26 = (__pyx_v_seg_start_beat + __pyx_v_j);
__pyx_v_tmpval = (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_23 * __pyx_v_tc.strides[0]) ) + __pyx_t_24 * __pyx_v_tc.strides[1]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_pen_val.data + __pyx_t_25 * __pyx_v_pen_val.strides[0]) )))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_26 * __pyx_v_cost.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":277
* for j in range(p.n_beats):
* tmpval = tc[orig_beat_i, j] + pen_val[idx] + cost[seg_start_beat + j]
* if minval == -1 or tmpval < minval: # <<<<<<<<<<<<<<
* minval = tmpval
* # or could be going to first pause beat
*/
__pyx_t_1 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_1) {
__pyx_t_5 = ((__pyx_v_tmpval < __pyx_v_minval) != 0);
__pyx_t_6 = __pyx_t_5;
} else {
__pyx_t_6 = __pyx_t_1;
}
if (__pyx_t_6) {
/* "radiotool/algorithms/par_build_table.pyx":278
* tmpval = tc[orig_beat_i, j] + pen_val[idx] + cost[seg_start_beat + j]
* if minval == -1 or tmpval < minval:
* minval = tmpval # <<<<<<<<<<<<<<
* # or could be going to first pause beat
* tmpval = tc[orig_beat_i, p.p0] + pen_val[idx] + cost[p.p0_full]
*/
__pyx_v_minval = __pyx_v_tmpval;
goto __pyx_L19;
}
__pyx_L19:;
}
/* "radiotool/algorithms/par_build_table.pyx":280
* minval = tmpval
* # or could be going to first pause beat
* tmpval = tc[orig_beat_i, p.p0] + pen_val[idx] + cost[p.p0_full] # <<<<<<<<<<<<<<
* if minval == -1 or tmpval < minval:
* minval = tmpval
*/
__pyx_t_17 = __pyx_v_orig_beat_i;
__pyx_t_22 = __pyx_v_p.p0;
__pyx_t_27 = __pyx_v_idx;
__pyx_t_28 = __pyx_v_p.p0_full;
__pyx_v_tmpval = (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_17 * __pyx_v_tc.strides[0]) ) + __pyx_t_22 * __pyx_v_tc.strides[1]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_pen_val.data + __pyx_t_27 * __pyx_v_pen_val.strides[0]) )))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_28 * __pyx_v_cost.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":281
* # or could be going to first pause beat
* tmpval = tc[orig_beat_i, p.p0] + pen_val[idx] + cost[p.p0_full]
* if minval == -1 or tmpval < minval: # <<<<<<<<<<<<<<
* minval = tmpval
*
*/
__pyx_t_6 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_6) {
__pyx_t_1 = ((__pyx_v_tmpval < __pyx_v_minval) != 0);
__pyx_t_5 = __pyx_t_1;
} else {
__pyx_t_5 = __pyx_t_6;
}
if (__pyx_t_5) {
/* "radiotool/algorithms/par_build_table.pyx":282
* tmpval = tc[orig_beat_i, p.p0] + pen_val[idx] + cost[p.p0_full]
* if minval == -1 or tmpval < minval:
* minval = tmpval # <<<<<<<<<<<<<<
*
* min_vals[idx] = minval
*/
__pyx_v_minval = __pyx_v_tmpval;
goto __pyx_L20;
}
__pyx_L20:;
/* "radiotool/algorithms/par_build_table.pyx":284
* minval = tmpval
*
* min_vals[idx] = minval # <<<<<<<<<<<<<<
*
* # max beat segment
*/
__pyx_t_29 = __pyx_v_idx;
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_29 * __pyx_v_min_vals.strides[0]) )) = __pyx_v_minval;
}
/* "radiotool/algorithms/par_build_table.pyx":287
*
* # max beat segment
* for idx in range(p.n_beats * (p.max_beats - 1), p.n_beats * p.max_beats): # <<<<<<<<<<<<<<
* orig_beat_i = idx % p.n_beats
*
*/
__pyx_t_15 = (__pyx_v_p.n_beats * __pyx_v_p.max_beats);
for (__pyx_t_30 = (__pyx_v_p.n_beats * (__pyx_v_p.max_beats - 1)); __pyx_t_30 < __pyx_t_15; __pyx_t_30+=1) {
__pyx_v_idx = __pyx_t_30;
/* "radiotool/algorithms/par_build_table.pyx":288
* # max beat segment
* for idx in range(p.n_beats * (p.max_beats - 1), p.n_beats * p.max_beats):
* orig_beat_i = idx % p.n_beats # <<<<<<<<<<<<<<
*
* # must be going to first pause beat
*/
__pyx_v_orig_beat_i = (__pyx_v_idx % __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":291
*
* # must be going to first pause beat
* min_vals[idx] = tc[orig_beat_i, p.p0] + pen_val[idx] + cost[p.p0_full] # <<<<<<<<<<<<<<
*
* # pause beats except the last one
*/
__pyx_t_31 = __pyx_v_orig_beat_i;
__pyx_t_32 = __pyx_v_p.p0;
__pyx_t_33 = __pyx_v_idx;
__pyx_t_34 = __pyx_v_p.p0_full;
__pyx_t_35 = __pyx_v_idx;
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_35 * __pyx_v_min_vals.strides[0]) )) = (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_31 * __pyx_v_tc.strides[0]) ) + __pyx_t_32 * __pyx_v_tc.strides[1]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_pen_val.data + __pyx_t_33 * __pyx_v_pen_val.strides[0]) )))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_34 * __pyx_v_cost.strides[0]) ))));
}
/* "radiotool/algorithms/par_build_table.pyx":294
*
* # pause beats except the last one
* for idx in range(p.p0_full, p.all_full - 1): # <<<<<<<<<<<<<<
* orig_beat_i = p.p0 + (idx - p.p0_full)
*
*/
__pyx_t_14 = (__pyx_v_p.all_full - 1);
for (__pyx_t_15 = __pyx_v_p.p0_full; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) {
__pyx_v_idx = __pyx_t_15;
/* "radiotool/algorithms/par_build_table.pyx":295
* # pause beats except the last one
* for idx in range(p.p0_full, p.all_full - 1):
* orig_beat_i = p.p0 + (idx - p.p0_full) # <<<<<<<<<<<<<<
*
* # could only be going to another pause beat
*/
__pyx_v_orig_beat_i = (__pyx_v_p.p0 + (__pyx_v_idx - __pyx_v_p.p0_full));
/* "radiotool/algorithms/par_build_table.pyx":298
*
* # could only be going to another pause beat
* minval = -1 # <<<<<<<<<<<<<<
* for j in range(p.n_pauses):
* tmpval = tc[orig_beat_i, p.p0 + j] + pen_val[idx] + cost[p.p0_full + j]
*/
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":299
* # could only be going to another pause beat
* minval = -1
* for j in range(p.n_pauses): # <<<<<<<<<<<<<<
* tmpval = tc[orig_beat_i, p.p0 + j] + pen_val[idx] + cost[p.p0_full + j]
* if minval == -1 or tmpval < minval:
*/
__pyx_t_30 = __pyx_v_p.n_pauses;
for (__pyx_t_36 = 0; __pyx_t_36 < __pyx_t_30; __pyx_t_36+=1) {
__pyx_v_j = __pyx_t_36;
/* "radiotool/algorithms/par_build_table.pyx":300
* minval = -1
* for j in range(p.n_pauses):
* tmpval = tc[orig_beat_i, p.p0 + j] + pen_val[idx] + cost[p.p0_full + j] # <<<<<<<<<<<<<<
* if minval == -1 or tmpval < minval:
* minval = tmpval
*/
__pyx_t_37 = __pyx_v_orig_beat_i;
__pyx_t_38 = (__pyx_v_p.p0 + __pyx_v_j);
__pyx_t_39 = __pyx_v_idx;
__pyx_t_40 = (__pyx_v_p.p0_full + __pyx_v_j);
__pyx_v_tmpval = (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_37 * __pyx_v_tc.strides[0]) ) + __pyx_t_38 * __pyx_v_tc.strides[1]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_pen_val.data + __pyx_t_39 * __pyx_v_pen_val.strides[0]) )))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_40 * __pyx_v_cost.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":301
* for j in range(p.n_pauses):
* tmpval = tc[orig_beat_i, p.p0 + j] + pen_val[idx] + cost[p.p0_full + j]
* if minval == -1 or tmpval < minval: # <<<<<<<<<<<<<<
* minval = tmpval
* min_vals[idx] = minval
*/
__pyx_t_5 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_5) {
__pyx_t_6 = ((__pyx_v_tmpval < __pyx_v_minval) != 0);
__pyx_t_1 = __pyx_t_6;
} else {
__pyx_t_1 = __pyx_t_5;
}
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":302
* tmpval = tc[orig_beat_i, p.p0 + j] + pen_val[idx] + cost[p.p0_full + j]
* if minval == -1 or tmpval < minval:
* minval = tmpval # <<<<<<<<<<<<<<
* min_vals[idx] = minval
*
*/
__pyx_v_minval = __pyx_v_tmpval;
goto __pyx_L27;
}
__pyx_L27:;
}
/* "radiotool/algorithms/par_build_table.pyx":303
* if minval == -1 or tmpval < minval:
* minval = tmpval
* min_vals[idx] = minval # <<<<<<<<<<<<<<
*
* # last pause beat
*/
__pyx_t_30 = __pyx_v_idx;
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_30 * __pyx_v_min_vals.strides[0]) )) = __pyx_v_minval;
}
/* "radiotool/algorithms/par_build_table.pyx":306
*
* # last pause beat
* minval = -1 # <<<<<<<<<<<<<<
* for j in range(p.n_beats):
* tmpval = tc[p.p0 + p.n_pauses - 1, j] + pen_val[p.all_full - 1] + cost[j]
*/
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":307
* # last pause beat
* minval = -1
* for j in range(p.n_beats): # <<<<<<<<<<<<<<
* tmpval = tc[p.p0 + p.n_pauses - 1, j] + pen_val[p.all_full - 1] + cost[j]
* if minval == -1 or tmpval < minval:
*/
__pyx_t_15 = __pyx_v_p.n_beats;
for (__pyx_t_36 = 0; __pyx_t_36 < __pyx_t_15; __pyx_t_36+=1) {
__pyx_v_j = __pyx_t_36;
/* "radiotool/algorithms/par_build_table.pyx":308
* minval = -1
* for j in range(p.n_beats):
* tmpval = tc[p.p0 + p.n_pauses - 1, j] + pen_val[p.all_full - 1] + cost[j] # <<<<<<<<<<<<<<
* if minval == -1 or tmpval < minval:
* minval = tmpval
*/
__pyx_t_14 = ((__pyx_v_p.p0 + __pyx_v_p.n_pauses) - 1);
__pyx_t_41 = __pyx_v_j;
__pyx_t_42 = (__pyx_v_p.all_full - 1);
__pyx_t_43 = __pyx_v_j;
__pyx_v_tmpval = (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_14 * __pyx_v_tc.strides[0]) ) + __pyx_t_41 * __pyx_v_tc.strides[1]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_pen_val.data + __pyx_t_42 * __pyx_v_pen_val.strides[0]) )))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_43 * __pyx_v_cost.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":309
* for j in range(p.n_beats):
* tmpval = tc[p.p0 + p.n_pauses - 1, j] + pen_val[p.all_full - 1] + cost[j]
* if minval == -1 or tmpval < minval: # <<<<<<<<<<<<<<
* minval = tmpval
* min_vals[p.all_full - 1] = minval
*/
__pyx_t_1 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_1) {
__pyx_t_5 = ((__pyx_v_tmpval < __pyx_v_minval) != 0);
__pyx_t_6 = __pyx_t_5;
} else {
__pyx_t_6 = __pyx_t_1;
}
if (__pyx_t_6) {
/* "radiotool/algorithms/par_build_table.pyx":310
* tmpval = tc[p.p0 + p.n_pauses - 1, j] + pen_val[p.all_full - 1] + cost[j]
* if minval == -1 or tmpval < minval:
* minval = tmpval # <<<<<<<<<<<<<<
* min_vals[p.all_full - 1] = minval
*
*/
__pyx_v_minval = __pyx_v_tmpval;
goto __pyx_L30;
}
__pyx_L30:;
}
/* "radiotool/algorithms/par_build_table.pyx":311
* if minval == -1 or tmpval < minval:
* minval = tmpval
* min_vals[p.all_full - 1] = minval # <<<<<<<<<<<<<<
*
* cost[:] = min_vals
*/
__pyx_t_44 = (__pyx_v_p.all_full - 1);
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_44 * __pyx_v_min_vals.strides[0]) )) = __pyx_v_minval;
}
__pyx_L6:;
/* "radiotool/algorithms/par_build_table.pyx":313
* min_vals[p.all_full - 1] = minval
*
* cost[:] = min_vals # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_15 = -1;
__pyx_t_45.data = __pyx_v_cost.data;
__pyx_t_45.memview = __pyx_v_cost.memview;
__PYX_INC_MEMVIEW(&__pyx_t_45, 0);
__pyx_t_45.shape[0] = __pyx_v_cost.shape[0];
__pyx_t_45.strides[0] = __pyx_v_cost.strides[0];
__pyx_t_45.suboffsets[0] = -1;
if (unlikely(__pyx_memoryview_copy_contents(__pyx_v_min_vals, __pyx_t_45, 1, 1, 0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 313; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__PYX_XDEC_MEMVIEW(&__pyx_t_45, 0);
}
/* "radiotool/algorithms/par_build_table.pyx":218
*
*
* cdef void backward_space_efficient_cost_with_duration_constraint( # <<<<<<<<<<<<<<
* double[:, :] tc, double[:, :] pen, int start_beat, int end_beat, int global_start_l, Params p,
* double[:] cost, double[:] pen_val, double[:] vals_col, double[:] min_vals) nogil:
*/
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__PYX_XDEC_MEMVIEW(&__pyx_t_2, 0);
__PYX_XDEC_MEMVIEW(&__pyx_t_7, 0);
__PYX_XDEC_MEMVIEW(&__pyx_t_45, 0);
__Pyx_WriteUnraisable("radiotool.algorithms.par_build_table.backward_space_efficient_cost_with_duration_constraint", __pyx_clineno, __pyx_lineno, __pyx_filename, 0);
__pyx_L0:;
}
/* "radiotool/algorithms/par_build_table.pyx":316
*
*
* cdef inline int minimum(double[:] buffer1, double[:] buffer2) nogil: # <<<<<<<<<<<<<<
* cdef int idx
* cdef int opt_i = 0
*/
/* "radiotool/algorithms/par_build_table.pyx":316
 *
 *
 * cdef inline int minimum(double[:] buffer1, double[:] buffer2) nogil: # <<<<<<<<<<<<<<
 * cdef int idx
 * cdef int opt_i = 0
 */
/* Return the index k that minimizes buffer1[k] + buffer2[k] over the
 * extent of buffer1. Ties are broken in favor of the smallest index
 * (strict '<' comparison). Strides are byte strides, as stored in the
 * Cython memoryview slice struct. */
static CYTHON_INLINE int __pyx_f_9radiotool_10algorithms_15par_build_table_minimum(__Pyx_memviewslice __pyx_v_buffer1, __Pyx_memviewslice __pyx_v_buffer2) {
  /* Hoist the slice bookkeeping into locals once, instead of re-reading
   * the struct fields on every element access. */
  Py_ssize_t __pyx_extent = (__pyx_v_buffer1.shape[0]);
  Py_ssize_t __pyx_stride1 = __pyx_v_buffer1.strides[0];
  Py_ssize_t __pyx_stride2 = __pyx_v_buffer2.strides[0];
  char *__pyx_base1 = __pyx_v_buffer1.data;
  char *__pyx_base2 = __pyx_v_buffer2.data;
  /* Seed the search with element 0 (matches the original: opt_i = 0,
   * minval = buffer1[0] + buffer2[0]). */
  int __pyx_best_idx = 0;
  double __pyx_best_val = ((*((double *) __pyx_base1)) + (*((double *) __pyx_base2)));
  int __pyx_k;
  for (__pyx_k = 1; __pyx_k < __pyx_extent; __pyx_k++) {
    /* Candidate sum for element k; pointer arithmetic is done in bytes,
     * mirroring the memoryview stride convention. */
    double __pyx_cand = ((*((double *) (__pyx_base1 + ((Py_ssize_t) __pyx_k) * __pyx_stride1))) + (*((double *) (__pyx_base2 + ((Py_ssize_t) __pyx_k) * __pyx_stride2))));
    if (__pyx_cand < __pyx_best_val) {
      __pyx_best_val = __pyx_cand;
      __pyx_best_idx = __pyx_k;
    }
  }
  /* "radiotool/algorithms/par_build_table.pyx":325
 * opt_i = idx
 *
 * return opt_i # <<<<<<<<<<<<<<
 *
 *
 */
  return __pyx_best_idx;
}
/* "radiotool/algorithms/par_build_table.pyx":328
*
*
* cdef void divide_and_conquer_cost_and_path( # <<<<<<<<<<<<<<
* double[:, :] tc, double[:, :] pen, int start_beat, int end_beat, int offset,
* int[:] global_path, Params p,
*/
/*
 * NOTE(review): Cython-generated C for
 *   "radiotool/algorithms/par_build_table.pyx":328
 *   cdef void divide_and_conquer_cost_and_path(...)
 * Divide-and-conquer reconstruction of a beat path into global_path:
 *   - base cases handle penalty tables of width l == 0, 1, or 2 directly;
 *   - otherwise the table is split at l/2, forward (f) and backward (g)
 *     space-efficient cost passes run concurrently via an OpenMP prange,
 *     the split beat minimizing f[i] + g[i] is recorded, and both halves
 *     are solved recursively.
 * mv1..mv6 are caller-provided scratch buffers reused across recursion
 * levels.  Do not hand-edit this translation unit; change the .pyx source.
 */
static void __pyx_f_9radiotool_10algorithms_15par_build_table_divide_and_conquer_cost_and_path(__Pyx_memviewslice __pyx_v_tc, __Pyx_memviewslice __pyx_v_pen, int __pyx_v_start_beat, int __pyx_v_end_beat, int __pyx_v_offset, __Pyx_memviewslice __pyx_v_global_path, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params __pyx_v_p, __Pyx_memviewslice __pyx_v_f, __Pyx_memviewslice __pyx_v_g, __Pyx_memviewslice __pyx_v_mv1, __Pyx_memviewslice __pyx_v_mv2, __Pyx_memviewslice __pyx_v_mv3, __Pyx_memviewslice __pyx_v_mv4, __Pyx_memviewslice __pyx_v_mv5, __Pyx_memviewslice __pyx_v_mv6) {
int __pyx_v_l;
__Pyx_memviewslice __pyx_v_new_pen = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_tc_column = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_v_i;
int __pyx_v_opt_i;
int __pyx_v_l_over_2;
double __pyx_v_minval;
int __pyx_v_prange_i;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
Py_ssize_t __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
long __pyx_t_12;
int __pyx_t_13;
int __pyx_t_14;
int __pyx_t_15;
int __pyx_t_16;
long __pyx_t_17;
long __pyx_t_18;
long __pyx_t_19;
__Pyx_memviewslice __pyx_t_20 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_21;
__Pyx_memviewslice __pyx_t_22 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_23;
int __pyx_t_24;
int __pyx_t_25;
int __pyx_t_26;
__Pyx_memviewslice __pyx_t_27 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_28;
__Pyx_memviewslice __pyx_t_29 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("divide_and_conquer_cost_and_path", 0);
/* "radiotool/algorithms/par_build_table.pyx":334
* double[:] mv3, double[:] mv4, double[:] mv5, double[:] mv6):
*
* cdef int l = pen.shape[1] # out beats # <<<<<<<<<<<<<<
* cdef double[:] new_pen, tc_column
* cdef int i, opt_i, l_over_2, f_done, g_done
*/
__pyx_v_l = (__pyx_v_pen.shape[1]);
/* "radiotool/algorithms/par_build_table.pyx":337
* cdef double[:] new_pen, tc_column
* cdef int i, opt_i, l_over_2, f_done, g_done
* cdef double minval = -1.0 # <<<<<<<<<<<<<<
* cdef int prange_i, stride
*
*/
__pyx_v_minval = -1.0;
/* Base cases: widths 0 and 1 need no work; width 2 is resolved directly
 * below depending on which of start_beat/end_beat are pinned (-1 = unset). */
/* "radiotool/algorithms/par_build_table.pyx":353
* # opt_i_arr = ar2
*
* if l == 0: # <<<<<<<<<<<<<<
* pass
* elif l == 1:
*/
__pyx_t_1 = ((__pyx_v_l == 0) != 0);
if (__pyx_t_1) {
goto __pyx_L3;
}
/* "radiotool/algorithms/par_build_table.pyx":355
* if l == 0:
* pass
* elif l == 1: # <<<<<<<<<<<<<<
* pass
* elif l == 2 and start_beat != -1 and end_beat != -1:
*/
__pyx_t_1 = ((__pyx_v_l == 1) != 0);
if (__pyx_t_1) {
goto __pyx_L3;
}
/* "radiotool/algorithms/par_build_table.pyx":357
* elif l == 1:
* pass
* elif l == 2 and start_beat != -1 and end_beat != -1: # <<<<<<<<<<<<<<
* pass
* elif l == 2 and start_beat != -1:
*/
__pyx_t_1 = ((__pyx_v_l == 2) != 0);
if (__pyx_t_1) {
__pyx_t_2 = ((__pyx_v_start_beat != -1) != 0);
if (__pyx_t_2) {
__pyx_t_3 = ((__pyx_v_end_beat != -1) != 0);
__pyx_t_4 = __pyx_t_3;
} else {
__pyx_t_4 = __pyx_t_2;
}
__pyx_t_2 = __pyx_t_4;
} else {
__pyx_t_2 = __pyx_t_1;
}
if (__pyx_t_2) {
goto __pyx_L3;
}
/* Width 2 with a fixed start beat: pick the second beat minimizing
 * tc_column[i] + new_pen[i]. */
/* "radiotool/algorithms/par_build_table.pyx":359
* elif l == 2 and start_beat != -1 and end_beat != -1:
* pass
* elif l == 2 and start_beat != -1: # <<<<<<<<<<<<<<
* new_pen = mv1
* get_pen_column(pen, 1, new_pen, offset, p)
*/
__pyx_t_2 = ((__pyx_v_l == 2) != 0);
if (__pyx_t_2) {
__pyx_t_1 = ((__pyx_v_start_beat != -1) != 0);
__pyx_t_4 = __pyx_t_1;
} else {
__pyx_t_4 = __pyx_t_2;
}
if (__pyx_t_4) {
/* "radiotool/algorithms/par_build_table.pyx":360
* pass
* elif l == 2 and start_beat != -1:
* new_pen = mv1 # <<<<<<<<<<<<<<
* get_pen_column(pen, 1, new_pen, offset, p)
*
*/
__PYX_INC_MEMVIEW(&__pyx_v_mv1, 0);
__pyx_v_new_pen = __pyx_v_mv1;
/* "radiotool/algorithms/par_build_table.pyx":361
* elif l == 2 and start_beat != -1:
* new_pen = mv1
* get_pen_column(pen, 1, new_pen, offset, p) # <<<<<<<<<<<<<<
*
* tc_column = mv2
*/
__pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_column(__pyx_v_pen, 1, __pyx_v_new_pen, __pyx_v_offset, __pyx_v_p);
/* "radiotool/algorithms/par_build_table.pyx":363
* get_pen_column(pen, 1, new_pen, offset, p)
*
* tc_column = mv2 # <<<<<<<<<<<<<<
* get_tc_column(tc, start_beat, tc_column, 1, p)
*
*/
__PYX_INC_MEMVIEW(&__pyx_v_mv2, 0);
__pyx_v_tc_column = __pyx_v_mv2;
/* "radiotool/algorithms/par_build_table.pyx":364
*
* tc_column = mv2
* get_tc_column(tc, start_beat, tc_column, 1, p) # <<<<<<<<<<<<<<
*
* global_path[offset] = start_beat
*/
__pyx_f_9radiotool_10algorithms_15par_build_table_get_tc_column(__pyx_v_tc, __pyx_v_start_beat, __pyx_v_tc_column, 1, __pyx_v_p);
/* "radiotool/algorithms/par_build_table.pyx":366
* get_tc_column(tc, start_beat, tc_column, 1, p)
*
* global_path[offset] = start_beat # <<<<<<<<<<<<<<
*
* minval = -1.0
*/
__pyx_t_5 = __pyx_v_offset;
*((int *) ( /* dim=0 */ (__pyx_v_global_path.data + __pyx_t_5 * __pyx_v_global_path.strides[0]) )) = __pyx_v_start_beat;
/* Argmin scan; minval == -1.0 is the "unset" sentinel (assumes real costs
 * never equal -1.0 — NOTE(review): confirm in the .pyx source). */
/* "radiotool/algorithms/par_build_table.pyx":368
* global_path[offset] = start_beat
*
* minval = -1.0 # <<<<<<<<<<<<<<
* opt_i = 0
* for i in range(tc_column.shape[0]):
*/
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":369
*
* minval = -1.0
* opt_i = 0 # <<<<<<<<<<<<<<
* for i in range(tc_column.shape[0]):
* if minval == -1.0 or tc_column[i] + new_pen[i] < minval:
*/
__pyx_v_opt_i = 0;
/* "radiotool/algorithms/par_build_table.pyx":370
* minval = -1.0
* opt_i = 0
* for i in range(tc_column.shape[0]): # <<<<<<<<<<<<<<
* if minval == -1.0 or tc_column[i] + new_pen[i] < minval:
* minval = tc_column[i] + new_pen[i]
*/
__pyx_t_6 = (__pyx_v_tc_column.shape[0]);
for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) {
__pyx_v_i = __pyx_t_7;
/* "radiotool/algorithms/par_build_table.pyx":371
* opt_i = 0
* for i in range(tc_column.shape[0]):
* if minval == -1.0 or tc_column[i] + new_pen[i] < minval: # <<<<<<<<<<<<<<
* minval = tc_column[i] + new_pen[i]
* opt_i = i
*/
__pyx_t_4 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_4) {
__pyx_t_8 = __pyx_v_i;
__pyx_t_9 = __pyx_v_i;
__pyx_t_2 = ((((*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_8 * __pyx_v_tc_column.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_new_pen.data + __pyx_t_9 * __pyx_v_new_pen.strides[0]) )))) < __pyx_v_minval) != 0);
__pyx_t_1 = __pyx_t_2;
} else {
__pyx_t_1 = __pyx_t_4;
}
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":372
* for i in range(tc_column.shape[0]):
* if minval == -1.0 or tc_column[i] + new_pen[i] < minval:
* minval = tc_column[i] + new_pen[i] # <<<<<<<<<<<<<<
* opt_i = i
*
*/
__pyx_t_10 = __pyx_v_i;
__pyx_t_11 = __pyx_v_i;
__pyx_v_minval = ((*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_10 * __pyx_v_tc_column.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_new_pen.data + __pyx_t_11 * __pyx_v_new_pen.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":373
* if minval == -1.0 or tc_column[i] + new_pen[i] < minval:
* minval = tc_column[i] + new_pen[i]
* opt_i = i # <<<<<<<<<<<<<<
*
* # print "$ setting time %d to %d" % (offset + 1, opt_i)
*/
__pyx_v_opt_i = __pyx_v_i;
goto __pyx_L6;
}
__pyx_L6:;
}
/* "radiotool/algorithms/par_build_table.pyx":376
*
* # print "$ setting time %d to %d" % (offset + 1, opt_i)
* global_path[offset + 1] = opt_i # <<<<<<<<<<<<<<
*
* # global_path_cost[offset + 1] = N.min(tc_column + new_pen)
*/
__pyx_t_12 = (__pyx_v_offset + 1);
*((int *) ( /* dim=0 */ (__pyx_v_global_path.data + __pyx_t_12 * __pyx_v_global_path.strides[0]) )) = __pyx_v_opt_i;
goto __pyx_L3;
}
/* Width 2 with a fixed end beat: mirror of the case above, choosing the
 * first beat instead of the second. */
/* "radiotool/algorithms/par_build_table.pyx":379
*
* # global_path_cost[offset + 1] = N.min(tc_column + new_pen)
* elif l == 2 and end_beat != -1: # <<<<<<<<<<<<<<
* new_pen = mv1
* get_pen_column(pen, 0, new_pen, offset, p)
*/
__pyx_t_1 = ((__pyx_v_l == 2) != 0);
if (__pyx_t_1) {
__pyx_t_4 = ((__pyx_v_end_beat != -1) != 0);
__pyx_t_2 = __pyx_t_4;
} else {
__pyx_t_2 = __pyx_t_1;
}
if (__pyx_t_2) {
/* "radiotool/algorithms/par_build_table.pyx":380
* # global_path_cost[offset + 1] = N.min(tc_column + new_pen)
* elif l == 2 and end_beat != -1:
* new_pen = mv1 # <<<<<<<<<<<<<<
* get_pen_column(pen, 0, new_pen, offset, p)
*
*/
__PYX_INC_MEMVIEW(&__pyx_v_mv1, 0);
__pyx_v_new_pen = __pyx_v_mv1;
/* "radiotool/algorithms/par_build_table.pyx":381
* elif l == 2 and end_beat != -1:
* new_pen = mv1
* get_pen_column(pen, 0, new_pen, offset, p) # <<<<<<<<<<<<<<
*
* tc_column = mv2
*/
__pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_column(__pyx_v_pen, 0, __pyx_v_new_pen, __pyx_v_offset, __pyx_v_p);
/* "radiotool/algorithms/par_build_table.pyx":383
* get_pen_column(pen, 0, new_pen, offset, p)
*
* tc_column = mv2 # <<<<<<<<<<<<<<
* get_tc_column(tc, end_beat, tc_column, 0, p)
*
*/
__PYX_INC_MEMVIEW(&__pyx_v_mv2, 0);
__pyx_v_tc_column = __pyx_v_mv2;
/* "radiotool/algorithms/par_build_table.pyx":384
*
* tc_column = mv2
* get_tc_column(tc, end_beat, tc_column, 0, p) # <<<<<<<<<<<<<<
*
* minval = -1.0
*/
__pyx_f_9radiotool_10algorithms_15par_build_table_get_tc_column(__pyx_v_tc, __pyx_v_end_beat, __pyx_v_tc_column, 0, __pyx_v_p);
/* "radiotool/algorithms/par_build_table.pyx":386
* get_tc_column(tc, end_beat, tc_column, 0, p)
*
* minval = -1.0 # <<<<<<<<<<<<<<
* opt_i = 0
* for i in range(tc_column.shape[0]):
*/
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":387
*
* minval = -1.0
* opt_i = 0 # <<<<<<<<<<<<<<
* for i in range(tc_column.shape[0]):
* if minval == -1.0 or tc_column[i] + new_pen[i] < minval:
*/
__pyx_v_opt_i = 0;
/* "radiotool/algorithms/par_build_table.pyx":388
* minval = -1.0
* opt_i = 0
* for i in range(tc_column.shape[0]): # <<<<<<<<<<<<<<
* if minval == -1.0 or tc_column[i] + new_pen[i] < minval:
* minval = tc_column[i] + new_pen[i]
*/
__pyx_t_6 = (__pyx_v_tc_column.shape[0]);
for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) {
__pyx_v_i = __pyx_t_7;
/* "radiotool/algorithms/par_build_table.pyx":389
* opt_i = 0
* for i in range(tc_column.shape[0]):
* if minval == -1.0 or tc_column[i] + new_pen[i] < minval: # <<<<<<<<<<<<<<
* minval = tc_column[i] + new_pen[i]
* opt_i = i
*/
__pyx_t_2 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_2) {
__pyx_t_13 = __pyx_v_i;
__pyx_t_14 = __pyx_v_i;
__pyx_t_1 = ((((*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_13 * __pyx_v_tc_column.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_new_pen.data + __pyx_t_14 * __pyx_v_new_pen.strides[0]) )))) < __pyx_v_minval) != 0);
__pyx_t_4 = __pyx_t_1;
} else {
__pyx_t_4 = __pyx_t_2;
}
if (__pyx_t_4) {
/* "radiotool/algorithms/par_build_table.pyx":390
* for i in range(tc_column.shape[0]):
* if minval == -1.0 or tc_column[i] + new_pen[i] < minval:
* minval = tc_column[i] + new_pen[i] # <<<<<<<<<<<<<<
* opt_i = i
*
*/
__pyx_t_15 = __pyx_v_i;
__pyx_t_16 = __pyx_v_i;
__pyx_v_minval = ((*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_15 * __pyx_v_tc_column.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_new_pen.data + __pyx_t_16 * __pyx_v_new_pen.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":391
* if minval == -1.0 or tc_column[i] + new_pen[i] < minval:
* minval = tc_column[i] + new_pen[i]
* opt_i = i # <<<<<<<<<<<<<<
*
* # print "* setting time %d to %d" % (offset, opt_i)
*/
__pyx_v_opt_i = __pyx_v_i;
goto __pyx_L9;
}
__pyx_L9:;
}
/* "radiotool/algorithms/par_build_table.pyx":394
*
* # print "* setting time %d to %d" % (offset, opt_i)
* global_path[offset] = opt_i # <<<<<<<<<<<<<<
* global_path[offset + 1] = end_beat
*
*/
__pyx_t_7 = __pyx_v_offset;
*((int *) ( /* dim=0 */ (__pyx_v_global_path.data + __pyx_t_7 * __pyx_v_global_path.strides[0]) )) = __pyx_v_opt_i;
/* "radiotool/algorithms/par_build_table.pyx":395
* # print "* setting time %d to %d" % (offset, opt_i)
* global_path[offset] = opt_i
* global_path[offset + 1] = end_beat # <<<<<<<<<<<<<<
*
* # global_path_cost[offset] = N.min(tc_column + new_pen)
*/
__pyx_t_17 = (__pyx_v_offset + 1);
*((int *) ( /* dim=0 */ (__pyx_v_global_path.data + __pyx_t_17 * __pyx_v_global_path.strides[0]) )) = __pyx_v_end_beat;
goto __pyx_L3;
}
/* "radiotool/algorithms/par_build_table.pyx":398
*
* # global_path_cost[offset] = N.min(tc_column + new_pen)
* elif l == 2: # <<<<<<<<<<<<<<
* pass
* # opt_path = cost_and_path(tc, pen, start_beat, end_beat)
*/
__pyx_t_4 = ((__pyx_v_l == 2) != 0);
if (__pyx_t_4) {
goto __pyx_L3;
}
/*else*/ {
/* Recursive case: split the table at l/2 and run the forward (f) and
 * backward (g) cost passes concurrently (OpenMP prange over {0, 1}). */
/* "radiotool/algorithms/par_build_table.pyx":404
*
* else:
* l_over_2 = l / 2 # <<<<<<<<<<<<<<
*
* # print "forward. start beat:", start_beat, "offset:", offset, "length:", pen[:, :l_over_2 + 1].shape[1]
*/
__pyx_v_l_over_2 = (__pyx_v_l / 2);
/* "radiotool/algorithms/par_build_table.pyx":409
* # print "backwrd. end beat:", end_beat, "offset:", offset + l_over_2, "length:", pen[:, l_over_2:].shape[1]
*
* for prange_i in parallel.prange(2, nogil=True): # <<<<<<<<<<<<<<
* if prange_i == 0:
* space_efficient_cost_with_duration_constraint(
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
#endif
/*try:*/ {
if (1 == 0) abort();
{
int __pyx_parallel_temp0 = 0xbad0bad0;
const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0;
PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
int __pyx_parallel_why;
__pyx_parallel_why = 0;
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_19 = (2 - 0) / 1;
if (__pyx_t_19 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_21) firstprivate(__pyx_t_22, __pyx_t_20) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
Py_BEGIN_ALLOW_THREADS
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_prange_i) lastprivate(__pyx_v_prange_i)
#endif /* _OPENMP */
for (__pyx_t_18 = 0; __pyx_t_18 < __pyx_t_19; __pyx_t_18++){
if (__pyx_parallel_why < 2)
{
__pyx_v_prange_i = 0 + 1 * __pyx_t_18;
/* "radiotool/algorithms/par_build_table.pyx":413
* space_efficient_cost_with_duration_constraint(
* tc, pen[:, :l_over_2 + 1], start_beat, -1, offset, p, f, mv1, mv2, mv3)
* elif prange_i == 1: # <<<<<<<<<<<<<<
* backward_space_efficient_cost_with_duration_constraint(
* tc, pen[:, l_over_2:], -1, end_beat, offset + l_over_2, p, g, mv4, mv5, mv6)
*/
switch (__pyx_v_prange_i) {
/* "radiotool/algorithms/par_build_table.pyx":410
*
* for prange_i in parallel.prange(2, nogil=True):
* if prange_i == 0: # <<<<<<<<<<<<<<
* space_efficient_cost_with_duration_constraint(
* tc, pen[:, :l_over_2 + 1], start_beat, -1, offset, p, f, mv1, mv2, mv3)
*/
case 0:
/* Iteration 0: forward pass over pen[:, :l_over_2 + 1] into f. */
/* "radiotool/algorithms/par_build_table.pyx":412
* if prange_i == 0:
* space_efficient_cost_with_duration_constraint(
* tc, pen[:, :l_over_2 + 1], start_beat, -1, offset, p, f, mv1, mv2, mv3) # <<<<<<<<<<<<<<
* elif prange_i == 1:
* backward_space_efficient_cost_with_duration_constraint(
*/
__pyx_t_21 = -1;
__pyx_t_20.data = __pyx_v_pen.data;
__pyx_t_20.memview = __pyx_v_pen.memview;
__PYX_INC_MEMVIEW(&__pyx_t_20, 0);
__pyx_t_20.shape[0] = __pyx_v_pen.shape[0];
__pyx_t_20.strides[0] = __pyx_v_pen.strides[0];
__pyx_t_20.suboffsets[0] = -1;
if (unlikely(__pyx_memoryview_slice_memviewslice(
    &__pyx_t_20,
    __pyx_v_pen.shape[1], __pyx_v_pen.strides[1], __pyx_v_pen.suboffsets[1],
    1,
    1,
    &__pyx_t_21,
    0,
    (__pyx_v_l_over_2 + 1),
    0,
    0,
    1,
    0,
    1) < 0))
{
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L15_error;}
}
__pyx_f_9radiotool_10algorithms_15par_build_table_space_efficient_cost_with_duration_constraint(__pyx_v_tc, __pyx_t_20, __pyx_v_start_beat, -1, __pyx_v_offset, __pyx_v_p, __pyx_v_f, __pyx_v_mv1, __pyx_v_mv2, __pyx_v_mv3);
/* "radiotool/algorithms/par_build_table.pyx":411
* for prange_i in parallel.prange(2, nogil=True):
* if prange_i == 0:
* space_efficient_cost_with_duration_constraint( # <<<<<<<<<<<<<<
* tc, pen[:, :l_over_2 + 1], start_beat, -1, offset, p, f, mv1, mv2, mv3)
* elif prange_i == 1:
*/
__PYX_XDEC_MEMVIEW(&__pyx_t_20, 0);
break;
/* "radiotool/algorithms/par_build_table.pyx":413
* space_efficient_cost_with_duration_constraint(
* tc, pen[:, :l_over_2 + 1], start_beat, -1, offset, p, f, mv1, mv2, mv3)
* elif prange_i == 1: # <<<<<<<<<<<<<<
* backward_space_efficient_cost_with_duration_constraint(
* tc, pen[:, l_over_2:], -1, end_beat, offset + l_over_2, p, g, mv4, mv5, mv6)
*/
case 1:
/* Iteration 1: backward pass over pen[:, l_over_2:] into g. */
/* "radiotool/algorithms/par_build_table.pyx":415
* elif prange_i == 1:
* backward_space_efficient_cost_with_duration_constraint(
* tc, pen[:, l_over_2:], -1, end_beat, offset + l_over_2, p, g, mv4, mv5, mv6) # <<<<<<<<<<<<<<
*
* # print "finding minimum"
*/
__pyx_t_21 = -1;
__pyx_t_22.data = __pyx_v_pen.data;
__pyx_t_22.memview = __pyx_v_pen.memview;
__PYX_INC_MEMVIEW(&__pyx_t_22, 0);
__pyx_t_22.shape[0] = __pyx_v_pen.shape[0];
__pyx_t_22.strides[0] = __pyx_v_pen.strides[0];
__pyx_t_22.suboffsets[0] = -1;
if (unlikely(__pyx_memoryview_slice_memviewslice(
    &__pyx_t_22,
    __pyx_v_pen.shape[1], __pyx_v_pen.strides[1], __pyx_v_pen.suboffsets[1],
    1,
    1,
    &__pyx_t_21,
    __pyx_v_l_over_2,
    0,
    0,
    1,
    0,
    0,
    1) < 0))
{
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 415; __pyx_clineno = __LINE__; goto __pyx_L15_error;}
}
__pyx_f_9radiotool_10algorithms_15par_build_table_backward_space_efficient_cost_with_duration_constraint(__pyx_v_tc, __pyx_t_22, -1, __pyx_v_end_beat, (__pyx_v_offset + __pyx_v_l_over_2), __pyx_v_p, __pyx_v_g, __pyx_v_mv4, __pyx_v_mv5, __pyx_v_mv6);
/* "radiotool/algorithms/par_build_table.pyx":414
* tc, pen[:, :l_over_2 + 1], start_beat, -1, offset, p, f, mv1, mv2, mv3)
* elif prange_i == 1:
* backward_space_efficient_cost_with_duration_constraint( # <<<<<<<<<<<<<<
* tc, pen[:, l_over_2:], -1, end_beat, offset + l_over_2, p, g, mv4, mv5, mv6)
*
*/
__PYX_XDEC_MEMVIEW(&__pyx_t_22, 0);
break;
default: break;
}
goto __pyx_L18;
__pyx_L15_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_exc_type)
#endif /* _OPENMP */
if (!__pyx_parallel_exc_type) {
__Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
__Pyx_GOTREF(__pyx_parallel_exc_type);
}
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_parallel_why = 4;
goto __pyx_L17;
__pyx_L17:;
#ifdef _OPENMP
#pragma omp critical(__pyx_parallel_lastprivates0)
#endif /* _OPENMP */
{
__pyx_parallel_temp0 = __pyx_v_prange_i;
}
__pyx_L18:;
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_why)
#endif /* _OPENMP */
}
}
#ifdef _OPENMP
Py_END_ALLOW_THREADS
#else
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#endif /* _OPENMP */
/* Clean up any temporaries */
__PYX_XDEC_MEMVIEW(&__pyx_t_22, 0);
__PYX_XDEC_MEMVIEW(&__pyx_t_20, 0);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
#ifndef _OPENMP
}
#endif /* _OPENMP */
}
}
if (__pyx_parallel_exc_type) {
/* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
__pyx_parallel_why = 4;
}
if (__pyx_parallel_why) {
__pyx_v_prange_i = __pyx_parallel_temp0;
switch (__pyx_parallel_why) {
case 3: goto __pyx_L10_return;
case 4:
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_GIVEREF(__pyx_parallel_exc_type);
__Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
__pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
goto __pyx_L11_error;
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "radiotool/algorithms/par_build_table.pyx":409
* # print "backwrd. end beat:", end_beat, "offset:", offset + l_over_2, "length:", pen[:, l_over_2:].shape[1]
*
* for prange_i in parallel.prange(2, nogil=True): # <<<<<<<<<<<<<<
* if prange_i == 0:
* space_efficient_cost_with_duration_constraint(
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L12;
}
__pyx_L10_return: {
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L0;
}
__pyx_L11_error: {
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L1_error;
}
__pyx_L12:;
}
}
/* Combine step: pick the split beat i minimizing f[i] + g[i].  minval ==
 * -1.0 is the "unset" sentinel again.  NOTE(review): __pyx_t_21 is an int
 * loop index compared against a Py_ssize_t bound; fine for the array sizes
 * this code was generated for, but worth confirming in the .pyx source. */
/* "radiotool/algorithms/par_build_table.pyx":450
*
* # ## -- OLD WAY -- ##
* minval = -1.0 # <<<<<<<<<<<<<<
* opt_i = 0
* for i in range(f.shape[0]):
*/
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":451
* # ## -- OLD WAY -- ##
* minval = -1.0
* opt_i = 0 # <<<<<<<<<<<<<<
* for i in range(f.shape[0]):
* if minval == -1.0 or f[i] + g[i] < minval:
*/
__pyx_v_opt_i = 0;
/* "radiotool/algorithms/par_build_table.pyx":452
* minval = -1.0
* opt_i = 0
* for i in range(f.shape[0]): # <<<<<<<<<<<<<<
* if minval == -1.0 or f[i] + g[i] < minval:
* minval = f[i] + g[i]
*/
__pyx_t_6 = (__pyx_v_f.shape[0]);
for (__pyx_t_21 = 0; __pyx_t_21 < __pyx_t_6; __pyx_t_21+=1) {
__pyx_v_i = __pyx_t_21;
/* "radiotool/algorithms/par_build_table.pyx":453
* opt_i = 0
* for i in range(f.shape[0]):
* if minval == -1.0 or f[i] + g[i] < minval: # <<<<<<<<<<<<<<
* minval = f[i] + g[i]
* opt_i = i
*/
__pyx_t_4 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_4) {
__pyx_t_23 = __pyx_v_i;
__pyx_t_24 = __pyx_v_i;
__pyx_t_2 = ((((*((double *) ( /* dim=0 */ (__pyx_v_f.data + __pyx_t_23 * __pyx_v_f.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_g.data + __pyx_t_24 * __pyx_v_g.strides[0]) )))) < __pyx_v_minval) != 0);
__pyx_t_1 = __pyx_t_2;
} else {
__pyx_t_1 = __pyx_t_4;
}
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":454
* for i in range(f.shape[0]):
* if minval == -1.0 or f[i] + g[i] < minval:
* minval = f[i] + g[i] # <<<<<<<<<<<<<<
* opt_i = i
*
*/
__pyx_t_25 = __pyx_v_i;
__pyx_t_26 = __pyx_v_i;
__pyx_v_minval = ((*((double *) ( /* dim=0 */ (__pyx_v_f.data + __pyx_t_25 * __pyx_v_f.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_g.data + __pyx_t_26 * __pyx_v_g.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":455
* if minval == -1.0 or f[i] + g[i] < minval:
* minval = f[i] + g[i]
* opt_i = i # <<<<<<<<<<<<<<
*
* # print "setting time %d to %d" % (l_over_2 + offset, opt_i)
*/
__pyx_v_opt_i = __pyx_v_i;
goto __pyx_L21;
}
__pyx_L21:;
}
/* "radiotool/algorithms/par_build_table.pyx":465
*
*
* global_path[l_over_2 + offset] = opt_i # <<<<<<<<<<<<<<
* # global_path_cost[l_over_2 + offset] = N.min(f + g)
*
*/
__pyx_t_21 = (__pyx_v_l_over_2 + __pyx_v_offset);
*((int *) ( /* dim=0 */ (__pyx_v_global_path.data + __pyx_t_21 * __pyx_v_global_path.strides[0]) )) = __pyx_v_opt_i;
/* Recurse on the first half with opt_i as its pinned end beat. */
/* "radiotool/algorithms/par_build_table.pyx":470
* # first half
* divide_and_conquer_cost_and_path(
* tc, pen[:, :l_over_2 + 1], start_beat, opt_i, offset, global_path, p, # <<<<<<<<<<<<<<
* f, g, mv1, mv2, mv3, mv4, mv5, mv6)
*
*/
__pyx_t_28 = -1;
__pyx_t_27.data = __pyx_v_pen.data;
__pyx_t_27.memview = __pyx_v_pen.memview;
__PYX_INC_MEMVIEW(&__pyx_t_27, 0);
__pyx_t_27.shape[0] = __pyx_v_pen.shape[0];
__pyx_t_27.strides[0] = __pyx_v_pen.strides[0];
__pyx_t_27.suboffsets[0] = -1;
if (unlikely(__pyx_memoryview_slice_memviewslice(
    &__pyx_t_27,
    __pyx_v_pen.shape[1], __pyx_v_pen.strides[1], __pyx_v_pen.suboffsets[1],
    1,
    1,
    &__pyx_t_28,
    0,
    (__pyx_v_l_over_2 + 1),
    0,
    0,
    1,
    0,
    1) < 0))
{
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 470; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_f_9radiotool_10algorithms_15par_build_table_divide_and_conquer_cost_and_path(__pyx_v_tc, __pyx_t_27, __pyx_v_start_beat, __pyx_v_opt_i, __pyx_v_offset, __pyx_v_global_path, __pyx_v_p, __pyx_v_f, __pyx_v_g, __pyx_v_mv1, __pyx_v_mv2, __pyx_v_mv3, __pyx_v_mv4, __pyx_v_mv5, __pyx_v_mv6);
/* "radiotool/algorithms/par_build_table.pyx":469
*
* # first half
* divide_and_conquer_cost_and_path( # <<<<<<<<<<<<<<
* tc, pen[:, :l_over_2 + 1], start_beat, opt_i, offset, global_path, p,
* f, g, mv1, mv2, mv3, mv4, mv5, mv6)
*/
__PYX_XDEC_MEMVIEW(&__pyx_t_27, 1);
/* Recurse on the second half with opt_i as its pinned start beat. */
/* "radiotool/algorithms/par_build_table.pyx":475
* # second half
* divide_and_conquer_cost_and_path(
* tc, pen[:, l_over_2:], opt_i, end_beat, l_over_2 + offset, global_path, p, # <<<<<<<<<<<<<<
* f, g, mv1, mv2, mv3, mv4, mv5, mv6)
*
*/
__pyx_t_28 = -1;
__pyx_t_29.data = __pyx_v_pen.data;
__pyx_t_29.memview = __pyx_v_pen.memview;
__PYX_INC_MEMVIEW(&__pyx_t_29, 0);
__pyx_t_29.shape[0] = __pyx_v_pen.shape[0];
__pyx_t_29.strides[0] = __pyx_v_pen.strides[0];
__pyx_t_29.suboffsets[0] = -1;
if (unlikely(__pyx_memoryview_slice_memviewslice(
    &__pyx_t_29,
    __pyx_v_pen.shape[1], __pyx_v_pen.strides[1], __pyx_v_pen.suboffsets[1],
    1,
    1,
    &__pyx_t_28,
    __pyx_v_l_over_2,
    0,
    0,
    1,
    0,
    0,
    1) < 0))
{
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 475; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_f_9radiotool_10algorithms_15par_build_table_divide_and_conquer_cost_and_path(__pyx_v_tc, __pyx_t_29, __pyx_v_opt_i, __pyx_v_end_beat, (__pyx_v_l_over_2 + __pyx_v_offset), __pyx_v_global_path, __pyx_v_p, __pyx_v_f, __pyx_v_g, __pyx_v_mv1, __pyx_v_mv2, __pyx_v_mv3, __pyx_v_mv4, __pyx_v_mv5, __pyx_v_mv6);
/* "radiotool/algorithms/par_build_table.pyx":474
*
* # second half
* divide_and_conquer_cost_and_path( # <<<<<<<<<<<<<<
* tc, pen[:, l_over_2:], opt_i, end_beat, l_over_2 + offset, global_path, p,
* f, g, mv1, mv2, mv3, mv4, mv5, mv6)
*/
__PYX_XDEC_MEMVIEW(&__pyx_t_29, 1);
}
__pyx_L3:;
/* "radiotool/algorithms/par_build_table.pyx":478
* f, g, mv1, mv2, mv3, mv4, mv5, mv6)
*
* return # <<<<<<<<<<<<<<
*
*
*/
goto __pyx_L0;
/* "radiotool/algorithms/par_build_table.pyx":328
*
*
* cdef void divide_and_conquer_cost_and_path( # <<<<<<<<<<<<<<
* double[:, :] tc, double[:, :] pen, int start_beat, int end_beat, int offset,
* int[:] global_path, Params p,
*/
/* function exit code */
/* The function returns void, so errors are reported as unraisable rather
 * than propagated. */
__pyx_L1_error:;
__PYX_XDEC_MEMVIEW(&__pyx_t_20, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_22, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_27, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_29, 1);
__Pyx_WriteUnraisable("radiotool.algorithms.par_build_table.divide_and_conquer_cost_and_path", __pyx_clineno, __pyx_lineno, __pyx_filename, 0);
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_new_pen, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_tc_column, 1);
__Pyx_RefNannyFinishContext();
}
/* "radiotool/algorithms/par_build_table.pyx":481
*
*
* cpdef int[:] build_table(double[:, :] trans_cost, double[:, :] penalty, # <<<<<<<<<<<<<<
* int min_beats=-1, int max_beats=-1, int first_pause=-1):
*
*/
/*
 * Auto-generated by Cython from radiotool/algorithms/par_build_table.pyx
 * (cpdef int[:] build_table(trans_cost, penalty, min_beats=-1, max_beats=-1,
 * first_pause=-1)).  Do not hand-edit: the refcount bookkeeping and the
 * goto-based error cleanup below are order-dependent and will be regenerated.
 *
 * C-level entry point for build_table.  Fills a Params struct from the beat
 * limits, allocates eight scratch double arrays plus one int result array,
 * runs divide_and_conquer_cost_and_path over them, and returns the resulting
 * int memoryview (global_path) with an extra reference for the caller.
 */
static PyObject *__pyx_pw_9radiotool_10algorithms_15par_build_table_1build_table(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static __Pyx_memviewslice __pyx_f_9radiotool_10algorithms_15par_build_table_build_table(__Pyx_memviewslice __pyx_v_trans_cost, __Pyx_memviewslice __pyx_v_penalty, CYTHON_UNUSED int __pyx_skip_dispatch, struct __pyx_opt_args_9radiotool_10algorithms_15par_build_table_build_table *__pyx_optional_args) {
int __pyx_v_min_beats = ((int)-1);
int __pyx_v_max_beats = ((int)-1);
int __pyx_v_first_pause = ((int)-1);
int __pyx_v_max_beats_with_padding;
struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params __pyx_v_p;
arrayobject *__pyx_v_dtemplate = 0;
arrayobject *__pyx_v_array1 = 0;
arrayobject *__pyx_v_array2 = 0;
arrayobject *__pyx_v_array3 = 0;
arrayobject *__pyx_v_array4 = 0;
arrayobject *__pyx_v_array5 = 0;
arrayobject *__pyx_v_array6 = 0;
arrayobject *__pyx_v_array7 = 0;
arrayobject *__pyx_v_array8 = 0;
__Pyx_memviewslice __pyx_v_mv1 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_mv2 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_mv3 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_mv4 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_mv5 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_mv6 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_f = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_g = { 0, 0, { 0 }, { 0 }, { 0 } };
arrayobject *__pyx_v_ar = 0;
arrayobject *__pyx_v_template = 0;
__Pyx_memviewslice __pyx_v_global_path = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_r = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
__Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_t_7 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("build_table", 0);
/* Unpack the optional C arguments; __pyx_n counts how many were supplied. */
if (__pyx_optional_args) {
if (__pyx_optional_args->__pyx_n > 0) {
__pyx_v_min_beats = __pyx_optional_args->min_beats;
if (__pyx_optional_args->__pyx_n > 1) {
__pyx_v_max_beats = __pyx_optional_args->max_beats;
if (__pyx_optional_args->__pyx_n > 2) {
__pyx_v_first_pause = __pyx_optional_args->first_pause;
}
}
}
}
/* Normalize min_beats/max_beats (-1 means "unset") into
 * max_beats_with_padding; see the pyx-source echoes below for each case. */
/* "radiotool/algorithms/par_build_table.pyx":486
 * cdef int max_beats_with_padding, i
 *
 * if max_beats != -1 and min_beats != -1: # <<<<<<<<<<<<<<
 * # max_beats_with_padding = min_beats + max_beats
 * max_beats_with_padding = max_beats
 */
__pyx_t_1 = ((__pyx_v_max_beats != -1) != 0);
if (__pyx_t_1) {
__pyx_t_2 = ((__pyx_v_min_beats != -1) != 0);
__pyx_t_3 = __pyx_t_2;
} else {
__pyx_t_3 = __pyx_t_1;
}
if (__pyx_t_3) {
/* "radiotool/algorithms/par_build_table.pyx":488
 * if max_beats != -1 and min_beats != -1:
 * # max_beats_with_padding = min_beats + max_beats
 * max_beats_with_padding = max_beats # <<<<<<<<<<<<<<
 * elif max_beats != -1:
 * # 4? One measures of padding? Just a thought
 */
__pyx_v_max_beats_with_padding = __pyx_v_max_beats;
goto __pyx_L3;
}
/* "radiotool/algorithms/par_build_table.pyx":489
 * # max_beats_with_padding = min_beats + max_beats
 * max_beats_with_padding = max_beats
 * elif max_beats != -1: # <<<<<<<<<<<<<<
 * # 4? One measures of padding? Just a thought
 * max_beats_with_padding = max_beats
 */
__pyx_t_3 = ((__pyx_v_max_beats != -1) != 0);
if (__pyx_t_3) {
/* "radiotool/algorithms/par_build_table.pyx":491
 * elif max_beats != -1:
 * # 4? One measures of padding? Just a thought
 * max_beats_with_padding = max_beats # <<<<<<<<<<<<<<
 * elif min_beats != -1:
 * max_beats = -1
 */
__pyx_v_max_beats_with_padding = __pyx_v_max_beats;
goto __pyx_L3;
}
/* "radiotool/algorithms/par_build_table.pyx":492
 * # 4? One measures of padding? Just a thought
 * max_beats_with_padding = max_beats
 * elif min_beats != -1: # <<<<<<<<<<<<<<
 * max_beats = -1
 * max_beats_with_padding = min_beats
 */
__pyx_t_3 = ((__pyx_v_min_beats != -1) != 0);
if (__pyx_t_3) {
/* "radiotool/algorithms/par_build_table.pyx":493
 * max_beats_with_padding = max_beats
 * elif min_beats != -1:
 * max_beats = -1 # <<<<<<<<<<<<<<
 * max_beats_with_padding = min_beats
 * else:
 */
__pyx_v_max_beats = -1;
/* "radiotool/algorithms/par_build_table.pyx":494
 * elif min_beats != -1:
 * max_beats = -1
 * max_beats_with_padding = min_beats # <<<<<<<<<<<<<<
 * else:
 * max_beats_with_padding = 1
 */
__pyx_v_max_beats_with_padding = __pyx_v_min_beats;
goto __pyx_L3;
}
/*else*/ {
/* "radiotool/algorithms/par_build_table.pyx":496
 * max_beats_with_padding = min_beats
 * else:
 * max_beats_with_padding = 1 # <<<<<<<<<<<<<<
 * max_beats = 1
 * min_beats = 0
 */
__pyx_v_max_beats_with_padding = 1;
/* "radiotool/algorithms/par_build_table.pyx":497
 * else:
 * max_beats_with_padding = 1
 * max_beats = 1 # <<<<<<<<<<<<<<
 * min_beats = 0
 *
 */
__pyx_v_max_beats = 1;
/* "radiotool/algorithms/par_build_table.pyx":498
 * max_beats_with_padding = 1
 * max_beats = 1
 * min_beats = 0 # <<<<<<<<<<<<<<
 *
 * cdef Params p
 */
__pyx_v_min_beats = 0;
}
__pyx_L3:;
/* Populate the Params struct (stack value, no cleanup required). */
/* "radiotool/algorithms/par_build_table.pyx":501
 *
 * cdef Params p
 * p.pen_val = 99999999.0 # <<<<<<<<<<<<<<
 * p.p0 = first_pause
 * p.n_beats = p.p0
 */
__pyx_v_p.pen_val = 99999999.0;
/* "radiotool/algorithms/par_build_table.pyx":502
 * cdef Params p
 * p.pen_val = 99999999.0
 * p.p0 = first_pause # <<<<<<<<<<<<<<
 * p.n_beats = p.p0
 * p.n_pauses = trans_cost.shape[0] - p.p0
 */
__pyx_v_p.p0 = __pyx_v_first_pause;
/* "radiotool/algorithms/par_build_table.pyx":503
 * p.pen_val = 99999999.0
 * p.p0 = first_pause
 * p.n_beats = p.p0 # <<<<<<<<<<<<<<
 * p.n_pauses = trans_cost.shape[0] - p.p0
 * p.min_beats = min_beats
 */
__pyx_t_4 = __pyx_v_p.p0;
__pyx_v_p.n_beats = __pyx_t_4;
/* "radiotool/algorithms/par_build_table.pyx":504
 * p.p0 = first_pause
 * p.n_beats = p.p0
 * p.n_pauses = trans_cost.shape[0] - p.p0 # <<<<<<<<<<<<<<
 * p.min_beats = min_beats
 * p.max_beats = max_beats
 */
__pyx_v_p.n_pauses = ((__pyx_v_trans_cost.shape[0]) - __pyx_v_p.p0);
/* "radiotool/algorithms/par_build_table.pyx":505
 * p.n_beats = p.p0
 * p.n_pauses = trans_cost.shape[0] - p.p0
 * p.min_beats = min_beats # <<<<<<<<<<<<<<
 * p.max_beats = max_beats
 * p.max_beats_with_padding = max_beats_with_padding
 */
__pyx_v_p.min_beats = __pyx_v_min_beats;
/* "radiotool/algorithms/par_build_table.pyx":506
 * p.n_pauses = trans_cost.shape[0] - p.p0
 * p.min_beats = min_beats
 * p.max_beats = max_beats # <<<<<<<<<<<<<<
 * p.max_beats_with_padding = max_beats_with_padding
 * p.p0_full = p.n_beats * p.max_beats_with_padding
 */
__pyx_v_p.max_beats = __pyx_v_max_beats;
/* "radiotool/algorithms/par_build_table.pyx":507
 * p.min_beats = min_beats
 * p.max_beats = max_beats
 * p.max_beats_with_padding = max_beats_with_padding # <<<<<<<<<<<<<<
 * p.p0_full = p.n_beats * p.max_beats_with_padding
 * p.all_full = p.p0_full + p.n_pauses
 */
__pyx_v_p.max_beats_with_padding = __pyx_v_max_beats_with_padding;
/* "radiotool/algorithms/par_build_table.pyx":508
 * p.max_beats = max_beats
 * p.max_beats_with_padding = max_beats_with_padding
 * p.p0_full = p.n_beats * p.max_beats_with_padding # <<<<<<<<<<<<<<
 * p.all_full = p.p0_full + p.n_pauses
 *
 */
__pyx_v_p.p0_full = (__pyx_v_p.n_beats * __pyx_v_p.max_beats_with_padding);
/* "radiotool/algorithms/par_build_table.pyx":509
 * p.max_beats_with_padding = max_beats_with_padding
 * p.p0_full = p.n_beats * p.max_beats_with_padding
 * p.all_full = p.p0_full + p.n_pauses # <<<<<<<<<<<<<<
 *
 * # double arrays for use throughout the computation
 */
__pyx_v_p.all_full = (__pyx_v_p.p0_full + __pyx_v_p.n_pauses);
/* Allocate eight uninitialized double scratch arrays (clone(..., zero=0))
 * of length p.all_full, then take double[:] memoryviews over them. */
/* "radiotool/algorithms/par_build_table.pyx":512
 *
 * # double arrays for use throughout the computation
 * cdef array dtemplate = array('d') # <<<<<<<<<<<<<<
 * cdef array array1, array2, array3, array4, array5, array6, array7, array8
 * cdef double[:] mv1, mv2, mv3, mv4, mv5, mv6, f, g
 */
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_7cpython_5array_array)), __pyx_tuple_, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_dtemplate = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":515
 * cdef array array1, array2, array3, array4, array5, array6, array7, array8
 * cdef double[:] mv1, mv2, mv3, mv4, mv5, mv6, f, g
 * array1 = clone(dtemplate, p.all_full, False) # <<<<<<<<<<<<<<
 * array2 = clone(dtemplate, p.all_full, False)
 * array3 = clone(dtemplate, p.all_full, False)
 */
__pyx_t_5 = ((PyObject *)__pyx_f_7cpython_5array_clone(__pyx_v_dtemplate, __pyx_v_p.all_full, 0)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 515; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_array1 = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":516
 * cdef double[:] mv1, mv2, mv3, mv4, mv5, mv6, f, g
 * array1 = clone(dtemplate, p.all_full, False)
 * array2 = clone(dtemplate, p.all_full, False) # <<<<<<<<<<<<<<
 * array3 = clone(dtemplate, p.all_full, False)
 * array4 = clone(dtemplate, p.all_full, False)
 */
__pyx_t_5 = ((PyObject *)__pyx_f_7cpython_5array_clone(__pyx_v_dtemplate, __pyx_v_p.all_full, 0)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 516; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_array2 = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":517
 * array1 = clone(dtemplate, p.all_full, False)
 * array2 = clone(dtemplate, p.all_full, False)
 * array3 = clone(dtemplate, p.all_full, False) # <<<<<<<<<<<<<<
 * array4 = clone(dtemplate, p.all_full, False)
 * array5 = clone(dtemplate, p.all_full, False)
 */
__pyx_t_5 = ((PyObject *)__pyx_f_7cpython_5array_clone(__pyx_v_dtemplate, __pyx_v_p.all_full, 0)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_array3 = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":518
 * array2 = clone(dtemplate, p.all_full, False)
 * array3 = clone(dtemplate, p.all_full, False)
 * array4 = clone(dtemplate, p.all_full, False) # <<<<<<<<<<<<<<
 * array5 = clone(dtemplate, p.all_full, False)
 * array6 = clone(dtemplate, p.all_full, False)
 */
__pyx_t_5 = ((PyObject *)__pyx_f_7cpython_5array_clone(__pyx_v_dtemplate, __pyx_v_p.all_full, 0)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 518; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_array4 = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":519
 * array3 = clone(dtemplate, p.all_full, False)
 * array4 = clone(dtemplate, p.all_full, False)
 * array5 = clone(dtemplate, p.all_full, False) # <<<<<<<<<<<<<<
 * array6 = clone(dtemplate, p.all_full, False)
 * array7 = clone(dtemplate, p.all_full, False)
 */
__pyx_t_5 = ((PyObject *)__pyx_f_7cpython_5array_clone(__pyx_v_dtemplate, __pyx_v_p.all_full, 0)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 519; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_array5 = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":520
 * array4 = clone(dtemplate, p.all_full, False)
 * array5 = clone(dtemplate, p.all_full, False)
 * array6 = clone(dtemplate, p.all_full, False) # <<<<<<<<<<<<<<
 * array7 = clone(dtemplate, p.all_full, False)
 * array8 = clone(dtemplate, p.all_full, False)
 */
__pyx_t_5 = ((PyObject *)__pyx_f_7cpython_5array_clone(__pyx_v_dtemplate, __pyx_v_p.all_full, 0)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 520; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_array6 = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":521
 * array5 = clone(dtemplate, p.all_full, False)
 * array6 = clone(dtemplate, p.all_full, False)
 * array7 = clone(dtemplate, p.all_full, False) # <<<<<<<<<<<<<<
 * array8 = clone(dtemplate, p.all_full, False)
 * f = array1
 */
__pyx_t_5 = ((PyObject *)__pyx_f_7cpython_5array_clone(__pyx_v_dtemplate, __pyx_v_p.all_full, 0)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_array7 = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":522
 * array6 = clone(dtemplate, p.all_full, False)
 * array7 = clone(dtemplate, p.all_full, False)
 * array8 = clone(dtemplate, p.all_full, False) # <<<<<<<<<<<<<<
 * f = array1
 * g = array2
 */
__pyx_t_5 = ((PyObject *)__pyx_f_7cpython_5array_clone(__pyx_v_dtemplate, __pyx_v_p.all_full, 0)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_array8 = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":523
 * array7 = clone(dtemplate, p.all_full, False)
 * array8 = clone(dtemplate, p.all_full, False)
 * f = array1 # <<<<<<<<<<<<<<
 * g = array2
 * mv1 = array3
 */
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(((PyObject *)__pyx_v_array1));
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_f = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "radiotool/algorithms/par_build_table.pyx":524
 * array8 = clone(dtemplate, p.all_full, False)
 * f = array1
 * g = array2 # <<<<<<<<<<<<<<
 * mv1 = array3
 * mv2 = array4
 */
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(((PyObject *)__pyx_v_array2));
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 524; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_g = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "radiotool/algorithms/par_build_table.pyx":525
 * f = array1
 * g = array2
 * mv1 = array3 # <<<<<<<<<<<<<<
 * mv2 = array4
 * mv3 = array5
 */
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(((PyObject *)__pyx_v_array3));
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_mv1 = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "radiotool/algorithms/par_build_table.pyx":526
 * g = array2
 * mv1 = array3
 * mv2 = array4 # <<<<<<<<<<<<<<
 * mv3 = array5
 * mv4 = array6
 */
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(((PyObject *)__pyx_v_array4));
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 526; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_mv2 = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "radiotool/algorithms/par_build_table.pyx":527
 * mv1 = array3
 * mv2 = array4
 * mv3 = array5 # <<<<<<<<<<<<<<
 * mv4 = array6
 * mv5 = array7
 */
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(((PyObject *)__pyx_v_array5));
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 527; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_mv3 = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "radiotool/algorithms/par_build_table.pyx":528
 * mv2 = array4
 * mv3 = array5
 * mv4 = array6 # <<<<<<<<<<<<<<
 * mv5 = array7
 * mv6 = array8
 */
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(((PyObject *)__pyx_v_array6));
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 528; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_mv4 = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "radiotool/algorithms/par_build_table.pyx":529
 * mv3 = array5
 * mv4 = array6
 * mv5 = array7 # <<<<<<<<<<<<<<
 * mv6 = array8
 *
 */
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(((PyObject *)__pyx_v_array7));
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 529; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_mv5 = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "radiotool/algorithms/par_build_table.pyx":530
 * mv4 = array6
 * mv5 = array7
 * mv6 = array8 # <<<<<<<<<<<<<<
 *
 * cdef array ar, template = array('i')
 */
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(((PyObject *)__pyx_v_array8));
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 530; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_mv6 = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* Allocate the int result array (length = penalty.shape[1]) and view it. */
/* "radiotool/algorithms/par_build_table.pyx":532
 * mv6 = array8
 *
 * cdef array ar, template = array('i') # <<<<<<<<<<<<<<
 * ar = clone(template, penalty.shape[1], False)
 * cdef int[:] global_path = ar
 */
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_7cpython_5array_array)), __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 532; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_template = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":533
 *
 * cdef array ar, template = array('i')
 * ar = clone(template, penalty.shape[1], False) # <<<<<<<<<<<<<<
 * cdef int[:] global_path = ar
 *
 */
__pyx_t_5 = ((PyObject *)__pyx_f_7cpython_5array_clone(__pyx_v_template, (__pyx_v_penalty.shape[1]), 0)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 533; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_ar = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":534
 * cdef array ar, template = array('i')
 * ar = clone(template, penalty.shape[1], False)
 * cdef int[:] global_path = ar # <<<<<<<<<<<<<<
 *
 * divide_and_conquer_cost_and_path(trans_cost, penalty, -1, -1, 0, global_path, p,
 */
__pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_ds_int(((PyObject *)__pyx_v_ar));
if (unlikely(!__pyx_t_7.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 534; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_global_path = __pyx_t_7;
__pyx_t_7.memview = NULL;
__pyx_t_7.data = NULL;
/* Main computation: fills global_path in place using the scratch views. */
/* "radiotool/algorithms/par_build_table.pyx":536
 * cdef int[:] global_path = ar
 *
 * divide_and_conquer_cost_and_path(trans_cost, penalty, -1, -1, 0, global_path, p, # <<<<<<<<<<<<<<
 * f, g, mv1, mv2, mv3, mv4, mv5, mv6)
 *
 */
__pyx_f_9radiotool_10algorithms_15par_build_table_divide_and_conquer_cost_and_path(__pyx_v_trans_cost, __pyx_v_penalty, -1, -1, 0, __pyx_v_global_path, __pyx_v_p, __pyx_v_f, __pyx_v_g, __pyx_v_mv1, __pyx_v_mv2, __pyx_v_mv3, __pyx_v_mv4, __pyx_v_mv5, __pyx_v_mv6);
/* Return global_path; INC its memoryview so it survives local cleanup. */
/* "radiotool/algorithms/par_build_table.pyx":539
 * f, g, mv1, mv2, mv3, mv4, mv5, mv6)
 *
 * return global_path # <<<<<<<<<<<<<<
 */
__PYX_INC_MEMVIEW(&__pyx_v_global_path, 0);
__pyx_r = __pyx_v_global_path;
goto __pyx_L0;
/* "radiotool/algorithms/par_build_table.pyx":481
 *
 *
 * cpdef int[:] build_table(double[:, :] trans_cost, double[:, :] penalty, # <<<<<<<<<<<<<<
 * int min_beats=-1, int max_beats=-1, int first_pause=-1):
 *
 */
/* function exit code: error path signals failure with a NULL memoryview;
 * both paths fall through to the shared local-variable cleanup at L2. */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__PYX_XDEC_MEMVIEW(&__pyx_t_6, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_7, 1);
__pyx_r.data = NULL;
__pyx_r.memview = NULL;
__Pyx_AddTraceback("radiotool.algorithms.par_build_table.build_table", __pyx_clineno, __pyx_lineno, __pyx_filename);
goto __pyx_L2;
__pyx_L0:;
if (unlikely(!__pyx_r.memview)) {
PyErr_SetString(PyExc_TypeError,"Memoryview return value is not initialized");
}
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_dtemplate);
__Pyx_XDECREF((PyObject *)__pyx_v_array1);
__Pyx_XDECREF((PyObject *)__pyx_v_array2);
__Pyx_XDECREF((PyObject *)__pyx_v_array3);
__Pyx_XDECREF((PyObject *)__pyx_v_array4);
__Pyx_XDECREF((PyObject *)__pyx_v_array5);
__Pyx_XDECREF((PyObject *)__pyx_v_array6);
__Pyx_XDECREF((PyObject *)__pyx_v_array7);
__Pyx_XDECREF((PyObject *)__pyx_v_array8);
__PYX_XDEC_MEMVIEW(&__pyx_v_mv1, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_mv2, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_mv3, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_mv4, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_mv5, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_mv6, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_f, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_g, 1);
__Pyx_XDECREF((PyObject *)__pyx_v_ar);
__Pyx_XDECREF((PyObject *)__pyx_v_template);
__PYX_XDEC_MEMVIEW(&__pyx_v_global_path, 1);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
/*
 * Cython-generated Python-level wrapper for build_table.  Unpacks up to
 * five positional/keyword arguments (trans_cost, penalty required;
 * min_beats, max_beats, first_pause optional with default -1), converts
 * them to C memoryviews/ints, and forwards to the __pyx_pf_ implementation.
 * NOTE: the case labels in both switch statements fall through on purpose
 * (counting down from the number of supplied positional arguments).
 */
static PyObject *__pyx_pw_9radiotool_10algorithms_15par_build_table_1build_table(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_pw_9radiotool_10algorithms_15par_build_table_1build_table(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_trans_cost = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_penalty = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_v_min_beats;
int __pyx_v_max_beats;
int __pyx_v_first_pause;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("build_table (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_trans_cost,&__pyx_n_s_penalty,&__pyx_n_s_min_beats,&__pyx_n_s_max_beats,&__pyx_n_s_first_pause,0};
PyObject* values[5] = {0,0,0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
/* Collect positional arguments first (intentional fallthrough). */
switch (pos_args) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Fill the remaining slots from keywords (intentional fallthrough). */
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_trans_cost)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_penalty)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("build_table", 0, 2, 5, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_min_beats);
if (value) { values[2] = value; kw_args--; }
}
case 3:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_max_beats);
if (value) { values[3] = value; kw_args--; }
}
case 4:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_first_pause);
if (value) { values[4] = value; kw_args--; }
}
}
/* Any leftover keyword is unexpected/duplicate: raise via the parser. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "build_table") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
/* No keywords: 2-5 positional arguments accepted (fallthrough again). */
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
/* Convert the collected PyObjects to C values; defaults are -1. */
__pyx_v_trans_cost = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0]); if (unlikely(!__pyx_v_trans_cost.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_penalty = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[1]); if (unlikely(!__pyx_v_penalty.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
if (values[2]) {
__pyx_v_min_beats = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_min_beats == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 482; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_min_beats = ((int)-1);
}
if (values[3]) {
__pyx_v_max_beats = __Pyx_PyInt_As_int(values[3]); if (unlikely((__pyx_v_max_beats == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 482; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_max_beats = ((int)-1);
}
if (values[4]) {
__pyx_v_first_pause = __Pyx_PyInt_As_int(values[4]); if (unlikely((__pyx_v_first_pause == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 482; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_first_pause = ((int)-1);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("build_table", 0, 2, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("radiotool.algorithms.par_build_table.build_table", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_9radiotool_10algorithms_15par_build_table_build_table(__pyx_self, __pyx_v_trans_cost, __pyx_v_penalty, __pyx_v_min_beats, __pyx_v_max_beats, __pyx_v_first_pause);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/*
 * Glue between the Python wrapper and the C implementation of build_table.
 * Packs the optional ints into the opt-args struct, calls the cdef
 * __pyx_f_ function, and wraps the returned int[:] slice in a Python
 * memoryview object.  Owns (and releases) the argument memoryslices.
 */
static PyObject *__pyx_pf_9radiotool_10algorithms_15par_build_table_build_table(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_trans_cost, __Pyx_memviewslice __pyx_v_penalty, int __pyx_v_min_beats, int __pyx_v_max_beats, int __pyx_v_first_pause) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1 = { 0, 0, { 0 }, { 0 }, { 0 } };
struct __pyx_opt_args_9radiotool_10algorithms_15par_build_table_build_table __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("build_table", 0);
__Pyx_XDECREF(__pyx_r);
/* Guard against uninitialized (NULL-memview) required arguments. */
if (unlikely(!__pyx_v_trans_cost.memview)) { __Pyx_RaiseUnboundLocalError("trans_cost"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L1_error;} }
if (unlikely(!__pyx_v_penalty.memview)) { __Pyx_RaiseUnboundLocalError("penalty"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L1_error;} }
/* All three optional arguments are always forwarded (__pyx_n = 3). */
__pyx_t_2.__pyx_n = 3;
__pyx_t_2.min_beats = __pyx_v_min_beats;
__pyx_t_2.max_beats = __pyx_v_max_beats;
__pyx_t_2.first_pause = __pyx_v_first_pause;
__pyx_t_1 = __pyx_f_9radiotool_10algorithms_15par_build_table_build_table(__pyx_v_trans_cost, __pyx_v_penalty, 0, &__pyx_t_2); if (unlikely(!__pyx_t_1.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* Convert the returned int[:] slice into a Python memoryview object. */
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_t_1, 1, (PyObject *(*)(char *)) __pyx_memview_get_int, (int (*)(char *, PyObject *)) __pyx_memview_set_int, 0);; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__PYX_XDEC_MEMVIEW(&__pyx_t_1, 1);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__PYX_XDEC_MEMVIEW(&__pyx_t_1, 1);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("radiotool.algorithms.par_build_table.build_table", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_trans_cost, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_penalty, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cpython/array.pxd":91
 * __data_union data
 *
 * def __getbuffer__(self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
 * # This implementation of getbuffer is geared towards Cython
 * # requirements, and does not yet fullfill the PEP.
 */
/* Python wrapper */
/* Thin buffer-protocol wrapper: casts self to arrayobject* and delegates
 * to the __pyx_pf_ implementation below. */
static CYTHON_UNUSED int __pyx_pw_7cpython_5array_5array_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_pw_7cpython_5array_5array_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_pf_7cpython_5array_5array___getbuffer__(((arrayobject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/*
 * PEP 3118 __getbuffer__ for array.array, generated from cpython/array.pxd.
 * Exposes the array's storage as a 1-D strided buffer regardless of the
 * requested flags.  Returns 0 on success, -1 with an exception set on
 * failure.  The heap allocation below holds shape[0] plus two extra bytes
 * that double as the NUL-terminated one-character format string
 * (info.format points just past shape); it is freed in __releasebuffer__.
 */
static int __pyx_pf_7cpython_5array_5array___getbuffer__(arrayobject *__pyx_v_self, Py_buffer *__pyx_v_info, CYTHON_UNUSED int __pyx_v_flags) {
PyObject *__pyx_v_item_count = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
char *__pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
/* Pre-set info->obj to None so the error path can safely clear it. */
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "cpython/array.pxd":96
 * # In particular strided access is always provided regardless
 * # of flags
 * item_count = Py_SIZE(self) # <<<<<<<<<<<<<<
 *
 * info.suboffsets = NULL
 */
__pyx_t_1 = PyInt_FromSsize_t(Py_SIZE(((PyObject *)__pyx_v_self))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_item_count = __pyx_t_1;
__pyx_t_1 = 0;
/* "cpython/array.pxd":98
 * item_count = Py_SIZE(self)
 *
 * info.suboffsets = NULL # <<<<<<<<<<<<<<
 * info.buf = self.data.as_chars
 * info.readonly = 0
 */
__pyx_v_info->suboffsets = NULL;
/* "cpython/array.pxd":99
 *
 * info.suboffsets = NULL
 * info.buf = self.data.as_chars # <<<<<<<<<<<<<<
 * info.readonly = 0
 * info.ndim = 1
 */
__pyx_t_2 = __pyx_v_self->data.as_chars;
__pyx_v_info->buf = __pyx_t_2;
/* "cpython/array.pxd":100
 * info.suboffsets = NULL
 * info.buf = self.data.as_chars
 * info.readonly = 0 # <<<<<<<<<<<<<<
 * info.ndim = 1
 * info.itemsize = self.ob_descr.itemsize # e.g. sizeof(float)
 */
__pyx_v_info->readonly = 0;
/* "cpython/array.pxd":101
 * info.buf = self.data.as_chars
 * info.readonly = 0
 * info.ndim = 1 # <<<<<<<<<<<<<<
 * info.itemsize = self.ob_descr.itemsize # e.g. sizeof(float)
 * info.len = info.itemsize * item_count
 */
__pyx_v_info->ndim = 1;
/* "cpython/array.pxd":102
 * info.readonly = 0
 * info.ndim = 1
 * info.itemsize = self.ob_descr.itemsize # e.g. sizeof(float) # <<<<<<<<<<<<<<
 * info.len = info.itemsize * item_count
 *
 */
__pyx_t_3 = __pyx_v_self->ob_descr->itemsize;
__pyx_v_info->itemsize = __pyx_t_3;
/* "cpython/array.pxd":103
 * info.ndim = 1
 * info.itemsize = self.ob_descr.itemsize # e.g. sizeof(float)
 * info.len = info.itemsize * item_count # <<<<<<<<<<<<<<
 *
 * info.shape = <Py_ssize_t*> PyMem_Malloc(sizeof(Py_ssize_t) + 2)
 */
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_info->itemsize); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = PyNumber_Multiply(__pyx_t_1, __pyx_v_item_count); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_5 = __Pyx_PyIndex_AsSsize_t(__pyx_t_4); if (unlikely((__pyx_t_5 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_v_info->len = __pyx_t_5;
/* "cpython/array.pxd":105
 * info.len = info.itemsize * item_count
 *
 * info.shape = <Py_ssize_t*> PyMem_Malloc(sizeof(Py_ssize_t) + 2) # <<<<<<<<<<<<<<
 * if not info.shape:
 * raise MemoryError()
 */
/* One Py_ssize_t for shape[0] + 2 bytes reused as the format string. */
__pyx_v_info->shape = ((Py_ssize_t *)PyMem_Malloc(((sizeof(Py_ssize_t)) + 2)));
/* "cpython/array.pxd":106
 *
 * info.shape = <Py_ssize_t*> PyMem_Malloc(sizeof(Py_ssize_t) + 2)
 * if not info.shape: # <<<<<<<<<<<<<<
 * raise MemoryError()
 * info.shape[0] = item_count # constant regardless of resizing
 */
__pyx_t_6 = ((!(__pyx_v_info->shape != 0)) != 0);
if (__pyx_t_6) {
/* "cpython/array.pxd":107
 * info.shape = <Py_ssize_t*> PyMem_Malloc(sizeof(Py_ssize_t) + 2)
 * if not info.shape:
 * raise MemoryError() # <<<<<<<<<<<<<<
 * info.shape[0] = item_count # constant regardless of resizing
 * info.strides = &info.itemsize
 */
PyErr_NoMemory(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "cpython/array.pxd":108
 * if not info.shape:
 * raise MemoryError()
 * info.shape[0] = item_count # constant regardless of resizing # <<<<<<<<<<<<<<
 * info.strides = &info.itemsize
 *
 */
__pyx_t_5 = __Pyx_PyIndex_AsSsize_t(__pyx_v_item_count); if (unlikely((__pyx_t_5 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
(__pyx_v_info->shape[0]) = __pyx_t_5;
/* "cpython/array.pxd":109
 * raise MemoryError()
 * info.shape[0] = item_count # constant regardless of resizing
 * info.strides = &info.itemsize # <<<<<<<<<<<<<<
 *
 * info.format = <char*> (info.shape + 1)
 */
__pyx_v_info->strides = (&__pyx_v_info->itemsize);
/* "cpython/array.pxd":111
 * info.strides = &info.itemsize
 *
 * info.format = <char*> (info.shape + 1) # <<<<<<<<<<<<<<
 * info.format[0] = self.ob_descr.typecode
 * info.format[1] = 0
 */
/* format string lives in the 2 spare bytes right after shape[0]. */
__pyx_v_info->format = ((char *)(__pyx_v_info->shape + 1));
/* "cpython/array.pxd":112
 *
 * info.format = <char*> (info.shape + 1)
 * info.format[0] = self.ob_descr.typecode # <<<<<<<<<<<<<<
 * info.format[1] = 0
 * info.obj = self
 */
__pyx_t_3 = __pyx_v_self->ob_descr->typecode;
(__pyx_v_info->format[0]) = __pyx_t_3;
/* "cpython/array.pxd":113
 * info.format = <char*> (info.shape + 1)
 * info.format[0] = self.ob_descr.typecode
 * info.format[1] = 0 # <<<<<<<<<<<<<<
 * info.obj = self
 *
 */
(__pyx_v_info->format[1]) = 0;
/* "cpython/array.pxd":114
 * info.format[0] = self.ob_descr.typecode
 * info.format[1] = 0
 * info.obj = self # <<<<<<<<<<<<<<
 *
 * def __releasebuffer__(self, Py_buffer* info):
 */
/* Swap the placeholder None for a new reference to self (buffer owner). */
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "cpython/array.pxd":91
 * __data_union data
 *
 * def __getbuffer__(self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
 * # This implementation of getbuffer is geared towards Cython
 * # requirements, and does not yet fullfill the PEP.
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("cpython.array.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__pyx_L2:;
__Pyx_XDECREF(__pyx_v_item_count);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cpython/array.pxd":116
* info.obj = self
*
* def __releasebuffer__(self, Py_buffer* info): # <<<<<<<<<<<<<<
* PyMem_Free(info.shape)
*
*/
/* Python wrapper */
/* Python-level wrapper for array.__releasebuffer__ (buffer protocol slot
 * bf_releasebuffer). It only casts the opaque PyObject* self to the concrete
 * arrayobject* and delegates to the implementation function below; the
 * RefNanny calls are Cython's debug-mode reference-count tracing and compile
 * to nothing in normal builds. Returns void per the bf_releasebuffer slot. */
static CYTHON_UNUSED void __pyx_pw_7cpython_5array_5array_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
static CYTHON_UNUSED void __pyx_pw_7cpython_5array_5array_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
__pyx_pf_7cpython_5array_5array_2__releasebuffer__(((arrayobject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* Implementation of array.__releasebuffer__: releases the scratch block that
 * __getbuffer__ allocated for the Py_buffer. Freeing info->shape is
 * sufficient because (per the __getbuffer__ code visible earlier in this
 * file) info->format points just past info->shape inside the same PyMem
 * allocation, so one PyMem_Free reclaims both. self is unused here. */
static void __pyx_pf_7cpython_5array_5array_2__releasebuffer__(CYTHON_UNUSED arrayobject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__releasebuffer__", 0);
/* "cpython/array.pxd":117
 *
 * def __releasebuffer__(self, Py_buffer* info):
 * PyMem_Free(info.shape) # <<<<<<<<<<<<<<
 *
 * array newarrayobject(PyTypeObject* type, Py_ssize_t size, arraydescr *descr)
 */
PyMem_Free(__pyx_v_info->shape);
/* "cpython/array.pxd":116
 * info.obj = self
 *
 * def __releasebuffer__(self, Py_buffer* info): # <<<<<<<<<<<<<<
 * PyMem_Free(info.shape)
 *
 */
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "cpython/array.pxd":128
*
*
* cdef inline array clone(array template, Py_ssize_t length, bint zero): # <<<<<<<<<<<<<<
* """ fast creation of a new array, given a template array.
* type will be same as template.
*/
/* cdef inline array clone(array template, Py_ssize_t length, bint zero):
 * Fast creation of a new stdlib array of `length` elements with the same
 * type object and item descriptor as `template`; if `zero` is true the new
 * buffer is memset to zero. Returns a new reference, or NULL (with a Python
 * exception set) if newarrayobject fails. Note: contents are uninitialized
 * when zero is false. */
static CYTHON_INLINE arrayobject *__pyx_f_7cpython_5array_clone(arrayobject *__pyx_v_template, Py_ssize_t __pyx_v_length, int __pyx_v_zero) {
arrayobject *__pyx_v_op = NULL;
arrayobject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("clone", 0);
/* "cpython/array.pxd":132
 * type will be same as template.
 * if zero is true, new array will be initialized with zeroes."""
 * op = newarrayobject(Py_TYPE(template), length, template.ob_descr) # <<<<<<<<<<<<<<
 * if zero and op is not None:
 * memset(op.data.as_chars, 0, length * op.ob_descr.itemsize)
 */
/* NULL return (allocation failure) jumps to the error label below. */
__pyx_t_1 = ((PyObject *)newarrayobject(Py_TYPE(((PyObject *)__pyx_v_template)), __pyx_v_length, __pyx_v_template->ob_descr)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_op = ((arrayobject *)__pyx_t_1);
__pyx_t_1 = 0;
/* "cpython/array.pxd":133
 * if zero is true, new array will be initialized with zeroes."""
 * op = newarrayobject(Py_TYPE(template), length, template.ob_descr)
 * if zero and op is not None: # <<<<<<<<<<<<<<
 * memset(op.data.as_chars, 0, length * op.ob_descr.itemsize)
 * return op
 */
/* Short-circuit "zero and op is not None": the None check is only evaluated
 * when zero is true. */
if ((__pyx_v_zero != 0)) {
__pyx_t_2 = (((PyObject *)__pyx_v_op) != Py_None);
__pyx_t_3 = (__pyx_t_2 != 0);
} else {
__pyx_t_3 = (__pyx_v_zero != 0);
}
if (__pyx_t_3) {
/* "cpython/array.pxd":134
 * op = newarrayobject(Py_TYPE(template), length, template.ob_descr)
 * if zero and op is not None:
 * memset(op.data.as_chars, 0, length * op.ob_descr.itemsize) # <<<<<<<<<<<<<<
 * return op
 *
 */
memset(__pyx_v_op->data.as_chars, 0, (__pyx_v_length * __pyx_v_op->ob_descr->itemsize));
goto __pyx_L3;
}
__pyx_L3:;
/* "cpython/array.pxd":135
 * if zero and op is not None:
 * memset(op.data.as_chars, 0, length * op.ob_descr.itemsize)
 * return op # <<<<<<<<<<<<<<
 *
 * cdef inline array copy(array self):
 */
/* Return a new reference to op; the extra INCREF is balanced by the XDECREF
 * of the local op at __pyx_L0. */
__Pyx_XDECREF(((PyObject *)__pyx_r));
__Pyx_INCREF(((PyObject *)__pyx_v_op));
__pyx_r = __pyx_v_op;
goto __pyx_L0;
/* "cpython/array.pxd":128
 *
 *
 * cdef inline array clone(array template, Py_ssize_t length, bint zero): # <<<<<<<<<<<<<<
 * """ fast creation of a new array, given a template array.
 * type will be same as template.
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("cpython.array.clone", __pyx_clineno, __pyx_lineno, __pyx_filename);
/* Error path falls through into the common cleanup at __pyx_L0 with
 * __pyx_r == NULL (0). */
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_op);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cpython/array.pxd":137
* return op
*
* cdef inline array copy(array self): # <<<<<<<<<<<<<<
* """ make a copy of an array. """
* op = newarrayobject(Py_TYPE(self), Py_SIZE(self), self.ob_descr)
*/
/* cdef inline array copy(array self):
 * Makes a full copy of a stdlib array: allocates a new array with the same
 * type, length (Py_SIZE) and item descriptor, then memcpy's the raw element
 * bytes across. Returns a new reference, or NULL (with a Python exception
 * set) if newarrayobject fails. */
static CYTHON_INLINE arrayobject *__pyx_f_7cpython_5array_copy(arrayobject *__pyx_v_self) {
arrayobject *__pyx_v_op = NULL;
arrayobject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy", 0);
/* "cpython/array.pxd":139
 * cdef inline array copy(array self):
 * """ make a copy of an array. """
 * op = newarrayobject(Py_TYPE(self), Py_SIZE(self), self.ob_descr) # <<<<<<<<<<<<<<
 * memcpy(op.data.as_chars, self.data.as_chars, Py_SIZE(op) * op.ob_descr.itemsize)
 * return op
 */
__pyx_t_1 = ((PyObject *)newarrayobject(Py_TYPE(((PyObject *)__pyx_v_self)), Py_SIZE(((PyObject *)__pyx_v_self)), __pyx_v_self->ob_descr)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_op = ((arrayobject *)__pyx_t_1);
__pyx_t_1 = 0;
/* "cpython/array.pxd":140
 * """ make a copy of an array. """
 * op = newarrayobject(Py_TYPE(self), Py_SIZE(self), self.ob_descr)
 * memcpy(op.data.as_chars, self.data.as_chars, Py_SIZE(op) * op.ob_descr.itemsize) # <<<<<<<<<<<<<<
 * return op
 *
 */
/* memcpy is safe: op was just allocated, so the regions cannot overlap. */
memcpy(__pyx_v_op->data.as_chars, __pyx_v_self->data.as_chars, (Py_SIZE(((PyObject *)__pyx_v_op)) * __pyx_v_op->ob_descr->itemsize));
/* "cpython/array.pxd":141
 * op = newarrayobject(Py_TYPE(self), Py_SIZE(self), self.ob_descr)
 * memcpy(op.data.as_chars, self.data.as_chars, Py_SIZE(op) * op.ob_descr.itemsize)
 * return op # <<<<<<<<<<<<<<
 *
 * cdef inline int extend_buffer(array self, char* stuff, Py_ssize_t n) except -1:
 */
/* INCREF for the returned reference; the local op is XDECREF'ed at L0. */
__Pyx_XDECREF(((PyObject *)__pyx_r));
__Pyx_INCREF(((PyObject *)__pyx_v_op));
__pyx_r = __pyx_v_op;
goto __pyx_L0;
/* "cpython/array.pxd":137
 * return op
 *
 * cdef inline array copy(array self): # <<<<<<<<<<<<<<
 * """ make a copy of an array. """
 * op = newarrayobject(Py_TYPE(self), Py_SIZE(self), self.ob_descr)
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("cpython.array.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_op);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cpython/array.pxd":143
* return op
*
* cdef inline int extend_buffer(array self, char* stuff, Py_ssize_t n) except -1: # <<<<<<<<<<<<<<
* """ efficent appending of new stuff of same type
* (e.g. of same array type)
*/
/* cdef inline int extend_buffer(array self, char* stuff, Py_ssize_t n) except -1:
 * Appends n *elements* (not bytes) of raw data `stuff` to `self`: grows the
 * array via resize_smart, then memcpy's the new bytes onto the old end.
 * The caller guarantees `stuff` holds elements of self's item type.
 * Returns 0 on success, -1 with a Python exception set if the resize fails. */
static CYTHON_INLINE int __pyx_f_7cpython_5array_extend_buffer(arrayobject *__pyx_v_self, char *__pyx_v_stuff, Py_ssize_t __pyx_v_n) {
Py_ssize_t __pyx_v_itemsize;
Py_ssize_t __pyx_v_origsize;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("extend_buffer", 0);
/* "cpython/array.pxd":147
 * (e.g. of same array type)
 * n: number of elements (not number of bytes!) """
 * cdef Py_ssize_t itemsize = self.ob_descr.itemsize # <<<<<<<<<<<<<<
 * cdef Py_ssize_t origsize = Py_SIZE(self)
 * resize_smart(self, origsize + n)
 */
/* NOTE(review): itemsize passes through an int temporary (__pyx_t_1) before
 * widening to Py_ssize_t; harmless for real array itemsizes but would
 * truncate values > INT_MAX — generated by Cython, verify before touching. */
__pyx_t_1 = __pyx_v_self->ob_descr->itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "cpython/array.pxd":148
 * n: number of elements (not number of bytes!) """
 * cdef Py_ssize_t itemsize = self.ob_descr.itemsize
 * cdef Py_ssize_t origsize = Py_SIZE(self) # <<<<<<<<<<<<<<
 * resize_smart(self, origsize + n)
 * memcpy(self.data.as_chars + origsize * itemsize, stuff, n * itemsize)
 */
__pyx_v_origsize = Py_SIZE(((PyObject *)__pyx_v_self));
/* "cpython/array.pxd":149
 * cdef Py_ssize_t itemsize = self.ob_descr.itemsize
 * cdef Py_ssize_t origsize = Py_SIZE(self)
 * resize_smart(self, origsize + n) # <<<<<<<<<<<<<<
 * memcpy(self.data.as_chars + origsize * itemsize, stuff, n * itemsize)
 * return 0
 */
/* resize_smart signals failure with -1; on failure self is left unchanged
 * and we propagate -1 through the error label. */
__pyx_t_1 = resize_smart(__pyx_v_self, (__pyx_v_origsize + __pyx_v_n)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "cpython/array.pxd":150
 * cdef Py_ssize_t origsize = Py_SIZE(self)
 * resize_smart(self, origsize + n)
 * memcpy(self.data.as_chars + origsize * itemsize, stuff, n * itemsize) # <<<<<<<<<<<<<<
 * return 0
 *
 */
memcpy((__pyx_v_self->data.as_chars + (__pyx_v_origsize * __pyx_v_itemsize)), __pyx_v_stuff, (__pyx_v_n * __pyx_v_itemsize));
/* "cpython/array.pxd":151
 * resize_smart(self, origsize + n)
 * memcpy(self.data.as_chars + origsize * itemsize, stuff, n * itemsize)
 * return 0 # <<<<<<<<<<<<<<
 *
 * cdef inline int extend(array self, array other) except -1:
 */
__pyx_r = 0;
goto __pyx_L0;
/* "cpython/array.pxd":143
 * return op
 *
 * cdef inline int extend_buffer(array self, char* stuff, Py_ssize_t n) except -1: # <<<<<<<<<<<<<<
 * """ efficent appending of new stuff of same type
 * (e.g. of same array type)
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_AddTraceback("cpython.array.extend_buffer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cpython/array.pxd":153
* return 0
*
* cdef inline int extend(array self, array other) except -1: # <<<<<<<<<<<<<<
* """ extend array with data from another array; types must match. """
* if self.ob_descr.typecode != other.ob_descr.typecode:
*/
/* cdef inline int extend(array self, array other) except -1:
 * Appends the full contents of `other` onto `self`; both arrays must have
 * the same typecode, otherwise PyErr_BadArgument raises TypeError.
 * Returns the result of extend_buffer (0 on success), or -1 with a Python
 * exception set on type mismatch or resize failure. */
static CYTHON_INLINE int __pyx_f_7cpython_5array_extend(arrayobject *__pyx_v_self, arrayobject *__pyx_v_other) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("extend", 0);
/* "cpython/array.pxd":155
 * cdef inline int extend(array self, array other) except -1:
 * """ extend array with data from another array; types must match. """
 * if self.ob_descr.typecode != other.ob_descr.typecode: # <<<<<<<<<<<<<<
 * PyErr_BadArgument()
 * return extend_buffer(self, other.data.as_chars, Py_SIZE(other))
 */
__pyx_t_1 = ((__pyx_v_self->ob_descr->typecode != __pyx_v_other->ob_descr->typecode) != 0);
if (__pyx_t_1) {
/* "cpython/array.pxd":156
 * """ extend array with data from another array; types must match. """
 * if self.ob_descr.typecode != other.ob_descr.typecode:
 * PyErr_BadArgument() # <<<<<<<<<<<<<<
 * return extend_buffer(self, other.data.as_chars, Py_SIZE(other))
 *
 */
/* PyErr_BadArgument always sets TypeError and returns 0, so this branch
 * always jumps to the error label — the mismatch never reaches
 * extend_buffer below. */
__pyx_t_2 = PyErr_BadArgument(); if (unlikely(__pyx_t_2 == 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L3;
}
__pyx_L3:;
/* "cpython/array.pxd":157
 * if self.ob_descr.typecode != other.ob_descr.typecode:
 * PyErr_BadArgument()
 * return extend_buffer(self, other.data.as_chars, Py_SIZE(other)) # <<<<<<<<<<<<<<
 *
 * cdef inline void zero(array self):
 */
__pyx_t_2 = __pyx_f_7cpython_5array_extend_buffer(__pyx_v_self, __pyx_v_other->data.as_chars, Py_SIZE(((PyObject *)__pyx_v_other))); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_r = __pyx_t_2;
goto __pyx_L0;
/* "cpython/array.pxd":153
 * return 0
 *
 * cdef inline int extend(array self, array other) except -1: # <<<<<<<<<<<<<<
 * """ extend array with data from another array; types must match. """
 * if self.ob_descr.typecode != other.ob_descr.typecode:
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_AddTraceback("cpython.array.extend", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cpython/array.pxd":159
* return extend_buffer(self, other.data.as_chars, Py_SIZE(other))
*
* cdef inline void zero(array self): # <<<<<<<<<<<<<<
* """ set all elements of array to zero. """
* memset(self.data.as_chars, 0, Py_SIZE(self) * self.ob_descr.itemsize)
*/
/* cdef inline void zero(array self):
 * Sets every element of the array to zero bytes via a single memset over
 * Py_SIZE(self) * itemsize bytes of the underlying buffer. Cannot fail. */
static CYTHON_INLINE void __pyx_f_7cpython_5array_zero(arrayobject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("zero", 0);
/* "cpython/array.pxd":161
 * cdef inline void zero(array self):
 * """ set all elements of array to zero. """
 * memset(self.data.as_chars, 0, Py_SIZE(self) * self.ob_descr.itemsize) # <<<<<<<<<<<<<<
 */
memset(__pyx_v_self->data.as_chars, 0, (Py_SIZE(((PyObject *)__pyx_v_self)) * __pyx_v_self->ob_descr->itemsize));
/* "cpython/array.pxd":159
 * return extend_buffer(self, other.data.as_chars, Py_SIZE(other))
 *
 * cdef inline void zero(array self): # <<<<<<<<<<<<<<
 * """ set all elements of array to zero. """
 * memset(self.data.as_chars, 0, Py_SIZE(self) * self.ob_descr.itemsize)
 */
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":113
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode=u"c", bint allocate_buffer=True):
*
*/
/* Python wrapper */
/* Python-level wrapper for View.MemoryView array.__cinit__(self, shape,
 * itemsize, format not None, mode=u"c", allocate_buffer=True).
 * Unpacks positional and keyword arguments into C values (values[0..4]),
 * applies the defaults (mode=u"c", allocate_buffer=True), type-checks
 * shape as a tuple and format as not-None, then delegates to the
 * implementation function. Returns 0 on success, -1 with an exception set
 * on bad arguments. */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_shape = 0;
Py_ssize_t __pyx_v_itemsize;
PyObject *__pyx_v_format = 0;
PyObject *__pyx_v_mode = 0;
int __pyx_v_allocate_buffer;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
PyObject* values[5] = {0,0,0,0,0};
/* Default for mode is the interned unicode string u"c". */
values[3] = ((PyObject *)__pyx_n_u_c);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
/* Deliberate fallthrough: case N fills values[N-1] then falls into
 * case N-1, collecting all positional args down to 0. */
switch (pos_args) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Deliberate fallthrough again: starting at the first slot NOT covered
 * by positionals, look up each remaining argument by keyword. The first
 * three (shape, itemsize, format) are required; mode and allocate_buffer
 * are optional. */
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 3:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mode);
if (value) { values[3] = value; kw_args--; }
}
case 4:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_allocate_buffer);
if (value) { values[4] = value; kw_args--; }
}
}
/* Any keywords left over are unexpected/duplicate — reject them. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
/* No keywords: 3, 4 or 5 positional arguments are acceptable. */
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_shape = ((PyObject*)values[0]);
/* itemsize is converted via __index__; -1 with an exception pending means
 * the conversion failed. */
__pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_format = values[2];
__pyx_v_mode = values[3];
if (values[4]) {
__pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* "View.MemoryView":114
 *
 * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
 * mode=u"c", bint allocate_buffer=True): # <<<<<<<<<<<<<<
 *
 * cdef int idx
 */
__pyx_v_allocate_buffer = ((int)1);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
/* Declared types: shape must be a tuple, format must not be None. */
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_r = __pyx_array_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
/* "View.MemoryView":113
 * cdef bint dtype_is_object
 *
 * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
 * mode=u"c", bint allocate_buffer=True):
 *
 */
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
int __pyx_v_idx;
Py_ssize_t __pyx_v_i;
PyObject **__pyx_v_p;
PyObject *__pyx_v_encode = NULL;
PyObject *__pyx_v_dim = NULL;
char __pyx_v_order;
PyObject *__pyx_v_decode = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
char *__pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
__Pyx_INCREF(__pyx_v_format);
__Pyx_INCREF(__pyx_v_mode);
/* "View.MemoryView":120
* cdef PyObject **p
*
* self.ndim = <int> len(shape) # <<<<<<<<<<<<<<
* self.itemsize = itemsize
*
*/
if (unlikely(__pyx_v_shape == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_self->ndim = ((int)__pyx_t_1);
/* "View.MemoryView":121
*
* self.ndim = <int> len(shape)
* self.itemsize = itemsize # <<<<<<<<<<<<<<
*
* if not self.ndim:
*/
__pyx_v_self->itemsize = __pyx_v_itemsize;
/* "View.MemoryView":123
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
__pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":124
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if self.itemsize <= 0:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":126
* raise ValueError("Empty shape tuple for cython.array")
*
* if self.itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
__pyx_t_2 = ((__pyx_v_self->itemsize <= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":127
*
* if self.itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* encode = getattr(format, 'encode', None)
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":129
* raise ValueError("itemsize <= 0 for cython.array")
*
* encode = getattr(format, 'encode', None) # <<<<<<<<<<<<<<
* if encode:
* format = encode('ASCII')
*/
__pyx_t_3 = __Pyx_GetAttr3(__pyx_v_format, __pyx_n_s_encode, Py_None); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_encode = __pyx_t_3;
__pyx_t_3 = 0;
/* "View.MemoryView":130
*
* encode = getattr(format, 'encode', None)
* if encode: # <<<<<<<<<<<<<<
* format = encode('ASCII')
* self._format = format
*/
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_encode); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_t_2) {
/* "View.MemoryView":131
* encode = getattr(format, 'encode', None)
* if encode:
* format = encode('ASCII') # <<<<<<<<<<<<<<
* self._format = format
* self.format = self._format
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_v_encode, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L5;
}
__pyx_L5:;
/* "View.MemoryView":132
* if encode:
* format = encode('ASCII')
* self._format = format # <<<<<<<<<<<<<<
* self.format = self._format
*
*/
if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_3 = __pyx_v_format;
__Pyx_INCREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__Pyx_GOTREF(__pyx_v_self->_format);
__Pyx_DECREF(__pyx_v_self->_format);
__pyx_v_self->_format = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":133
* format = encode('ASCII')
* self._format = format
* self.format = self._format # <<<<<<<<<<<<<<
*
* self._shape = <Py_ssize_t *> malloc(sizeof(Py_ssize_t)*self.ndim)
*/
__pyx_t_4 = __Pyx_PyObject_AsString(__pyx_v_self->_format); if (unlikely((!__pyx_t_4) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_self->format = __pyx_t_4;
/* "View.MemoryView":135
* self.format = self._format
*
* self._shape = <Py_ssize_t *> malloc(sizeof(Py_ssize_t)*self.ndim) # <<<<<<<<<<<<<<
* self._strides = <Py_ssize_t *> malloc(sizeof(Py_ssize_t)*self.ndim)
*
*/
__pyx_v_self->_shape = ((Py_ssize_t *)malloc(((sizeof(Py_ssize_t)) * __pyx_v_self->ndim)));
/* "View.MemoryView":136
*
* self._shape = <Py_ssize_t *> malloc(sizeof(Py_ssize_t)*self.ndim)
* self._strides = <Py_ssize_t *> malloc(sizeof(Py_ssize_t)*self.ndim) # <<<<<<<<<<<<<<
*
* if not self._shape or not self._strides:
*/
__pyx_v_self->_strides = ((Py_ssize_t *)malloc(((sizeof(Py_ssize_t)) * __pyx_v_self->ndim)));
/* "View.MemoryView":138
* self._strides = <Py_ssize_t *> malloc(sizeof(Py_ssize_t)*self.ndim)
*
* if not self._shape or not self._strides: # <<<<<<<<<<<<<<
* free(self._shape)
* free(self._strides)
*/
__pyx_t_2 = ((!(__pyx_v_self->_shape != 0)) != 0);
if (!__pyx_t_2) {
__pyx_t_5 = ((!(__pyx_v_self->_strides != 0)) != 0);
__pyx_t_6 = __pyx_t_5;
} else {
__pyx_t_6 = __pyx_t_2;
}
if (__pyx_t_6) {
/* "View.MemoryView":139
*
* if not self._shape or not self._strides:
* free(self._shape) # <<<<<<<<<<<<<<
* free(self._strides)
* raise MemoryError("unable to allocate shape or strides.")
*/
free(__pyx_v_self->_shape);
/* "View.MemoryView":140
* if not self._shape or not self._strides:
* free(self._shape)
* free(self._strides) # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape or strides.")
*
*/
free(__pyx_v_self->_strides);
/* "View.MemoryView":141
* free(self._shape)
* free(self._strides)
* raise MemoryError("unable to allocate shape or strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":144
*
*
* idx = 0 # <<<<<<<<<<<<<<
* for idx, dim in enumerate(shape):
* if dim <= 0:
*/
__pyx_v_idx = 0;
/* "View.MemoryView":145
*
* idx = 0
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
__pyx_t_7 = 0;
__pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0;
for (;;) {
if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_8 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_8); __pyx_t_1++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_8 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
__Pyx_XDECREF_SET(__pyx_v_dim, __pyx_t_8);
__pyx_t_8 = 0;
__pyx_v_idx = __pyx_t_7;
__pyx_t_7 = (__pyx_t_7 + 1);
/* "View.MemoryView":146
* idx = 0
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*
*/
__pyx_t_8 = PyObject_RichCompare(__pyx_v_dim, __pyx_int_0, Py_LE); __Pyx_XGOTREF(__pyx_t_8); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
if (__pyx_t_6) {
/* "View.MemoryView":147
* for idx, dim in enumerate(shape):
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<<
*
* self._shape[idx] = dim
*/
__pyx_t_8 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
__pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8);
__Pyx_GIVEREF(__pyx_t_8);
__Pyx_INCREF(__pyx_v_dim);
PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_v_dim);
__Pyx_GIVEREF(__pyx_v_dim);
__pyx_t_8 = 0;
__pyx_t_8 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_9); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8);
__Pyx_GIVEREF(__pyx_t_8);
__pyx_t_8 = 0;
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":149
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*
* self._shape[idx] = dim # <<<<<<<<<<<<<<
* idx += 1
*
*/
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_dim); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
(__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_t_10;
/* "View.MemoryView":150
*
* self._shape[idx] = dim
* idx += 1 # <<<<<<<<<<<<<<
*
* if mode not in ("fortran", "c"):
*/
__pyx_v_idx = (__pyx_v_idx + 1);
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":152
* idx += 1
*
* if mode not in ("fortran", "c"): # <<<<<<<<<<<<<<
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*
*/
__Pyx_INCREF(__pyx_v_mode);
__pyx_t_3 = __pyx_v_mode;
__pyx_t_6 = (__Pyx_PyString_Equals(__pyx_t_3, __pyx_n_s_fortran, Py_NE)); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_t_6) {
__pyx_t_2 = (__Pyx_PyString_Equals(__pyx_t_3, __pyx_n_s_c, Py_NE)); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_5 = __pyx_t_2;
} else {
__pyx_t_5 = __pyx_t_6;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = (__pyx_t_5 != 0);
if (__pyx_t_6) {
/* "View.MemoryView":153
*
* if mode not in ("fortran", "c"):
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<<
*
* cdef char order
*/
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_8);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":156
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = 'F'
* else:
*/
__pyx_t_6 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_t_6) {
/* "View.MemoryView":157
* cdef char order
* if mode == 'fortran':
* order = 'F' # <<<<<<<<<<<<<<
* else:
* order = 'C'
*/
__pyx_v_order = 'F';
goto __pyx_L11;
}
/*else*/ {
/* "View.MemoryView":159
* order = 'F'
* else:
* order = 'C' # <<<<<<<<<<<<<<
*
* self.len = fill_contig_strides_array(self._shape, self._strides,
*/
__pyx_v_order = 'C';
}
__pyx_L11:;
/* "View.MemoryView":161
* order = 'C'
*
* self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<<
* itemsize, self.ndim, order)
*
*/
__pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
/* "View.MemoryView":164
* itemsize, self.ndim, order)
*
* decode = getattr(mode, 'decode', None) # <<<<<<<<<<<<<<
* if decode:
* mode = decode('ASCII')
*/
__pyx_t_3 = __Pyx_GetAttr3(__pyx_v_mode, __pyx_n_s_decode, Py_None); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_decode = __pyx_t_3;
__pyx_t_3 = 0;
/* "View.MemoryView":165
*
* decode = getattr(mode, 'decode', None)
* if decode: # <<<<<<<<<<<<<<
* mode = decode('ASCII')
* self.mode = mode
*/
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_v_decode); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_t_6) {
/* "View.MemoryView":166
* decode = getattr(mode, 'decode', None)
* if decode:
* mode = decode('ASCII') # <<<<<<<<<<<<<<
* self.mode = mode
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_v_decode, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF_SET(__pyx_v_mode, __pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L12;
}
__pyx_L12:;
/* "View.MemoryView":167
* if decode:
* mode = decode('ASCII')
* self.mode = mode # <<<<<<<<<<<<<<
*
* self.free_data = allocate_buffer
*/
if (!(likely(PyUnicode_CheckExact(__pyx_v_mode))||((__pyx_v_mode) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "unicode", Py_TYPE(__pyx_v_mode)->tp_name), 0))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_3 = __pyx_v_mode;
__Pyx_INCREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":169
* self.mode = mode
*
* self.free_data = allocate_buffer # <<<<<<<<<<<<<<
* self.dtype_is_object = format == b'O'
* if allocate_buffer:
*/
__pyx_v_self->free_data = __pyx_v_allocate_buffer;
/* "View.MemoryView":170
*
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<<
* if allocate_buffer:
* self.data = <char *>malloc(self.len)
*/
__pyx_t_3 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_self->dtype_is_object = __pyx_t_6;
/* "View.MemoryView":171
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
* self.data = <char *>malloc(self.len)
* if not self.data:
*/
__pyx_t_6 = (__pyx_v_allocate_buffer != 0);
if (__pyx_t_6) {
/* "View.MemoryView":172
* self.dtype_is_object = format == b'O'
* if allocate_buffer:
* self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<<
* if not self.data:
* raise MemoryError("unable to allocate array data.")
*/
__pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
/* "View.MemoryView":173
* if allocate_buffer:
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
__pyx_t_6 = ((!(__pyx_v_self->data != 0)) != 0);
if (__pyx_t_6) {
/* "View.MemoryView":174
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":176
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
__pyx_t_6 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_6) {
/* "View.MemoryView":177
*
* if self.dtype_is_object:
* p = <PyObject **> self.data # <<<<<<<<<<<<<<
* for i in range(self.len / itemsize):
* p[i] = Py_None
*/
__pyx_v_p = ((PyObject **)__pyx_v_self->data);
/* "View.MemoryView":178
* if self.dtype_is_object:
* p = <PyObject **> self.data
* for i in range(self.len / itemsize): # <<<<<<<<<<<<<<
* p[i] = Py_None
* Py_INCREF(Py_None)
*/
if (unlikely(__pyx_v_itemsize == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
else if (sizeof(Py_ssize_t) == sizeof(long) && unlikely(__pyx_v_itemsize == -1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_1 = (__pyx_v_self->len / __pyx_v_itemsize);
for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_1; __pyx_t_10+=1) {
__pyx_v_i = __pyx_t_10;
/* "View.MemoryView":179
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
* p[i] = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
(__pyx_v_p[__pyx_v_i]) = Py_None;
/* "View.MemoryView":180
* for i in range(self.len / itemsize):
* p[i] = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
Py_INCREF(Py_None);
}
goto __pyx_L15;
}
__pyx_L15:;
goto __pyx_L13;
}
__pyx_L13:;
/* "View.MemoryView":113
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode=u"c", bint allocate_buffer=True):
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_encode);
__Pyx_XDECREF(__pyx_v_dim);
__Pyx_XDECREF(__pyx_v_decode);
__Pyx_XDECREF(__pyx_v_format);
__Pyx_XDECREF(__pyx_v_mode);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":183
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == b"c":
*/
/* Python wrapper */
/* Python-level buffer-protocol entry point (bf_getbuffer slot): downcasts
 * the generic object pointer to the concrete array struct and forwards the
 * request to the typed __getbuffer__ implementation.  Returns 0 on success,
 * -1 on error, exactly as the delegate does. */
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
  int result;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
  result = __pyx_array_getbuffer_MemoryView_5array_2__getbuffer__(
      ((struct __pyx_array_obj *)__pyx_v_self),
      ((Py_buffer *)__pyx_v_info),
      ((int)__pyx_v_flags));
  __Pyx_RefNannyFinishContext();
  return result;
}
/*
 * __getbuffer__ implementation for the Cython array class (buffer protocol).
 * Validates that the caller's `flags` include a contiguity request compatible
 * with self->mode (b"c" -> C-contiguous, b"fortran" -> F-contiguous), then
 * fills *info from the array's cached data pointer, shape, strides and
 * itemsize, and hands out an owned reference to self in info->obj.
 * Returns 0 on success, -1 on error (the error path below drops the
 * placeholder reference stored in info->obj).
 */
static int __pyx_array_getbuffer_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_bufmode;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
char *__pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
Py_ssize_t *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
/* Pre-seed info->obj with an owned reference to None so the exit labels
 * below can tell whether the placeholder is still held and must be dropped. */
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "View.MemoryView":184
 * @cname('getbuffer')
 * def __getbuffer__(self, Py_buffer *info, int flags):
 *     cdef int bufmode = -1             # <<<<<<<<<<<<<<
 *     if self.mode == b"c":
 *         bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 */
__pyx_v_bufmode = -1;
/* "View.MemoryView":185
 * def __getbuffer__(self, Py_buffer *info, int flags):
 *     cdef int bufmode = -1
 *     if self.mode == b"c":             # <<<<<<<<<<<<<<
 *         bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *     elif self.mode == b"fortran":
 */
/* Pick the acceptable buffer-request mask from the array's mode string. */
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_b_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":186
 *     cdef int bufmode = -1
 *     if self.mode == b"c":
 *         bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS             # <<<<<<<<<<<<<<
 *     elif self.mode == b"fortran":
 *         bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 */
__pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
goto __pyx_L3;
}
/* "View.MemoryView":187
 *     if self.mode == b"c":
 *         bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *     elif self.mode == b"fortran":             # <<<<<<<<<<<<<<
 *         bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *     if not (flags & bufmode):
 */
__pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_b_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":188
 *         bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *     elif self.mode == b"fortran":
 *         bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS             # <<<<<<<<<<<<<<
 *     if not (flags & bufmode):
 *         raise ValueError("Can only create a buffer that is contiguous in memory.")
 */
__pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":189
 *     elif self.mode == b"fortran":
 *         bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *     if not (flags & bufmode):             # <<<<<<<<<<<<<<
 *         raise ValueError("Can only create a buffer that is contiguous in memory.")
 *         info.buf = self.data
 */
/* Reject requests whose flags do not include an acceptable contiguity bit. */
__pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":190
 *         bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 *     if not (flags & bufmode):
 *         raise ValueError("Can only create a buffer that is contiguous in memory.")             # <<<<<<<<<<<<<<
 *     info.buf = self.data
 *     info.len = self.len
 */
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* Fill the Py_buffer from the array's cached members. */
/* "View.MemoryView":191
 *     if not (flags & bufmode):
 *         raise ValueError("Can only create a buffer that is contiguous in memory.")
 *     info.buf = self.data             # <<<<<<<<<<<<<<
 *     info.len = self.len
 *     info.ndim = self.ndim
 */
__pyx_t_4 = __pyx_v_self->data;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":192
 *         raise ValueError("Can only create a buffer that is contiguous in memory.")
 *     info.buf = self.data
 *     info.len = self.len             # <<<<<<<<<<<<<<
 *     info.ndim = self.ndim
 *     info.shape = self._shape
 */
__pyx_t_5 = __pyx_v_self->len;
__pyx_v_info->len = __pyx_t_5;
/* "View.MemoryView":193
 *     info.buf = self.data
 *     info.len = self.len
 *     info.ndim = self.ndim             # <<<<<<<<<<<<<<
 *     info.shape = self._shape
 *     info.strides = self._strides
 */
__pyx_t_6 = __pyx_v_self->ndim;
__pyx_v_info->ndim = __pyx_t_6;
/* "View.MemoryView":194
 *     info.len = self.len
 *     info.ndim = self.ndim
 *     info.shape = self._shape             # <<<<<<<<<<<<<<
 *     info.strides = self._strides
 *     info.suboffsets = NULL
 */
__pyx_t_7 = __pyx_v_self->_shape;
__pyx_v_info->shape = __pyx_t_7;
/* "View.MemoryView":195
 *     info.ndim = self.ndim
 *     info.shape = self._shape
 *     info.strides = self._strides             # <<<<<<<<<<<<<<
 *     info.suboffsets = NULL
 *     info.itemsize = self.itemsize
 */
__pyx_t_7 = __pyx_v_self->_strides;
__pyx_v_info->strides = __pyx_t_7;
/* "View.MemoryView":196
 *     info.shape = self._shape
 *     info.strides = self._strides
 *     info.suboffsets = NULL             # <<<<<<<<<<<<<<
 *     info.itemsize = self.itemsize
 *     info.readonly = 0
 */
__pyx_v_info->suboffsets = NULL;
/* "View.MemoryView":197
 *     info.strides = self._strides
 *     info.suboffsets = NULL
 *     info.itemsize = self.itemsize             # <<<<<<<<<<<<<<
 *     info.readonly = 0
 *
 */
__pyx_t_5 = __pyx_v_self->itemsize;
__pyx_v_info->itemsize = __pyx_t_5;
/* "View.MemoryView":198
 *     info.suboffsets = NULL
 *     info.itemsize = self.itemsize
 *     info.readonly = 0             # <<<<<<<<<<<<<<
 *
 *     if flags & PyBUF_FORMAT:
 */
__pyx_v_info->readonly = 0;
/* "View.MemoryView":200
 *     info.readonly = 0
 *
 *     if flags & PyBUF_FORMAT:             # <<<<<<<<<<<<<<
 *         info.format = self.format
 *     else:
 */
/* Only expose the format string when the consumer asked for it. */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":201
 *
 *     if flags & PyBUF_FORMAT:
 *         info.format = self.format             # <<<<<<<<<<<<<<
 *     else:
 *         info.format = NULL
 */
__pyx_t_4 = __pyx_v_self->format;
__pyx_v_info->format = __pyx_t_4;
goto __pyx_L5;
}
/*else*/ {
/* "View.MemoryView":203
 *         info.format = self.format
 *     else:
 *         info.format = NULL             # <<<<<<<<<<<<<<
 *
 *     info.obj = self
 */
__pyx_v_info->format = NULL;
}
__pyx_L5:;
/* "View.MemoryView":205
 *         info.format = NULL
 *
 *     info.obj = self             # <<<<<<<<<<<<<<
 *
 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
 */
/* Replace the placeholder None in info->obj with an owned reference to
 * self, keeping the exporting object alive for the buffer's lifetime. */
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":183
 *
 * @cname('getbuffer')
 * def __getbuffer__(self, Py_buffer *info, int flags):             # <<<<<<<<<<<<<<
 *     cdef int bufmode = -1
 *     if self.mode == b"c":
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
/* Error path: drop temporaries, record the traceback, and release whatever
 * reference is still held in info->obj. */
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
}
goto __pyx_L2;
__pyx_L0:;
/* Success path: if info->obj is still the placeholder None, drop it. */
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":209
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* Python wrapper */
/* tp_dealloc entry point for the array type: downcasts the generic object
 * pointer and delegates to the typed __dealloc__ implementation. */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_array_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/*
 * __dealloc__ implementation for the Cython array class.  Releases the data
 * buffer either through the user-supplied callback_free_data hook or, when
 * free_data is set, via free() — first decref'ing any contained PyObject*
 * elements when dtype_is_object.  Finally frees the _strides and _shape
 * arrays unconditionally.
 */
static void __pyx_array_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":210
 *
 * def __dealloc__(array self):
 *     if self.callback_free_data != NULL:             # <<<<<<<<<<<<<<
 *         self.callback_free_data(self.data)
 *     elif self.free_data:
 */
/* A user-installed free callback takes precedence over plain free(). */
__pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":211
 * def __dealloc__(array self):
 *     if self.callback_free_data != NULL:
 *         self.callback_free_data(self.data)             # <<<<<<<<<<<<<<
 *     elif self.free_data:
 *         if self.dtype_is_object:
 */
__pyx_v_self->callback_free_data(__pyx_v_self->data);
goto __pyx_L3;
}
/* "View.MemoryView":212
 *     if self.callback_free_data != NULL:
 *         self.callback_free_data(self.data)
 *     elif self.free_data:             # <<<<<<<<<<<<<<
 *         if self.dtype_is_object:
 *             refcount_objects_in_slice(self.data, self._shape,
 */
__pyx_t_1 = (__pyx_v_self->free_data != 0);
if (__pyx_t_1) {
/* "View.MemoryView":213
 *         self.callback_free_data(self.data)
 *     elif self.free_data:
 *         if self.dtype_is_object:             # <<<<<<<<<<<<<<
 *             refcount_objects_in_slice(self.data, self._shape,
 *                 self._strides, self.ndim, False)
 */
/* Object buffers hold references; decref each element before freeing. */
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":214
 *     elif self.free_data:
 *         if self.dtype_is_object:
 *             refcount_objects_in_slice(self.data, self._shape,             # <<<<<<<<<<<<<<
 *                 self._strides, self.ndim, False)
 *             free(self.data)
 */
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
goto __pyx_L4;
}
__pyx_L4:;
/* "View.MemoryView":216
 *             refcount_objects_in_slice(self.data, self._shape,
 *                 self._strides, self.ndim, False)
 *             free(self.data)             # <<<<<<<<<<<<<<
 *
 *     free(self._strides)
 */
free(__pyx_v_self->data);
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":218
 *             free(self.data)
 *
 *     free(self._strides)             # <<<<<<<<<<<<<<
 *     free(self._shape)
 *
 */
free(__pyx_v_self->_strides);
/* "View.MemoryView":219
 *
 *     free(self._strides)
 *     free(self._shape)             # <<<<<<<<<<<<<<
 *
 * property memview:
 */
free(__pyx_v_self->_shape);
/* "View.MemoryView":209
 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
 *
 * def __dealloc__(array self):             # <<<<<<<<<<<<<<
 *     if self.callback_free_data != NULL:
 *         self.callback_free_data(self.data)
 */
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":223
* property memview:
* @cname('get_memview')
* def __get__(self): # <<<<<<<<<<<<<<
*
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
*/
/* Python wrapper */
/* Python wrapper for the `memview` property getter: casts the generic
 * object pointer to the concrete array struct and delegates to the typed
 * __get__ implementation.  Returns a new reference, or NULL on error. */
static PyObject *get_memview(PyObject *__pyx_v_self); /*proto*/
static PyObject *get_memview(PyObject *__pyx_v_self) {
  PyObject *result = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  result = get_memview_MemoryView_5array_7memview___get__(
      ((struct __pyx_array_obj *)__pyx_v_self));
  __Pyx_RefNannyFinishContext();
  return result;
}
/*
 * Implementation of the array.memview property getter: constructs and
 * returns memoryview(self, PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE,
 * self.dtype_is_object) using the module's memoryview type.  Returns a new
 * reference, or NULL with an exception set on failure.
 */
static PyObject *get_memview_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":225
 *     def __get__(self):
 *
 *         flags =  PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE             # <<<<<<<<<<<<<<
 *         return  memoryview(self, flags, self.dtype_is_object)
 *
 */
__pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
/* "View.MemoryView":226
 *
 *         flags =  PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
 *         return  memoryview(self, flags, self.dtype_is_object)             # <<<<<<<<<<<<<<
 *
 *
 */
/* Build the (self, flags, dtype_is_object) argument tuple and call the
 * memoryview type object; each SET_ITEM consumes a reference via GIVEREF. */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryview_type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":223
 * property memview:
 *     @cname('get_memview')
 *     def __get__(self):             # <<<<<<<<<<<<<<
 *
 *         flags =  PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":229
*
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* Python wrapper */
/* tp_getattro-level wrapper for array.__getattr__: casts the object pointer
 * to the concrete array struct and delegates to the typed implementation.
 * Returns a new reference, or NULL on error. */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
  PyObject *result = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
  result = __pyx_array_MemoryView_5array_6__getattr__(
      ((struct __pyx_array_obj *)__pyx_v_self),
      ((PyObject *)__pyx_v_attr));
  __Pyx_RefNannyFinishContext();
  return result;
}
/*
 * Implementation of array.__getattr__: evaluates getattr(self.memview, attr),
 * i.e. fetches the memview property and forwards attribute lookup to it.
 * Returns a new reference, or NULL with an exception set on failure.
 */
static PyObject *__pyx_array_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getattr__", 0);
/* "View.MemoryView":230
 *
 *     def __getattr__(self, attr):
 *         return getattr(self.memview, attr)             # <<<<<<<<<<<<<<
 *
 *     def __getitem__(self, item):
 */
__Pyx_XDECREF(__pyx_r);
/* First resolve the memview property, then look `attr` up on it. */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":229
 *
 *
 *     def __getattr__(self, attr):             # <<<<<<<<<<<<<<
 *         return getattr(self.memview, attr)
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":232
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* Python wrapper */
/* mp_subscript-level wrapper for array.__getitem__: casts the object pointer
 * to the concrete array struct and delegates to the typed implementation.
 * Returns a new reference, or NULL on error. */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
  PyObject *result = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  result = __pyx_array_MemoryView_5array_8__getitem__(
      ((struct __pyx_array_obj *)__pyx_v_self),
      ((PyObject *)__pyx_v_item));
  __Pyx_RefNannyFinishContext();
  return result;
}
/*
 * Implementation of array.__getitem__: evaluates self.memview[item] by
 * fetching the memview property and subscripting it with `item`.
 * Returns a new reference, or NULL with an exception set on failure.
 */
static PyObject *__pyx_array_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":233
 *
 *     def __getitem__(self, item):
 *         return self.memview[item]             # <<<<<<<<<<<<<<
 *
 *     def __setitem__(self, item, value):
 */
__Pyx_XDECREF(__pyx_r);
/* Resolve the memview property, then subscript it with `item`. */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 233; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(__pyx_t_2 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 233; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":232
 *         return getattr(self.memview, attr)
 *
 *     def __getitem__(self, item):             # <<<<<<<<<<<<<<
 *         return self.memview[item]
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":235
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* Python wrapper */
/* mp_ass_subscript-level wrapper for array.__setitem__: casts the object
 * pointer to the concrete array struct and delegates to the typed
 * implementation.  Returns 0 on success, -1 on error. */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
  int result;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  result = __pyx_array_MemoryView_5array_10__setitem__(
      ((struct __pyx_array_obj *)__pyx_v_self),
      ((PyObject *)__pyx_v_item),
      ((PyObject *)__pyx_v_value));
  __Pyx_RefNannyFinishContext();
  return result;
}
/*
 * Implementation of array.__setitem__: performs self.memview[item] = value
 * by fetching the memview property and assigning through it.
 * Returns 0 on success, -1 with an exception set on failure.
 */
static int __pyx_array_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
/* "View.MemoryView":236
 *
 *     def __setitem__(self, item, value):
 *         self.memview[item] = value             # <<<<<<<<<<<<<<
 *
 *
 */
/* Resolve the memview property, then assign `value` at `item` through it. */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":235
 *         return self.memview[item]
 *
 *     def __setitem__(self, item, value):             # <<<<<<<<<<<<<<
 *         self.memview[item] = value
 *
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":240
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
/* C-level constructor for the array class (cname __pyx_array_new).
   When `buf` is NULL the array allocates its own buffer; otherwise it is
   created with allocate_buffer=False and `buf` is installed directly as the
   data pointer (caller retains ownership of that memory).  Returns a new
   reference, or 0 on error with an exception set. */
static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
  struct __pyx_array_obj *__pyx_v_result = 0;
  struct __pyx_array_obj *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("array_cwrapper", 0);
  /* "View.MemoryView":244
 *     cdef array result
 *
 *     if buf == NULL:             # <<<<<<<<<<<<<<
 *         result = array(shape, itemsize, format, mode.decode('ASCII'))
 *     else:
 */
  __pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
  if (__pyx_t_1) {
    /* "View.MemoryView":245
 *
 *     if buf == NULL:
 *         result = array(shape, itemsize, format, mode.decode('ASCII'))             # <<<<<<<<<<<<<<
 *     else:
 *         result = array(shape, itemsize, format, mode.decode('ASCII'),
 */
    /* Build the 4-tuple (shape, itemsize, format, mode) and call array(...). */
    __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_INCREF(__pyx_v_shape);
    PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
    __Pyx_GIVEREF(__pyx_v_shape);
    PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
    __Pyx_GIVEREF(__pyx_t_2);
    PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
    __Pyx_GIVEREF(__pyx_t_3);
    PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_4);
    /* References were stolen by the tuple; clear the temporaries. */
    __pyx_t_2 = 0;
    __pyx_t_3 = 0;
    __pyx_t_4 = 0;
    __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_array_type)), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
    __pyx_t_4 = 0;
    goto __pyx_L3;
  }
  /*else*/ {
    /* "View.MemoryView":247
 *         result = array(shape, itemsize, format, mode.decode('ASCII'))
 *     else:
 *         result = array(shape, itemsize, format, mode.decode('ASCII'),             # <<<<<<<<<<<<<<
 *                        allocate_buffer=False)
 *         result.data = buf
 */
    /* Same positional tuple as above, plus kwargs {'allocate_buffer': False}. */
    __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 247; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 247; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 247; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 247; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_INCREF(__pyx_v_shape);
    PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
    __Pyx_GIVEREF(__pyx_v_shape);
    PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_4);
    PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_5);
    PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
    __Pyx_GIVEREF(__pyx_t_3);
    __pyx_t_4 = 0;
    __pyx_t_5 = 0;
    __pyx_t_3 = 0;
    __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 247; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    /* "View.MemoryView":248
 *     else:
 *         result = array(shape, itemsize, format, mode.decode('ASCII'),
 *                        allocate_buffer=False)             # <<<<<<<<<<<<<<
 *         result.data = buf
 *
 */
    if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 247; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    /* "View.MemoryView":247
 *         result = array(shape, itemsize, format, mode.decode('ASCII'))
 *     else:
 *         result = array(shape, itemsize, format, mode.decode('ASCII'),             # <<<<<<<<<<<<<<
 *                        allocate_buffer=False)
 *         result.data = buf
 */
    __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_array_type)), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 247; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
    __pyx_t_5 = 0;
    /* "View.MemoryView":249
 *         result = array(shape, itemsize, format, mode.decode('ASCII'),
 *                        allocate_buffer=False)
 *         result.data = buf             # <<<<<<<<<<<<<<
 *
 *     return result
 */
    /* Install the caller-provided buffer; the array did not allocate one. */
    __pyx_v_result->data = __pyx_v_buf;
  }
  __pyx_L3:;
  /* "View.MemoryView":251
 *         result.data = buf
 *
 *     return result             # <<<<<<<<<<<<<<
 *
 *
 */
  __Pyx_XDECREF(((PyObject *)__pyx_r));
  __Pyx_INCREF(((PyObject *)__pyx_v_result));
  __pyx_r = __pyx_v_result;
  goto __pyx_L0;
  /* "View.MemoryView":240
 *
 * @cname("__pyx_array_new")
 * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format,             # <<<<<<<<<<<<<<
 *                           char *mode, char *buf):
 *     cdef array result
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
  /* Error convention for this cdef function: return 0 (NULL) with exception set. */
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_result);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":277
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* Python wrapper */
/* tp_init wrapper for Enum.__init__(self, name): parses the single required
   positional-or-keyword argument `name`, then forwards to the implementation.
   Returns 0 on success, -1 on argument error. */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_name = 0;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
    PyObject* values[1] = {0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      /* Intentional fallthrough: each case collects one positional argument. */
      switch (pos_args) {
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case  0:
        /* `name` not given positionally: it must appear as a keyword. */
        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
      }
      if (unlikely(kw_args > 0)) {
        /* Leftover keywords are either duplicates or unexpected names. */
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 277; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
    }
    __pyx_v_name = values[0];
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 277; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
  __pyx_L3_error:;
  __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_MemviewEnum_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Implementation of Enum.__init__: stores `name` on the instance
   (self.name = name), replacing any previous value.  Cannot fail. */
static int __pyx_MemviewEnum_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__", 0);
  /* "View.MemoryView":278
 *     cdef object name
 *     def __init__(self, name):
 *         self.name = name             # <<<<<<<<<<<<<<
 *     def __repr__(self):
 *         return self.name
 */
  /* Standard attribute-store sequence: take a new reference to the incoming
     value before dropping the reference to the old one. */
  __Pyx_INCREF(__pyx_v_name);
  __Pyx_GIVEREF(__pyx_v_name);
  __Pyx_GOTREF(__pyx_v_self->name);
  __Pyx_DECREF(__pyx_v_self->name);
  __pyx_v_self->name = __pyx_v_name;
  /* "View.MemoryView":277
 * cdef class Enum(object):
 *     cdef object name
 *     def __init__(self, name):             # <<<<<<<<<<<<<<
 *         self.name = name
 *     def __repr__(self):
 */
  /* function exit code */
  __pyx_r = 0;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":279
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* Python wrapper */
/* tp_repr wrapper for Enum.__repr__: downcasts `self` and forwards to the
   generated implementation.  Returns a new reference (or NULL on error). */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_r = __pyx_MemviewEnum_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Implementation of Enum.__repr__: returns self.name as a new reference.
   Never raises. */
static PyObject *__pyx_MemviewEnum_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__", 0);
  /* "View.MemoryView":280
 *         self.name = name
 *     def __repr__(self):
 *         return self.name             # <<<<<<<<<<<<<<
 *
 * cdef generic = Enum("<strided and direct or indirect>")
 */
  __Pyx_XDECREF(__pyx_r);
  /* INCREF before returning: the caller receives its own reference. */
  __Pyx_INCREF(__pyx_v_self->name);
  __pyx_r = __pyx_v_self->name;
  goto __pyx_L0;
  /* "View.MemoryView":279
 *     def __init__(self, name):
 *         self.name = name
 *     def __repr__(self):             # <<<<<<<<<<<<<<
 *         return self.name
 *
 */
  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":294
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
/* align_pointer (View.MemoryView:294, nogil): round `memory` up to the next
   multiple of `alignment` and return it.  If the address is already aligned
   it is returned unchanged.  Pure arithmetic - no Python API, no failure
   path.  (Assumes alignment != 0, as the Cython source does: the modulo is
   emitted under cython.cdivision(True).) */
static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) {
  Py_intptr_t addr = ((Py_intptr_t)__pyx_v_memory);
  /* How far past the previous alignment boundary the address sits. */
  size_t misalignment = (addr % __pyx_v_alignment);
  if ((misalignment > 0) != 0) {
    /* Bump up to the next boundary. */
    addr = addr + (__pyx_v_alignment - misalignment);
  }
  return ((void *)addr);
}
/* "View.MemoryView":323
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* Python wrapper */
/* tp_new-stage wrapper for memoryview.__cinit__(obj, flags, dtype_is_object=False):
   unpacks 2 required arguments plus one optional bint, converting `flags` to
   int and `dtype_is_object` via truth-testing, then forwards to the
   implementation.  Returns 0 on success, -1 on error. */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_obj = 0;
  int __pyx_v_flags;
  int __pyx_v_dtype_is_object;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
    PyObject* values[3] = {0,0,0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      /* Intentional fallthrough: each case collects one positional argument. */
      switch (pos_args) {
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        case  1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        case  0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      /* Fill in any arguments not given positionally from the keywords;
         cases fall through so later parameters are also checked. */
      switch (pos_args) {
        case  0:
        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        case  1:
        if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 323; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case  2:
        if (kw_args > 0) {
          /* dtype_is_object is optional; absence is not an error. */
          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dtype_is_object);
          if (value) { values[2] = value; kw_args--; }
        }
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 323; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
      }
    } else {
      switch (PyTuple_GET_SIZE(__pyx_args)) {
        case  3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        case  2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    __pyx_v_obj = values[0];
    __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 323; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    if (values[2]) {
      __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 323; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    } else {
      /* Default declared in the Cython signature: dtype_is_object=False. */
      __pyx_v_dtype_is_object = ((int)0);
    }
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 323; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
  __pyx_L3_error:;
  __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_memoryview_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Implementation of memoryview.__cinit__: stores obj/flags, acquires the
   buffer via __Pyx_GetBuffer (for memoryview itself or any non-None obj),
   allocates the per-view thread lock, decides dtype_is_object (from the
   buffer format string 'O' when PyBUF_FORMAT was requested, else from the
   argument), and prepares the aligned acquisition counter.
   Returns 0 on success, -1 on error with an exception set. */
static int __pyx_memoryview_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  PyObject *__pyx_t_5 = NULL;
  PyObject *__pyx_t_6 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__cinit__", 0);
  /* "View.MemoryView":324
 *
 *     def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
 *         self.obj = obj             # <<<<<<<<<<<<<<
 *         self.flags = flags
 *         if type(self) is memoryview or obj is not None:
 */
  /* Attribute store: new reference taken before the old one is released. */
  __Pyx_INCREF(__pyx_v_obj);
  __Pyx_GIVEREF(__pyx_v_obj);
  __Pyx_GOTREF(__pyx_v_self->obj);
  __Pyx_DECREF(__pyx_v_self->obj);
  __pyx_v_self->obj = __pyx_v_obj;
  /* "View.MemoryView":325
 *     def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
 *         self.obj = obj
 *         self.flags = flags             # <<<<<<<<<<<<<<
 *         if type(self) is memoryview or obj is not None:
 *             __Pyx_GetBuffer(obj, &self.view, flags)
 */
  __pyx_v_self->flags = __pyx_v_flags;
  /* "View.MemoryView":326
 *         self.obj = obj
 *         self.flags = flags
 *         if type(self) is memoryview or obj is not None:             # <<<<<<<<<<<<<<
 *             __Pyx_GetBuffer(obj, &self.view, flags)
 *             if <PyObject *> self.view.obj == NULL:
 */
  /* Short-circuit `or`: only test obj when self is a subclass instance. */
  __pyx_t_1 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)((PyObject *)__pyx_memoryview_type)));
  if (!(__pyx_t_1 != 0)) {
    __pyx_t_2 = (__pyx_v_obj != Py_None);
    __pyx_t_3 = (__pyx_t_2 != 0);
  } else {
    __pyx_t_3 = (__pyx_t_1 != 0);
  }
  if (__pyx_t_3) {
    /* "View.MemoryView":327
 *         self.flags = flags
 *         if type(self) is memoryview or obj is not None:
 *             __Pyx_GetBuffer(obj, &self.view, flags)             # <<<<<<<<<<<<<<
 *             if <PyObject *> self.view.obj == NULL:
 *                 (<__pyx_buffer *> &self.view).obj = Py_None
 */
    __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 327; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    /* "View.MemoryView":328
 *         if type(self) is memoryview or obj is not None:
 *             __Pyx_GetBuffer(obj, &self.view, flags)
 *             if <PyObject *> self.view.obj == NULL:             # <<<<<<<<<<<<<<
 *                 (<__pyx_buffer *> &self.view).obj = Py_None
 *                 Py_INCREF(Py_None)
 */
    __pyx_t_3 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0);
    if (__pyx_t_3) {
      /* "View.MemoryView":329
 *             __Pyx_GetBuffer(obj, &self.view, flags)
 *             if <PyObject *> self.view.obj == NULL:
 *                 (<__pyx_buffer *> &self.view).obj = Py_None             # <<<<<<<<<<<<<<
 *                 Py_INCREF(Py_None)
 *
 */
      /* Normalise a NULL Py_buffer.obj to Py_None so later release logic is
         uniform; the matching INCREF keeps the reference count correct. */
      ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
      /* "View.MemoryView":330
 *             if <PyObject *> self.view.obj == NULL:
 *                 (<__pyx_buffer *> &self.view).obj = Py_None
 *                 Py_INCREF(Py_None)             # <<<<<<<<<<<<<<
 *
 *         self.lock = PyThread_allocate_lock()
 */
      Py_INCREF(Py_None);
      goto __pyx_L4;
    }
    __pyx_L4:;
    goto __pyx_L3;
  }
  __pyx_L3:;
  /* "View.MemoryView":332
 *                 Py_INCREF(Py_None)
 *
 *         self.lock = PyThread_allocate_lock()             # <<<<<<<<<<<<<<
 *         if self.lock == NULL:
 *             raise MemoryError
 */
  __pyx_v_self->lock = PyThread_allocate_lock();
  /* "View.MemoryView":333
 *
 *         self.lock = PyThread_allocate_lock()
 *         if self.lock == NULL:             # <<<<<<<<<<<<<<
 *             raise MemoryError
 *
 */
  __pyx_t_3 = ((__pyx_v_self->lock == NULL) != 0);
  if (__pyx_t_3) {
    /* "View.MemoryView":334
 *         self.lock = PyThread_allocate_lock()
 *         if self.lock == NULL:
 *             raise MemoryError             # <<<<<<<<<<<<<<
 *
 *         if flags & PyBUF_FORMAT:
 */
    PyErr_NoMemory(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  /* "View.MemoryView":336
 *             raise MemoryError
 *
 *         if flags & PyBUF_FORMAT:             # <<<<<<<<<<<<<<
 *             self.dtype_is_object = self.view.format == b'O'
 *         else:
 */
  __pyx_t_3 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
  if (__pyx_t_3) {
    /* "View.MemoryView":337
 *
 *         if flags & PyBUF_FORMAT:
 *             self.dtype_is_object = self.view.format == b'O'             # <<<<<<<<<<<<<<
 *         else:
 *             self.dtype_is_object = dtype_is_object
 */
    /* Compare the buffer's format string against b'O' (object dtype). */
    __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 337; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 337; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 337; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
    __pyx_v_self->dtype_is_object = __pyx_t_3;
    goto __pyx_L6;
  }
  /*else*/ {
    /* "View.MemoryView":339
 *             self.dtype_is_object = self.view.format == b'O'
 *         else:
 *             self.dtype_is_object = dtype_is_object             # <<<<<<<<<<<<<<
 *
 *         self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
 */
    __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
  }
  __pyx_L6:;
  /* "View.MemoryView":341
 *             self.dtype_is_object = dtype_is_object
 *
 *         self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(             # <<<<<<<<<<<<<<
 *             <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
 *         self.typeinfo = NULL
 */
  /* Align the acquisition counter so atomic operations on it are valid. */
  __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));
  /* "View.MemoryView":343
 *         self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
 *             <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
 *         self.typeinfo = NULL             # <<<<<<<<<<<<<<
 *
 *     def __dealloc__(memoryview self):
 */
  __pyx_v_self->typeinfo = NULL;
  /* "View.MemoryView":323
 *     cdef __Pyx_TypeInfo *typeinfo
 *
 *     def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):             # <<<<<<<<<<<<<<
 *         self.obj = obj
 *         self.flags = flags
 */
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_XDECREF(__pyx_t_6);
  __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":345
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* Python wrapper */
/* tp_dealloc-stage wrapper for memoryview.__dealloc__: downcasts `self` and
   forwards to the generated implementation.  No return value; must not raise. */
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_memoryview_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}
/* Implementation of memoryview.__dealloc__: releases the Py_buffer when an
   exporter object was held (obj is not None - the counterpart of the
   __cinit__ acquisition) and frees the per-view thread lock if allocated. */
static void __pyx_memoryview_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  __Pyx_RefNannySetupContext("__dealloc__", 0);
  /* "View.MemoryView":346
 *
 *     def __dealloc__(memoryview self):
 *         if self.obj is not None:             # <<<<<<<<<<<<<<
 *             __Pyx_ReleaseBuffer(&self.view)
 *
 */
  __pyx_t_1 = (__pyx_v_self->obj != Py_None);
  __pyx_t_2 = (__pyx_t_1 != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":347
 *     def __dealloc__(memoryview self):
 *         if self.obj is not None:
 *             __Pyx_ReleaseBuffer(&self.view)             # <<<<<<<<<<<<<<
 *
 *         if self.lock != NULL:
 */
    __Pyx_ReleaseBuffer((&__pyx_v_self->view));
    goto __pyx_L3;
  }
  __pyx_L3:;
  /* "View.MemoryView":349
 *             __Pyx_ReleaseBuffer(&self.view)
 *
 *         if self.lock != NULL:             # <<<<<<<<<<<<<<
 *             PyThread_free_lock(self.lock)
 *
 */
  /* lock may be NULL if __cinit__ failed before allocating it. */
  __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":350
 *
 *         if self.lock != NULL:
 *             PyThread_free_lock(self.lock)             # <<<<<<<<<<<<<<
 *
 *     cdef char *get_item_pointer(memoryview self, object index) except NULL:
 */
    PyThread_free_lock(__pyx_v_self->lock);
    goto __pyx_L4;
  }
  __pyx_L4:;
  /* "View.MemoryView":345
 *         self.typeinfo = NULL
 *
 *     def __dealloc__(memoryview self):             # <<<<<<<<<<<<<<
 *         if self.obj is not None:
 *             __Pyx_ReleaseBuffer(&self.view)
 */
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":352
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
/* get_item_pointer (View.MemoryView:352): walk a multi-dimensional index
   (any iterable of per-dimension indices) and advance `itemp` from the start
   of the buffer to the addressed element via pybuffer_index once per
   dimension.  Returns the element pointer, or NULL with an exception set
   (`except NULL` convention). */
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
  Py_ssize_t __pyx_v_dim;
  char *__pyx_v_itemp;
  PyObject *__pyx_v_idx = NULL;
  char *__pyx_r;
  __Pyx_RefNannyDeclarations
  Py_ssize_t __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  Py_ssize_t __pyx_t_3;
  PyObject *(*__pyx_t_4)(PyObject *);
  PyObject *__pyx_t_5 = NULL;
  Py_ssize_t __pyx_t_6;
  char *__pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("get_item_pointer", 0);
  /* "View.MemoryView":354
 *     cdef char *get_item_pointer(memoryview self, object index) except NULL:
 *         cdef Py_ssize_t dim
 *         cdef char *itemp = <char *> self.view.buf             # <<<<<<<<<<<<<<
 *
 *         for dim, idx in enumerate(index):
 */
  __pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
  /* "View.MemoryView":356
 *         cdef char *itemp = <char *> self.view.buf
 *
 *         for dim, idx in enumerate(index):             # <<<<<<<<<<<<<<
 *             itemp = pybuffer_index(&self.view, itemp, idx, dim)
 *
 */
  /* enumerate() counter lives in __pyx_t_1; the loop below fast-paths exact
     lists and tuples and falls back to the generic iterator protocol. */
  __pyx_t_1 = 0;
  if (PyList_CheckExact(__pyx_v_index) || PyTuple_CheckExact(__pyx_v_index)) {
    __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
    __pyx_t_4 = NULL;
  } else {
    __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 356; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext;
  }
  for (;;) {
    if (!__pyx_t_4 && PyList_CheckExact(__pyx_t_2)) {
      if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
      #if CYTHON_COMPILING_IN_CPYTHON
      __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 356; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      #else
      __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 356; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      #endif
    } else if (!__pyx_t_4 && PyTuple_CheckExact(__pyx_t_2)) {
      if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
      #if CYTHON_COMPILING_IN_CPYTHON
      __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 356; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      #else
      __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 356; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      #endif
    } else {
      /* Generic iterator path: StopIteration ends the loop, anything else
         propagates as an error. */
      __pyx_t_5 = __pyx_t_4(__pyx_t_2);
      if (unlikely(!__pyx_t_5)) {
        PyObject* exc_type = PyErr_Occurred();
        if (exc_type) {
          if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
          else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 356; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        }
        break;
      }
      __Pyx_GOTREF(__pyx_t_5);
    }
    __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
    __pyx_t_5 = 0;
    __pyx_v_dim = __pyx_t_1;
    __pyx_t_1 = (__pyx_t_1 + 1);
    /* "View.MemoryView":357
 *
 *         for dim, idx in enumerate(index):
 *             itemp = pybuffer_index(&self.view, itemp, idx, dim)             # <<<<<<<<<<<<<<
 *
 *         return itemp
 */
    __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 357; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 357; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __pyx_v_itemp = __pyx_t_7;
  }
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* "View.MemoryView":359
 *             itemp = pybuffer_index(&self.view, itemp, idx, dim)
 *
 *         return itemp             # <<<<<<<<<<<<<<
 *
 *
 */
  __pyx_r = __pyx_v_itemp;
  goto __pyx_L0;
  /* "View.MemoryView":352
 *             PyThread_free_lock(self.lock)
 *
 *     cdef char *get_item_pointer(memoryview self, object index) except NULL:             # <<<<<<<<<<<<<<
 *         cdef Py_ssize_t dim
 *         cdef char *itemp = <char *> self.view.buf
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_idx);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":362
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* Python wrapper */
/* mp_subscript wrapper for memoryview.__getitem__: downcasts `self` and
   forwards to the generated implementation.  Returns a new reference
   (or NULL on error). */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_r = __pyx_memoryview_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Typed implementation of memoryview.__getitem__ (View.MemoryView:362-373).
 * Behavior, as visible below:
 *   - index is Ellipsis  -> return self (new reference).
 *   - _unellipsify(index, ndim) yields (have_slices, indices); if
 *     have_slices is truthy, return a sliced memoryview from
 *     __pyx_memview_slice(self, indices);
 *   - otherwise look up the raw item pointer via the vtable's
 *     get_item_pointer and convert it to a Python object via
 *     convert_item_to_object.
 * Returns NULL (with an exception set) on any error path. */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_indices = NULL;
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
char *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":363
 *
 * def __getitem__(memoryview self, object index):
 * if index is Ellipsis: # <<<<<<<<<<<<<<
 * return self
 *
 */
__pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":364
 * def __getitem__(memoryview self, object index):
 * if index is Ellipsis:
 * return self # <<<<<<<<<<<<<<
 *
 * have_slices, indices = _unellipsify(index, self.view.ndim)
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__pyx_r = ((PyObject *)__pyx_v_self);
goto __pyx_L0;
}
/* "View.MemoryView":366
 * return self
 *
 * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
 *
 * cdef char *itemp
 */
__pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
/* Unpack the (have_slices, indices) 2-tuple returned by _unellipsify;
 * None or a wrong-sized sequence raises and jumps to the error label. */
if (likely(__pyx_t_3 != Py_None)) {
PyObject* sequence = __pyx_t_3;
#if CYTHON_COMPILING_IN_CPYTHON
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
#else
__pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
#endif
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_v_have_slices = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_v_indices = __pyx_t_5;
__pyx_t_5 = 0;
/* "View.MemoryView":369
 *
 * cdef char *itemp
 * if have_slices: # <<<<<<<<<<<<<<
 * return memview_slice(self, indices)
 * else:
 */
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_t_2) {
/* "View.MemoryView":370
 * cdef char *itemp
 * if have_slices:
 * return memview_slice(self, indices) # <<<<<<<<<<<<<<
 * else:
 * itemp = self.get_item_pointer(indices)
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/*else*/ {
/* "View.MemoryView":372
 * return memview_slice(self, indices)
 * else:
 * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<<
 * return self.convert_item_to_object(itemp)
 *
 */
__pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_itemp = __pyx_t_6;
/* "View.MemoryView":373
 * else:
 * itemp = self.get_item_pointer(indices)
 * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<<
 *
 * def __setitem__(memoryview self, object index, object value):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":362
 *
 *
 * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
 * if index is Ellipsis:
 * return self
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_indices);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":375
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* have_slices, index = _unellipsify(index, self.view.ndim)
*
*/
/* Python wrapper */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
/* Python-level __setitem__ slot for the memoryview type.  Pure
 * forwarder: it casts self to the concrete struct and delegates to the
 * typed implementation, returning its 0/-1 status unchanged. */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
int status;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
status = __pyx_memoryview_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_index, __pyx_v_value);
/* function exit code */
__Pyx_RefNannyFinishContext();
return status;
}
/* Typed implementation of memoryview.__setitem__ (View.MemoryView:375-385).
 * Flow, as visible below:
 *   - _unellipsify(index, ndim) rebinds index and yields have_slices;
 *   - if have_slices: is_slice(value) tries to view value as a
 *     memoryview; on success, setitem_slice_assignment copies the
 *     source slice into self[index], otherwise
 *     setitem_slice_assign_scalar broadcast-assigns the scalar value;
 *   - otherwise setitem_indexed writes a single element.
 * Returns 0 on success, -1 (with an exception set) on error. */
static int __pyx_memoryview_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_obj = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
/* index is rebound below (DECREF_SET), so own a reference to it */
__Pyx_INCREF(__pyx_v_index);
/* "View.MemoryView":376
 *
 * def __setitem__(memoryview self, object index, object value):
 * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
 *
 * if have_slices:
 */
__pyx_t_1 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
/* Unpack the (have_slices, index) 2-tuple from _unellipsify */
if (likely(__pyx_t_1 != Py_None)) {
PyObject* sequence = __pyx_t_1;
#if CYTHON_COMPILING_IN_CPYTHON
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_2 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
#else
__pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
#endif
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_v_have_slices = __pyx_t_2;
__pyx_t_2 = 0;
__Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":378
 * have_slices, index = _unellipsify(index, self.view.ndim)
 *
 * if have_slices: # <<<<<<<<<<<<<<
 * obj = self.is_slice(value)
 * if obj:
 */
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_t_4) {
/* "View.MemoryView":379
 *
 * if have_slices:
 * obj = self.is_slice(value) # <<<<<<<<<<<<<<
 * if obj:
 * self.setitem_slice_assignment(self[index], obj)
 */
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 379; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_obj = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":380
 * if have_slices:
 * obj = self.is_slice(value)
 * if obj: # <<<<<<<<<<<<<<
 * self.setitem_slice_assignment(self[index], obj)
 * else:
 */
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 380; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_t_4) {
/* "View.MemoryView":381
 * obj = self.is_slice(value)
 * if obj:
 * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<<
 * else:
 * self.setitem_slice_assign_scalar(self[index], value)
 */
/* self[index] builds the destination sub-view via __getitem__ */
__pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 381; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_1, __pyx_v_obj); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 381; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L4;
}
/*else*/ {
/* "View.MemoryView":383
 * self.setitem_slice_assignment(self[index], obj)
 * else:
 * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<<
 * else:
 * self.setitem_indexed(index, value)
 */
__pyx_t_3 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 383; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__Pyx_GOTREF(__pyx_t_3);
/* setitem_slice_assign_scalar takes a typed memoryview dst; type-check first */
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 383; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_3), __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 383; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
}
__pyx_L4:;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":385
 * self.setitem_slice_assign_scalar(self[index], value)
 * else:
 * self.setitem_indexed(index, value) # <<<<<<<<<<<<<<
 *
 * cdef is_slice(self, obj):
 */
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
}
__pyx_L3:;
/* "View.MemoryView":375
 * return self.convert_item_to_object(itemp)
 *
 * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
 * have_slices, index = _unellipsify(index, self.view.ndim)
 *
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":387
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
/* memoryview.is_slice (View.MemoryView:387-395).
 * If obj is not already a memoryview instance, try to wrap it in one
 * (with self.flags|PyBUF_ANY_CONTIGUOUS and self.dtype_is_object);
 * a TypeError from that constructor call is swallowed and Py_None is
 * returned instead.  Otherwise the (possibly wrapped) obj is returned.
 * On any other error, returns 0/NULL with an exception set. */
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_slice", 0);
/* obj may be rebound below (DECREF_SET), so own a reference to it */
__Pyx_INCREF(__pyx_v_obj);
/* "View.MemoryView":388
 *
 * cdef is_slice(self, obj):
 * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
 * try:
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 */
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, ((PyObject *)__pyx_memoryview_type));
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":389
 * cdef is_slice(self, obj):
 * if not isinstance(obj, memoryview):
 * try: # <<<<<<<<<<<<<<
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 * self.dtype_is_object)
 */
{
/* save the active exception state so the except clause can restore it */
__Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_5);
/*try:*/ {
/* "View.MemoryView":390
 * if not isinstance(obj, memoryview):
 * try:
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
 * self.dtype_is_object)
 * except TypeError:
 */
__pyx_t_6 = __Pyx_PyInt_From_int((__pyx_v_self->flags | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":391
 * try:
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 * self.dtype_is_object) # <<<<<<<<<<<<<<
 * except TypeError:
 * return None
 */
__pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 391; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
__Pyx_GOTREF(__pyx_t_7);
/* "View.MemoryView":390
 * if not isinstance(obj, memoryview):
 * try:
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
 * self.dtype_is_object)
 * except TypeError:
 */
/* build the 3-tuple (obj, flags, dtype_is_object) and call memoryview(...) */
__pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
__Pyx_GOTREF(__pyx_t_8);
__Pyx_INCREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
__Pyx_GIVEREF(__pyx_t_7);
__pyx_t_6 = 0;
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryview_type)), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
__pyx_t_7 = 0;
}
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
goto __pyx_L11_try_end;
__pyx_L4_error:;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":392
 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
 * self.dtype_is_object)
 * except TypeError: # <<<<<<<<<<<<<<
 * return None
 *
 */
__pyx_t_9 = PyErr_ExceptionMatches(__pyx_builtin_TypeError);
if (__pyx_t_9) {
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L6_except_error;}
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":393
 * self.dtype_is_object)
 * except TypeError:
 * return None # <<<<<<<<<<<<<<
 *
 * return obj
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(Py_None);
__pyx_r = Py_None;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
goto __pyx_L7_except_return;
/* NOTE: generated dead code below — the goto above always fires */
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
goto __pyx_L5_exception_handled;
}
goto __pyx_L6_except_error;
__pyx_L6_except_error:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L1_error;
__pyx_L7_except_return:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L0;
__pyx_L5_exception_handled:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
__pyx_L11_try_end:;
}
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":395
 * return None
 *
 * return obj # <<<<<<<<<<<<<<
 *
 * cdef setitem_slice_assignment(self, dst, src):
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_obj);
__pyx_r = __pyx_v_obj;
goto __pyx_L0;
/* "View.MemoryView":387
 * self.setitem_indexed(index, value)
 *
 * cdef is_slice(self, obj): # <<<<<<<<<<<<<<
 * if not isinstance(obj, memoryview):
 * try:
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":397
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
/* memoryview.setitem_slice_assignment (View.MemoryView:397-403).
 * Copies the contents of memoryview src into memoryview dst by
 * extracting raw __Pyx_memviewslice structs from both (via
 * get_slice_from_memview) and calling memoryview_copy_contents with
 * src.ndim, dst.ndim and self.dtype_is_object.
 * Returns Py_None on success, 0/NULL with an exception set on error. */
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
__Pyx_memviewslice __pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_src_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assignment", 0);
/* "View.MemoryView":401
 * cdef __Pyx_memviewslice src_slice
 *
 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
 * get_slice_from_memview(dst, &dst_slice)[0],
 * src.ndim, dst.ndim, self.dtype_is_object)
 */
/* dst and src arrive as generic objects; verify both are memoryviews
 * before the unchecked struct casts in the call below */
if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 401; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":402
 *
 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
 * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<<
 * src.ndim, dst.ndim, self.dtype_is_object)
 *
 */
if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":403
 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
 * get_slice_from_memview(dst, &dst_slice)[0],
 * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<<
 *
 * cdef setitem_slice_assign_scalar(self, memoryview dst, value):
 */
/* fetch src.ndim and dst.ndim as C ints via attribute lookup */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":401
 * cdef __Pyx_memviewslice src_slice
 *
 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
 * get_slice_from_memview(dst, &dst_slice)[0],
 * src.ndim, dst.ndim, self.dtype_is_object)
 */
__pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 401; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":397
 * return obj
 *
 * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice dst_slice
 * cdef __Pyx_memviewslice src_slice
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":405
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
/* memoryview.setitem_slice_assign_scalar (View.MemoryView:405-437).
 * Broadcast-assigns a single scalar `value` to every element of the
 * destination slice `dst`.  The scalar is first packed into a
 * one-item buffer: a 128-int stack array when itemsize fits, else a
 * malloc'ed buffer (freed on all paths).  For object dtypes the
 * PyObject pointer is stored directly; otherwise
 * assign_item_from_object serializes the value, with `free(tmp)`
 * performed before re-raising any exception.  Finally
 * slice_assign_scalar copies the packed item across the slice.
 * Returns Py_None on success, 0/NULL with an exception set on error. */
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
int __pyx_v_array[128];
void *__pyx_v_tmp;
void *__pyx_v_item;
__Pyx_memviewslice *__pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_tmp_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);
/* "View.MemoryView":407
 * cdef setitem_slice_assign_scalar(self, memoryview dst, value):
 * cdef int array[128]
 * cdef void *tmp = NULL # <<<<<<<<<<<<<<
 * cdef void *item
 *
 */
__pyx_v_tmp = NULL;
/* "View.MemoryView":412
 * cdef __Pyx_memviewslice *dst_slice
 * cdef __Pyx_memviewslice tmp_slice
 * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<<
 *
 * if <size_t>self.view.itemsize > sizeof(array):
 */
__pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice));
/* "View.MemoryView":414
 * dst_slice = get_slice_from_memview(dst, &tmp_slice)
 *
 * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
 * tmp = malloc(self.view.itemsize)
 * if tmp == NULL:
 */
/* choose stack array vs heap buffer depending on itemsize */
__pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":415
 *
 * if <size_t>self.view.itemsize > sizeof(array):
 * tmp = malloc(self.view.itemsize) # <<<<<<<<<<<<<<
 * if tmp == NULL:
 * raise MemoryError
 */
__pyx_v_tmp = malloc(__pyx_v_self->view.itemsize);
/* "View.MemoryView":416
 * if <size_t>self.view.itemsize > sizeof(array):
 * tmp = malloc(self.view.itemsize)
 * if tmp == NULL: # <<<<<<<<<<<<<<
 * raise MemoryError
 * item = tmp
 */
__pyx_t_1 = ((__pyx_v_tmp == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":417
 * tmp = malloc(self.view.itemsize)
 * if tmp == NULL:
 * raise MemoryError # <<<<<<<<<<<<<<
 * item = tmp
 * else:
 */
PyErr_NoMemory(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 417; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":418
 * if tmp == NULL:
 * raise MemoryError
 * item = tmp # <<<<<<<<<<<<<<
 * else:
 * item = <void *> array
 */
__pyx_v_item = __pyx_v_tmp;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":420
 * item = tmp
 * else:
 * item = <void *> array # <<<<<<<<<<<<<<
 *
 * if self.dtype_is_object:
 */
__pyx_v_item = ((void *)__pyx_v_array);
}
__pyx_L3:;
/* "View.MemoryView":422
 * item = <void *> array
 *
 * if self.dtype_is_object: # <<<<<<<<<<<<<<
 * (<PyObject **> item)[0] = <PyObject *> value
 * else:
 */
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":423
 *
 * if self.dtype_is_object:
 * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<<
 * else:
 * try:
 */
/* object dtype: store the raw PyObject pointer (borrowed here;
 * slice_assign_scalar handles refcounting via dtype_is_object) */
(((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);
goto __pyx_L5;
}
/*else*/ {
/* "View.MemoryView":425
 * (<PyObject **> item)[0] = <PyObject *> value
 * else:
 * try: # <<<<<<<<<<<<<<
 * self.assign_item_from_object(<char *> item, value)
 * except:
 */
{
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
/*try:*/ {
/* "View.MemoryView":426
 * else:
 * try:
 * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<<
 * except:
 * free(tmp)
 */
__pyx_t_5 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 426; __pyx_clineno = __LINE__; goto __pyx_L6_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
goto __pyx_L13_try_end;
__pyx_L6_error:;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
/* "View.MemoryView":427
 * try:
 * self.assign_item_from_object(<char *> item, value)
 * except: # <<<<<<<<<<<<<<
 * free(tmp)
 * raise
 */
/*except:*/ {
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 427; __pyx_clineno = __LINE__; goto __pyx_L8_except_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "View.MemoryView":428
 * self.assign_item_from_object(<char *> item, value)
 * except:
 * free(tmp) # <<<<<<<<<<<<<<
 * raise
 *
 */
/* release the heap buffer before re-raising (free(NULL) is a no-op) */
free(__pyx_v_tmp);
/* "View.MemoryView":429
 * except:
 * free(tmp)
 * raise # <<<<<<<<<<<<<<
 *
 *
 */
__Pyx_GIVEREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_6);
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_ErrRestore(__pyx_t_5, __pyx_t_6, __pyx_t_7);
__pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_7 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 429; __pyx_clineno = __LINE__; goto __pyx_L8_except_error;}
/* NOTE: generated dead code below — the goto above always fires */
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
goto __pyx_L7_exception_handled;
}
__pyx_L8_except_error:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L1_error;
__pyx_L7_exception_handled:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
__pyx_L13_try_end:;
}
}
__pyx_L5:;
/* "View.MemoryView":433
 *
 *
 * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
 * assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
 * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
 */
__pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":434
 *
 * if self.view.suboffsets != NULL:
 * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<<
 * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
 * item, self.dtype_is_object)
 */
__pyx_t_7 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 434; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
goto __pyx_L16;
}
__pyx_L16:;
/* "View.MemoryView":435
 * if self.view.suboffsets != NULL:
 * assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
 * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<<
 * item, self.dtype_is_object)
 * free(tmp)
 */
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
/* "View.MemoryView":437
 * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
 * item, self.dtype_is_object)
 * free(tmp) # <<<<<<<<<<<<<<
 *
 * cdef setitem_indexed(self, index, value):
 */
free(__pyx_v_tmp);
/* "View.MemoryView":405
 * src.ndim, dst.ndim, self.dtype_is_object)
 *
 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
 * cdef int array[128]
 * cdef void *tmp = NULL
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":439
* free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
/* memoryview.setitem_indexed (View.MemoryView:439-441).
 * Single-element assignment: resolves the raw item pointer for `index`
 * via the vtable's get_item_pointer, then writes `value` into it via
 * assign_item_from_object.
 * Returns Py_None on success, 0/NULL with an exception set on error. */
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
char *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_indexed", 0);
/* "View.MemoryView":440
 *
 * cdef setitem_indexed(self, index, value):
 * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<<
 * self.assign_item_from_object(itemp, value)
 *
 */
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 440; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_itemp = __pyx_t_1;
/* "View.MemoryView":441
 * cdef setitem_indexed(self, index, value):
 * cdef char *itemp = self.get_item_pointer(index)
 * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<<
 *
 * cdef convert_item_to_object(self, char *itemp):
 */
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 441; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":439
 * free(tmp)
 *
 * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
 * cdef char *itemp = self.get_item_pointer(index)
 * self.assign_item_from_object(itemp, value)
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":443
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* memoryview.convert_item_to_object: fallback conversion of the raw item
 * bytes at `itemp` into a Python object via the `struct` module, using
 * self.view.format as the pack format.  Per the Cython docstring this path
 * is only used when the memoryview was instantiated manually or Cython
 * cannot convert the item type natively.  Returns a NEW reference, or
 * NULL (0) with an exception set on failure.  `struct.error` raised by
 * struct.unpack is translated into ValueError. */
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_v_struct = NULL;
PyObject *__pyx_v_bytesitem = 0;
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
size_t __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":446
 * """Only used if instantiated manually by the user, or if Cython doesn't
 * know how to convert the type"""
 * import struct # <<<<<<<<<<<<<<
 * cdef bytes bytesitem
 *
 */
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 446; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":449
 * cdef bytes bytesitem
 *
 * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<<
 * try:
 *     result = struct.unpack(self.view.format, bytesitem)
 */
/* Copy exactly itemsize raw bytes into a new bytes object. */
__pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 449; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":450
 *
 * bytesitem = itemp[:self.view.itemsize]
 * try: # <<<<<<<<<<<<<<
 *     result = struct.unpack(self.view.format, bytesitem)
 * except struct.error:
 */
/* try/except/else machinery: the current exception state is saved in
 * __pyx_t_2..4 and restored on every exit path from the try block. */
{
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
/*try:*/ {
/* "View.MemoryView":451
 * bytesitem = itemp[:self.view.itemsize]
 * try:
 *     result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<<
 * except struct.error:
 *     raise ValueError("Unable to convert item to object")
 */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 451; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 451; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__Pyx_GOTREF(__pyx_t_5);
/* Build the (format, bytesitem) argument tuple for struct.unpack. */
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 451; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__Pyx_GOTREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v_bytesitem);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_v_bytesitem);
__Pyx_GIVEREF(__pyx_v_bytesitem);
__pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 451; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_v_result = __pyx_t_5;
__pyx_t_5 = 0;
}
/*else:*/ {
/* "View.MemoryView":455
 *     raise ValueError("Unable to convert item to object")
 * else:
 *     if len(self.view.format) == 1: # <<<<<<<<<<<<<<
 *         return result[0]
 *     return result
 */
/* Single-character format => struct.unpack returned a 1-tuple; unwrap it. */
__pyx_t_7 = strlen(__pyx_v_self->view.format);
__pyx_t_8 = ((__pyx_t_7 == 1) != 0);
if (__pyx_t_8) {
/* "View.MemoryView":456
 * else:
 *     if len(self.view.format) == 1:
 *         return result[0] # <<<<<<<<<<<<<<
 *     return result
 *
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_5 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(__pyx_t_5 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 456; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;};
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L6_except_return;
}
/* "View.MemoryView":457
 * if len(self.view.format) == 1:
 *     return result[0]
 * return result # <<<<<<<<<<<<<<
 *
 * cdef assign_item_from_object(self, char *itemp, object value):
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L6_except_return;
}
/* Normal fall-through exit of the try block: drop the saved exception state. */
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
goto __pyx_L10_try_end;
__pyx_L3_error:;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
/* "View.MemoryView":452
 * try:
 *     result = struct.unpack(self.view.format, bytesitem)
 * except struct.error: # <<<<<<<<<<<<<<
 *     raise ValueError("Unable to convert item to object")
 * else:
 */
/* Exception handler: only struct.error is caught; anything else propagates
 * via __pyx_L5_except_error. */
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 452; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_9 = PyErr_ExceptionMatches(__pyx_t_5);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_9) {
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_1) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 452; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_1);
/* "View.MemoryView":453
 *     result = struct.unpack(self.view.format, bytesitem)
 * except struct.error:
 *     raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
 * else:
 *     if len(self.view.format) == 1:
 */
__pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 453; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
__Pyx_GOTREF(__pyx_t_10);
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 453; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
/* NOTE: the following cleanup lines are generated but unreachable — the
 * raise above always jumps to __pyx_L5_except_error. */
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
goto __pyx_L4_exception_handled;
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* Unhandled exception: restore the saved exception state, then fail. */
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L1_error;
__pyx_L6_except_return:;
/* return from inside try/else: restore saved exception state, then exit. */
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L0;
__pyx_L4_exception_handled:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
__pyx_L10_try_end:;
}
/* "View.MemoryView":443
 * self.assign_item_from_object(itemp, value)
 *
 * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
 * """Only used if instantiated manually by the user, or if Cython doesn't
 * know how to convert the type"""
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesitem);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":459
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* memoryview.assign_item_from_object: fallback write path — serialize a
 * Python `value` into the raw item slot at `itemp` using struct.pack with
 * self.view.format.  Tuples are unpacked as multiple struct fields
 * (struct.pack(fmt, *value)); any other value is packed as a single field.
 * The packed bytes are then copied byte-by-byte into itemp.  Returns
 * Py_None on success, NULL (0) with an exception set on failure. */
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_v_struct = NULL;
char __pyx_v_c;
PyObject *__pyx_v_bytesvalue = 0;
Py_ssize_t __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
Py_ssize_t __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
char *__pyx_t_9;
char *__pyx_t_10;
char *__pyx_t_11;
char *__pyx_t_12;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":462
 * """Only used if instantiated manually by the user, or if Cython doesn't
 * know how to convert the type"""
 * import struct # <<<<<<<<<<<<<<
 * cdef char c
 * cdef bytes bytesvalue
 */
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":467
 * cdef Py_ssize_t i
 *
 * if isinstance(value, tuple): # <<<<<<<<<<<<<<
 *     bytesvalue = struct.pack(self.view.format, *value)
 * else:
 */
__pyx_t_2 = PyTuple_Check(__pyx_v_value);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "View.MemoryView":468
 *
 * if isinstance(value, tuple):
 *     bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<<
 * else:
 *     bytesvalue = struct.pack(self.view.format, value)
 */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 468; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 468; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
/* Argument tuple is (format,) + tuple(value), i.e. star-unpacking `value`. */
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 468; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 468; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 468; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 468; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
/* cdef bytes typing: the result must be exactly bytes (or None). */
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 468; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":470
 *     bytesvalue = struct.pack(self.view.format, *value)
 * else:
 *     bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<<
 *
 * for i, c in enumerate(bytesvalue):
 */
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 470; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 470; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 470; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_6);
__Pyx_INCREF(__pyx_v_value);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_value);
__Pyx_GIVEREF(__pyx_v_value);
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_1, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 470; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_6))||((__pyx_t_6) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_6)->tp_name), 0))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 470; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_6);
__pyx_t_6 = 0;
}
__pyx_L3:;
/* "View.MemoryView":472
 * bytesvalue = struct.pack(self.view.format, value)
 *
 * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
 *     itemp[i] = c
 *
 */
/* enumerate() over a bytes object, specialized to direct pointer iteration
 * over the internal buffer; __pyx_t_7 is the running index `i`. */
__pyx_t_7 = 0;
if (unlikely(__pyx_v_bytesvalue == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 472; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__Pyx_INCREF(__pyx_v_bytesvalue);
__pyx_t_8 = __pyx_v_bytesvalue;
__pyx_t_10 = PyBytes_AS_STRING(__pyx_t_8);
__pyx_t_11 = (__pyx_t_10 + PyBytes_GET_SIZE(__pyx_t_8));
for (__pyx_t_12 = __pyx_t_10; __pyx_t_12 < __pyx_t_11; __pyx_t_12++) {
__pyx_t_9 = __pyx_t_12;
__pyx_v_c = (__pyx_t_9[0]);
/* "View.MemoryView":473
 *
 * for i, c in enumerate(bytesvalue):
 *     itemp[i] = c # <<<<<<<<<<<<<<
 *
 * @cname('getbuffer')
 */
__pyx_v_i = __pyx_t_7;
/* "View.MemoryView":472
 * bytesvalue = struct.pack(self.view.format, value)
 *
 * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
 *     itemp[i] = c
 *
 */
__pyx_t_7 = (__pyx_t_7 + 1);
/* "View.MemoryView":473
 *
 * for i, c in enumerate(bytesvalue):
 *     itemp[i] = c # <<<<<<<<<<<<<<
 *
 * @cname('getbuffer')
 */
/* Store each packed byte into the destination item slot. */
(__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
}
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
/* "View.MemoryView":459
 * return result
 *
 * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
 * """Only used if instantiated manually by the user, or if Cython doesn't
 * know how to convert the type"""
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesvalue);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":476
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_STRIDES:
* info.shape = self.view.shape
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
/* Python-level __getbuffer__ entry point: downcast `self` and delegate to
 * the typed implementation, passing `info` and `flags` through unchanged. */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
  int rc;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
  rc = __pyx_memoryview_getbuffer_MemoryView_10memoryview_8__getbuffer__(
      (struct __pyx_memoryview_obj *)__pyx_v_self, (Py_buffer *)__pyx_v_info, (int)__pyx_v_flags);
  __Pyx_RefNannyFinishContext();
  return rc;
}
/* memoryview.__getbuffer__: export this memoryview's underlying Py_buffer
 * (self.view) into `info` for a buffer-protocol consumer.  shape/strides/
 * suboffsets/format are only exported when the corresponding flag bits
 * (PyBUF_STRIDES, PyBUF_INDIRECT, PyBUF_FORMAT) are set, otherwise NULL.
 * info.readonly is hard-coded to 0 here, and info.obj is set to self to
 * keep the memoryview alive while the buffer is held.  Returns 0 (this
 * implementation has no failure path). */
static int __pyx_memoryview_getbuffer_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
char *__pyx_t_3;
void *__pyx_t_4;
int __pyx_t_5;
Py_ssize_t __pyx_t_6;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
/* Pre-initialize info->obj to None; replaced by `self` below, and cleared
 * again at exit if still None. */
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "View.MemoryView":477
 * @cname('getbuffer')
 * def __getbuffer__(self, Py_buffer *info, int flags):
 *     if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
 *         info.shape = self.view.shape
 *     else:
 */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":478
 * def __getbuffer__(self, Py_buffer *info, int flags):
 *     if flags & PyBUF_STRIDES:
 *         info.shape = self.view.shape # <<<<<<<<<<<<<<
 *     else:
 *         info.shape = NULL
 */
__pyx_t_2 = __pyx_v_self->view.shape;
__pyx_v_info->shape = __pyx_t_2;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":480
 *         info.shape = self.view.shape
 *     else:
 *         info.shape = NULL # <<<<<<<<<<<<<<
 *
 *     if flags & PyBUF_STRIDES:
 */
__pyx_v_info->shape = NULL;
}
__pyx_L3:;
/* "View.MemoryView":482
 *         info.shape = NULL
 *
 *     if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
 *         info.strides = self.view.strides
 *     else:
 */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":483
 *
 *     if flags & PyBUF_STRIDES:
 *         info.strides = self.view.strides # <<<<<<<<<<<<<<
 *     else:
 *         info.strides = NULL
 */
__pyx_t_2 = __pyx_v_self->view.strides;
__pyx_v_info->strides = __pyx_t_2;
goto __pyx_L4;
}
/*else*/ {
/* "View.MemoryView":485
 *         info.strides = self.view.strides
 *     else:
 *         info.strides = NULL # <<<<<<<<<<<<<<
 *
 *     if flags & PyBUF_INDIRECT:
 */
__pyx_v_info->strides = NULL;
}
__pyx_L4:;
/* "View.MemoryView":487
 *         info.strides = NULL
 *
 *     if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
 *         info.suboffsets = self.view.suboffsets
 *     else:
 */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":488
 *
 *     if flags & PyBUF_INDIRECT:
 *         info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<<
 *     else:
 *         info.suboffsets = NULL
 */
__pyx_t_2 = __pyx_v_self->view.suboffsets;
__pyx_v_info->suboffsets = __pyx_t_2;
goto __pyx_L5;
}
/*else*/ {
/* "View.MemoryView":490
 *         info.suboffsets = self.view.suboffsets
 *     else:
 *         info.suboffsets = NULL # <<<<<<<<<<<<<<
 *
 *     if flags & PyBUF_FORMAT:
 */
__pyx_v_info->suboffsets = NULL;
}
__pyx_L5:;
/* "View.MemoryView":492
 *         info.suboffsets = NULL
 *
 *     if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
 *         info.format = self.view.format
 *     else:
 */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":493
 *
 *     if flags & PyBUF_FORMAT:
 *         info.format = self.view.format # <<<<<<<<<<<<<<
 *     else:
 *         info.format = NULL
 */
__pyx_t_3 = __pyx_v_self->view.format;
__pyx_v_info->format = __pyx_t_3;
goto __pyx_L6;
}
/*else*/ {
/* "View.MemoryView":495
 *         info.format = self.view.format
 *     else:
 *         info.format = NULL # <<<<<<<<<<<<<<
 *
 *     info.buf = self.view.buf
 */
__pyx_v_info->format = NULL;
}
__pyx_L6:;
/* Unconditional fields: buffer pointer, dimensionality, item size, total
 * length, writability, and the owning object. */
/* "View.MemoryView":497
 *         info.format = NULL
 *
 *     info.buf = self.view.buf # <<<<<<<<<<<<<<
 *     info.ndim = self.view.ndim
 *     info.itemsize = self.view.itemsize
 */
__pyx_t_4 = __pyx_v_self->view.buf;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":498
 *
 *     info.buf = self.view.buf
 *     info.ndim = self.view.ndim # <<<<<<<<<<<<<<
 *     info.itemsize = self.view.itemsize
 *     info.len = self.view.len
 */
__pyx_t_5 = __pyx_v_self->view.ndim;
__pyx_v_info->ndim = __pyx_t_5;
/* "View.MemoryView":499
 *     info.buf = self.view.buf
 *     info.ndim = self.view.ndim
 *     info.itemsize = self.view.itemsize # <<<<<<<<<<<<<<
 *     info.len = self.view.len
 *     info.readonly = 0
 */
__pyx_t_6 = __pyx_v_self->view.itemsize;
__pyx_v_info->itemsize = __pyx_t_6;
/* "View.MemoryView":500
 *     info.ndim = self.view.ndim
 *     info.itemsize = self.view.itemsize
 *     info.len = self.view.len # <<<<<<<<<<<<<<
 *     info.readonly = 0
 *     info.obj = self
 */
__pyx_t_6 = __pyx_v_self->view.len;
__pyx_v_info->len = __pyx_t_6;
/* "View.MemoryView":501
 *     info.itemsize = self.view.itemsize
 *     info.len = self.view.len
 *     info.readonly = 0 # <<<<<<<<<<<<<<
 *     info.obj = self
 *
 */
__pyx_v_info->readonly = 0;
/* "View.MemoryView":502
 *     info.len = self.view.len
 *     info.readonly = 0
 *     info.obj = self # <<<<<<<<<<<<<<
 *
 * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
 */
/* Replace the placeholder None in info->obj with a new reference to self. */
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":476
 *
 * @cname('getbuffer')
 * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
 *     if flags & PyBUF_STRIDES:
 *         info.shape = self.view.shape
 */
/* function exit code */
__pyx_r = 0;
/* If info->obj was never replaced (cannot happen on this path, but kept by
 * the generator for uniformity), drop the placeholder None reference. */
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":509
* property T:
* @cname('__pyx_memoryview_transpose')
* def __get__(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self); /*proto*/
/* Python-level getter for the `T` property: downcast `self` and delegate
 * to the typed __get__ implementation. */
static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self) {
  PyObject *result;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  result = __pyx_memoryview_transpose_MemoryView_10memoryview_1T___get__(
      (struct __pyx_memoryview_obj *)__pyx_v_self);
  __Pyx_RefNannyFinishContext();
  return result;
}
/* memoryview.T.__get__: return a transposed copy of this memoryview.
 * First copies the view object via memoryview_copy(), then transposes the
 * copy's slice descriptor in place with transpose_memslice().  Returns a
 * new reference to the transposed _memoryviewslice, or NULL on failure. */
static PyObject *__pyx_memoryview_transpose_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":510
 * @cname('__pyx_memoryview_transpose')
 * def __get__(self):
 *     cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<<
 *     transpose_memslice(&result.from_slice)
 *     return result
 */
__pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 510; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
/* cdef typing check: the copy must be a _memoryviewslice (or None). */
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 510; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":511
 * def __get__(self):
 *     cdef _memoryviewslice result = memoryview_copy(self)
 *     transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<<
 *     return result
 *
 */
/* Transpose in place; a return value of 0 signals failure here. */
__pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 511; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":512
 *     cdef _memoryviewslice result = memoryview_copy(self)
 *     transpose_memslice(&result.from_slice)
 *     return result # <<<<<<<<<<<<<<
 *
 * property base:
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":509
 * property T:
 * @cname('__pyx_memoryview_transpose')
 * def __get__(self): # <<<<<<<<<<<<<<
 *     cdef _memoryviewslice result = memoryview_copy(self)
 *     transpose_memslice(&result.from_slice)
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":516
* property base:
* @cname('__pyx_memoryview__get__base')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self); /*proto*/
/* Python-level getter for the `base` property: downcast `self` and
 * delegate to the typed __get__ implementation. */
static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self) {
  PyObject *result;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  result = __pyx_memoryview__get__base_MemoryView_10memoryview_4base___get__(
      (struct __pyx_memoryview_obj *)__pyx_v_self);
  __Pyx_RefNannyFinishContext();
  return result;
}
/* memoryview.base.__get__: return self.obj (the object this memoryview was
 * created from) as a new reference.  Cannot fail. */
static PyObject *__pyx_memoryview__get__base_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *result;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  /* "View.MemoryView":517 -- return self.obj */
  result = __pyx_v_self->obj;
  __Pyx_INCREF(result);
  __Pyx_XGIVEREF(result);
  __Pyx_RefNannyFinishContext();
  return result;
}
/* "View.MemoryView":521
* property shape:
* @cname('__pyx_memoryview_get_shape')
* def __get__(self): # <<<<<<<<<<<<<<
* return tuple([self.view.shape[i] for i in xrange(self.view.ndim)])
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self); /*proto*/
/* Python-level getter for the `shape` property: downcast `self` and
 * delegate to the typed __get__ implementation. */
static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self) {
  PyObject *result;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  result = __pyx_memoryview_get_shape_MemoryView_10memoryview_5shape___get__(
      (struct __pyx_memoryview_obj *)__pyx_v_self);
  __Pyx_RefNannyFinishContext();
  return result;
}
/* memoryview.shape.__get__: build and return
 * tuple(self.view.shape[i] for i in range(self.view.ndim)) — the extent of
 * each dimension as Python ints.  Returns a new reference, NULL on failure. */
static PyObject *__pyx_memoryview_get_shape_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
int __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":522
 * @cname('__pyx_memoryview_get_shape')
 * def __get__(self):
 *     return tuple([self.view.shape[i] for i in xrange(self.view.ndim)]) # <<<<<<<<<<<<<<
 *
 * property strides:
 */
__Pyx_XDECREF(__pyx_r);
/* List comprehension: collect each shape entry as a Python int ... */
__pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __pyx_v_self->view.ndim;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
__pyx_t_4 = PyInt_FromSsize_t((__pyx_v_self->view.shape[__pyx_v_i])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_4))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
}
/* ... then freeze the list into the returned tuple. */
__pyx_t_4 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L0;
/* "View.MemoryView":521
 * property shape:
 * @cname('__pyx_memoryview_get_shape')
 * def __get__(self): # <<<<<<<<<<<<<<
 *     return tuple([self.view.shape[i] for i in xrange(self.view.ndim)])
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":526
* property strides:
* @cname('__pyx_memoryview_get_strides')
* def __get__(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self); /*proto*/
/* Python-level getter for the `strides` property: downcast `self` and
 * delegate to the typed __get__ implementation. */
static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self) {
  PyObject *result;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  result = __pyx_memoryview_get_strides_MemoryView_10memoryview_7strides___get__(
      (struct __pyx_memoryview_obj *)__pyx_v_self);
  __Pyx_RefNannyFinishContext();
  return result;
}
/* memoryview.strides.__get__: build and return
 * tuple(self.view.strides[i] for i in range(self.view.ndim)).  Raises
 * ValueError when the underlying buffer did not export strides
 * (self.view.strides == NULL).  Returns a new reference, NULL on failure. */
static PyObject *__pyx_memoryview_get_strides_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
int __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":527
 * @cname('__pyx_memoryview_get_strides')
 * def __get__(self):
 *     if self.view.strides == NULL: # <<<<<<<<<<<<<<
 *
 *         raise ValueError("Buffer view does not expose strides")
 */
__pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":529
 * if self.view.strides == NULL:
 *
 *     raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
 *
 * return tuple([self.view.strides[i] for i in xrange(self.view.ndim)])
 */
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 529; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 529; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":531
 * raise ValueError("Buffer view does not expose strides")
 *
 * return tuple([self.view.strides[i] for i in xrange(self.view.ndim)]) # <<<<<<<<<<<<<<
 *
 * property suboffsets:
 */
__Pyx_XDECREF(__pyx_r);
/* List comprehension: collect each stride as a Python int ... */
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __pyx_v_self->view.ndim;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
__pyx_t_5 = PyInt_FromSsize_t((__pyx_v_self->view.strides[__pyx_v_i])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
/* ... then freeze the list into the returned tuple. */
__pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":526
 * property strides:
 * @cname('__pyx_memoryview_get_strides')
 * def __get__(self): # <<<<<<<<<<<<<<
 *     if self.view.strides == NULL:
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":535
* property suboffsets:
* @cname('__pyx_memoryview_get_suboffsets')
* def __get__(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return [-1] * self.view.ndim
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self); /*proto*/
/* Python-level getter for the `suboffsets` property: downcast `self` and
 * delegate to the typed __get__ implementation. */
static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self) {
  PyObject *result;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  result = __pyx_memoryview_get_suboffsets_MemoryView_10memoryview_10suboffsets___get__(
      (struct __pyx_memoryview_obj *)__pyx_v_self);
  __Pyx_RefNannyFinishContext();
  return result;
}
/* Implementation of memoryview.suboffsets.__get__:
 * - if the underlying Py_buffer has no suboffsets array (view.suboffsets
 *   == NULL), return a new list of ndim copies of -1;
 * - otherwise build a list of view.suboffsets[i] for i in range(ndim)
 *   and return it converted to a tuple.
 * On any allocation failure control jumps to __pyx_L1_error, which
 * releases temporaries, records a traceback, and returns NULL. */
static PyObject *__pyx_memoryview_get_suboffsets_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
int __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":536
 * @cname('__pyx_memoryview_get_suboffsets')
 * def __get__(self):
 * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
 * return [-1] * self.view.ndim
 *
 */
__pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":537
 * def __get__(self):
 * if self.view.suboffsets == NULL:
 * return [-1] * self.view.ndim # <<<<<<<<<<<<<<
 *
 * return tuple([self.view.suboffsets[i] for i in xrange(self.view.ndim)])
 */
__Pyx_XDECREF(__pyx_r);
/* Preallocate a list of length max(ndim, 0) and fill every slot with the
 * shared cached int object for -1 (each slot takes its own reference). */
__pyx_t_2 = PyList_New(1 * ((__pyx_v_self->view.ndim<0) ? 0:__pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 537; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < __pyx_v_self->view.ndim; __pyx_temp++) {
__Pyx_INCREF(__pyx_int_neg_1);
PyList_SET_ITEM(__pyx_t_2, __pyx_temp, __pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_int_neg_1);
}
}
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":539
 * return [-1] * self.view.ndim
 *
 * return tuple([self.view.suboffsets[i] for i in xrange(self.view.ndim)]) # <<<<<<<<<<<<<<
 *
 * property ndim:
 */
__Pyx_XDECREF(__pyx_r);
/* Build the comprehension list, then convert to a tuple for the return. */
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 539; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __pyx_v_self->view.ndim;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
__pyx_t_5 = PyInt_FromSsize_t((__pyx_v_self->view.suboffsets[__pyx_v_i])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 539; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 539; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 539; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":535
 * property suboffsets:
 * @cname('__pyx_memoryview_get_suboffsets')
 * def __get__(self): # <<<<<<<<<<<<<<
 * if self.view.suboffsets == NULL:
 * return [-1] * self.view.ndim
 */
/* function exit code */
/* Error path: drop any live temporaries, record the Python traceback. */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":543
* property ndim:
* @cname('__pyx_memoryview_get_ndim')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* Python wrapper */
/* CPython-level getter for memoryview.ndim: casts self to the concrete
 * memoryview struct and delegates to the typed implementation.
 * Returns a new reference or NULL on error. */
static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_memoryview_get_ndim_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.ndim.__get__: boxes the C int
 * self.view.ndim into a Python int and returns it (new reference).
 * Returns NULL with a traceback recorded if the boxing fails. */
static PyObject *__pyx_memoryview_get_ndim_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":544
 * @cname('__pyx_memoryview_get_ndim')
 * def __get__(self):
 * return self.view.ndim # <<<<<<<<<<<<<<
 *
 * property itemsize:
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":543
 * property ndim:
 * @cname('__pyx_memoryview_get_ndim')
 * def __get__(self): # <<<<<<<<<<<<<<
 * return self.view.ndim
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":548
* property itemsize:
* @cname('__pyx_memoryview_get_itemsize')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* Python wrapper */
/* CPython-level getter for memoryview.itemsize: casts self to the
 * concrete memoryview struct and delegates to the typed implementation.
 * Returns a new reference or NULL on error. */
static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_memoryview_get_itemsize_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.itemsize.__get__: boxes the Py_ssize_t
 * self.view.itemsize into a Python int and returns it (new reference).
 * Returns NULL with a traceback recorded if the boxing fails. */
static PyObject *__pyx_memoryview_get_itemsize_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":549
 * @cname('__pyx_memoryview_get_itemsize')
 * def __get__(self):
 * return self.view.itemsize # <<<<<<<<<<<<<<
 *
 * property nbytes:
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 549; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":548
 * property itemsize:
 * @cname('__pyx_memoryview_get_itemsize')
 * def __get__(self): # <<<<<<<<<<<<<<
 * return self.view.itemsize
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":553
* property nbytes:
* @cname('__pyx_memoryview_get_nbytes')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* Python wrapper */
/* CPython-level getter for memoryview.nbytes: casts self to the concrete
 * memoryview struct and delegates to the typed implementation.
 * Returns a new reference or NULL on error. */
static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_memoryview_get_nbytes_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.nbytes.__get__: computes
 * self.size * self.view.itemsize at the Python object level.
 * Note: "size" is looked up as a Python attribute on self (which goes
 * through the size property getter below), then multiplied with the boxed
 * itemsize via PyNumber_Multiply. Returns a new reference or NULL on
 * error with a traceback recorded. */
static PyObject *__pyx_memoryview_get_nbytes_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":554
 * @cname('__pyx_memoryview_get_nbytes')
 * def __get__(self):
 * return self.size * self.view.itemsize # <<<<<<<<<<<<<<
 *
 * property size:
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":553
 * property nbytes:
 * @cname('__pyx_memoryview_get_nbytes')
 * def __get__(self): # <<<<<<<<<<<<<<
 * return self.size * self.view.itemsize
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":558
* property size:
* @cname('__pyx_memoryview_get_size')
* def __get__(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* Python wrapper */
/* CPython-level getter for memoryview.size: casts self to the concrete
 * memoryview struct and delegates to the typed implementation.
 * Returns a new reference or NULL on error. */
static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_memoryview_get_size_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.size.__get__ with lazy caching:
 * - if self._size is still None, compute the element count as the
 *   product of the lengths in self.shape (a generic Python for-loop over
 *   whatever "self.shape" yields) and store it into self._size;
 * - always return self._size (new reference).
 * The iteration below is Cython's generic protocol: a fast inline path
 * for exact list/tuple and a tp_iternext path for any other iterable.
 * Returns NULL with a traceback recorded on any failure. */
static PyObject *__pyx_memoryview_get_size_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
PyObject *(*__pyx_t_6)(PyObject *);
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":559
 * @cname('__pyx_memoryview_get_size')
 * def __get__(self):
 * if self._size is None: # <<<<<<<<<<<<<<
 * result = 1
 *
 */
__pyx_t_1 = (__pyx_v_self->_size == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":560
 * def __get__(self):
 * if self._size is None:
 * result = 1 # <<<<<<<<<<<<<<
 *
 * for length in self.shape:
 */
__Pyx_INCREF(__pyx_int_1);
__pyx_v_result = __pyx_int_1;
/* "View.MemoryView":562
 * result = 1
 *
 * for length in self.shape: # <<<<<<<<<<<<<<
 * result *= length
 *
 */
/* Fetch self.shape via attribute lookup, then set up iteration:
 * direct indexing for exact list/tuple, else a general iterator. */
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
if (PyList_CheckExact(__pyx_t_3) || PyTuple_CheckExact(__pyx_t_3)) {
__pyx_t_4 = __pyx_t_3; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
__pyx_t_6 = NULL;
} else {
__pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
for (;;) {
if (!__pyx_t_6 && PyList_CheckExact(__pyx_t_4)) {
if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_3 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_3); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_3 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
} else if (!__pyx_t_6 && PyTuple_CheckExact(__pyx_t_4)) {
if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_3); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_3 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
} else {
/* Generic iterator path: StopIteration ends the loop, any other
 * exception propagates to the error label. */
__pyx_t_3 = __pyx_t_6(__pyx_t_4);
if (unlikely(!__pyx_t_3)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
break;
}
__Pyx_GOTREF(__pyx_t_3);
}
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":563
 *
 * for length in self.shape:
 * result *= length # <<<<<<<<<<<<<<
 *
 * self._size = result
 */
__pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 563; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF_SET(__pyx_v_result, __pyx_t_3);
__pyx_t_3 = 0;
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "View.MemoryView":565
 * result *= length
 *
 * self._size = result # <<<<<<<<<<<<<<
 *
 * return self._size
 */
/* Cache the computed product: swap the reference held in self->_size. */
__Pyx_INCREF(__pyx_v_result);
__Pyx_GIVEREF(__pyx_v_result);
__Pyx_GOTREF(__pyx_v_self->_size);
__Pyx_DECREF(__pyx_v_self->_size);
__pyx_v_self->_size = __pyx_v_result;
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":567
 * self._size = result
 *
 * return self._size # <<<<<<<<<<<<<<
 *
 * def __len__(self):
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->_size);
__pyx_r = __pyx_v_self->_size;
goto __pyx_L0;
/* "View.MemoryView":558
 * property size:
 * @cname('__pyx_memoryview_get_size')
 * def __get__(self): # <<<<<<<<<<<<<<
 * if self._size is None:
 * result = 1
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":569
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* Python wrapper */
/* sq_length slot wrapper for memoryview.__len__: casts self and
 * delegates to the typed implementation, returning its Py_ssize_t. */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_memoryview_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.__len__: returns the first dimension's
 * extent (self.view.shape[0]) when ndim >= 1, else 0 for a
 * zero-dimensional view. Cannot fail, so there is no error label. */
static Py_ssize_t __pyx_memoryview_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":570
 *
 * def __len__(self):
 * if self.view.ndim >= 1: # <<<<<<<<<<<<<<
 * return self.view.shape[0]
 *
 */
__pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":571
 * def __len__(self):
 * if self.view.ndim >= 1:
 * return self.view.shape[0] # <<<<<<<<<<<<<<
 *
 * return 0
 */
__pyx_r = (__pyx_v_self->view.shape[0]);
goto __pyx_L0;
}
/* "View.MemoryView":573
 * return self.view.shape[0]
 *
 * return 0 # <<<<<<<<<<<<<<
 *
 * def __repr__(self):
 */
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":569
 * return self._size
 *
 * def __len__(self): # <<<<<<<<<<<<<<
 * if self.view.ndim >= 1:
 * return self.view.shape[0]
 */
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":575
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* Python wrapper */
/* tp_repr slot wrapper for memoryview.__repr__: casts self and
 * delegates to the typed implementation. Returns a new reference or
 * NULL on error. */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_memoryview_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.__repr__: formats
 * "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, id(self)).
 * It fetches base.__class__.__name__ via chained attribute lookups, calls
 * the builtin id(self), packs both into a 2-tuple, and applies %-style
 * string formatting. Returns a new reference or NULL on error. */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":576
 *
 * def __repr__(self):
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
 * id(self))
 *
 */
__Pyx_XDECREF(__pyx_r);
/* self.base -> .__class__ -> .__name__ (each lookup replaces the last). */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":577
 * def __repr__(self):
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
 * id(self)) # <<<<<<<<<<<<<<
 *
 * def __str__(self):
 */
/* Call builtin id(self) with a freshly packed 1-tuple argument. */
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_id, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":576
 *
 * def __repr__(self):
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
 * id(self))
 *
 */
/* Pack (class name, id) and apply %-formatting to the template string. */
__pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":575
 * return 0
 *
 * def __repr__(self): # <<<<<<<<<<<<<<
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
 * id(self))
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":579
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* Python wrapper */
/* tp_str slot wrapper for memoryview.__str__: casts self and delegates
 * to the typed implementation. Returns a new reference or NULL on
 * error. */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
__pyx_r = __pyx_memoryview_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.__str__: formats
 * "<MemoryView of %r object>" % (self.base.__class__.__name__,)
 * via chained attribute lookups, a 1-tuple pack, and %-style string
 * formatting. Returns a new reference or NULL on error. */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__str__", 0);
/* "View.MemoryView":580
 *
 * def __str__(self):
 * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
 *
 *
 */
__Pyx_XDECREF(__pyx_r);
/* self.base -> .__class__ -> .__name__ */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":579
 * id(self))
 *
 * def __str__(self): # <<<<<<<<<<<<<<
 * return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":583
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
/* Method wrapper for memoryview.is_c_contig(): takes no Python
 * arguments; casts self and delegates to the typed implementation.
 * Returns a new reference (Python bool) or NULL on error. */
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.is_c_contig(): obtains a memviewslice
 * for self (tmp is stack storage the helper may fill) and returns the
 * boolean result of the C-order ('C') contiguity check over
 * self.view.ndim dimensions, boxed as a Python bool. */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_c_contig", 0);
/* "View.MemoryView":586
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
 * return slice_is_contig(mslice, 'C', self.view.ndim)
 *
 */
__pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp));
/* "View.MemoryView":587
 * cdef __Pyx_memviewslice tmp
 * mslice = get_slice_from_memview(self, &tmp)
 * return slice_is_contig(mslice, 'C', self.view.ndim) # <<<<<<<<<<<<<<
 *
 * def is_f_contig(self):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig(__pyx_v_mslice, 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 587; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":583
 *
 *
 * def is_c_contig(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":589
* return slice_is_contig(mslice, 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
/* Method wrapper for memoryview.is_f_contig(): takes no Python
 * arguments; casts self and delegates to the typed implementation.
 * Returns a new reference (Python bool) or NULL on error. */
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.is_f_contig(): identical to is_c_contig
 * except the contiguity check is run with Fortran order ('F').
 * Returns a Python bool (new reference) or NULL on error. */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_f_contig", 0);
/* "View.MemoryView":592
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
 * return slice_is_contig(mslice, 'F', self.view.ndim)
 *
 */
__pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp));
/* "View.MemoryView":593
 * cdef __Pyx_memviewslice tmp
 * mslice = get_slice_from_memview(self, &tmp)
 * return slice_is_contig(mslice, 'F', self.view.ndim) # <<<<<<<<<<<<<<
 *
 * def copy(self):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig(__pyx_v_mslice, 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":589
 * return slice_is_contig(mslice, 'C', self.view.ndim)
 *
 * def is_f_contig(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice *mslice
 * cdef __Pyx_memviewslice tmp
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":595
* return slice_is_contig(mslice, 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* Python wrapper */
/* Method wrapper for memoryview.copy(): takes no Python arguments;
 * casts self and delegates to the typed implementation. Returns a new
 * reference or NULL on error. */
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy (wrapper)", 0);
__pyx_r = __pyx_memoryview_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.copy(): produces a C-contiguous copy.
 * Steps:
 *  1. flags = self.flags with PyBUF_F_CONTIGUOUS cleared;
 *  2. slice_copy fills a local memviewslice describing self;
 *  3. copy_new_contig allocates a new C-order ("c") contiguous buffer
 *     with flags | PyBUF_C_CONTIGUOUS (errors detected via
 *     PyErr_Occurred since the helper returns a struct by value);
 *  4. the new slice is wrapped back into a memoryview object.
 * Returns a new reference or NULL on error with a traceback recorded. */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_mslice;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy", 0);
/* "View.MemoryView":597
 * def copy(self):
 * cdef __Pyx_memviewslice mslice
 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<<
 *
 * slice_copy(self, &mslice)
 */
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));
/* "View.MemoryView":599
 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
 *
 * slice_copy(self, &mslice) # <<<<<<<<<<<<<<
 * mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
 * self.view.itemsize,
 */
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));
/* "View.MemoryView":600
 *
 * slice_copy(self, &mslice)
 * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<<
 * self.view.itemsize,
 * flags|PyBUF_C_CONTIGUOUS,
 */
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), __pyx_k_c, __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 600; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":605
 * self.dtype_is_object)
 *
 * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<<
 *
 * def copy_fortran(self):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 605; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":595
 * return slice_is_contig(mslice, 'F', self.view.ndim)
 *
 * def copy(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice mslice
 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":607
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* Python wrapper */
/* Method wrapper for memoryview.copy_fortran(): takes no Python
 * arguments; casts self and delegates to the typed implementation.
 * Returns a new reference or NULL on error. */
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
__pyx_r = __pyx_memoryview_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Implementation of memoryview.copy_fortran(): returns a new memoryview
 * backed by a Fortran-contiguous copy of this view's data.  Mirrors
 * copy(), but clears PyBUF_C_CONTIGUOUS from the flags and requests an
 * 'F'-layout contiguous buffer instead.  Returns NULL on error (after
 * recording a traceback). */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy_fortran", 0);
/* "View.MemoryView":609
 * def copy_fortran(self):
 * cdef __Pyx_memviewslice src, dst
 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<<
 *
 * slice_copy(self, &src)
 */
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));
/* "View.MemoryView":611
 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
 *
 * slice_copy(self, &src) # <<<<<<<<<<<<<<
 * dst = slice_copy_contig(&src, "fortran", self.view.ndim,
 * self.view.itemsize,
 */
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));
/* "View.MemoryView":612
 *
 * slice_copy(self, &src)
 * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<<
 * self.view.itemsize,
 * flags|PyBUF_F_CONTIGUOUS,
 */
/* copy_new_contig signals failure via the Python error indicator, hence
 * the PyErr_Occurred() check rather than a sentinel return value */
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), __pyx_k_fortran, __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 612; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_dst = __pyx_t_1;
/* "View.MemoryView":617
 * self.dtype_is_object)
 *
 * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<<
 *
 *
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 617; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":607
 * return memoryview_copy_from_slice(self, &mslice)
 *
 * def copy_fortran(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice src, dst
 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":621
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
/* C-level constructor for the memoryview extension type
 * (cname __pyx_memoryview_new / cdef memoryview_cwrapper).
 * Builds the argument tuple (o, flags, dtype_is_object), calls the
 * memoryview type, stamps the caller-supplied typeinfo pointer onto the
 * result, and returns a new reference.  Returns 0 (NULL) on error. */
static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) {
struct __pyx_memoryview_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_cwrapper", 0);
/* "View.MemoryView":622
 * @cname('__pyx_memoryview_new')
 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
 * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<<
 * result.typeinfo = typeinfo
 * return result
 */
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 622; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 622; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 622; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_o);
/* PyTuple_SET_ITEM steals a reference; the INCREF above and the GIVEREF
 * calls below account for the transferred ownership */
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
__Pyx_GIVEREF(__pyx_v_o);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryview_type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 622; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":623
 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
 * cdef memoryview result = memoryview(o, flags, dtype_is_object)
 * result.typeinfo = typeinfo # <<<<<<<<<<<<<<
 * return result
 *
 */
__pyx_v_result->typeinfo = __pyx_v_typeinfo;
/* "View.MemoryView":624
 * cdef memoryview result = memoryview(o, flags, dtype_is_object)
 * result.typeinfo = typeinfo
 * return result # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_check')
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":621
 *
 * @cname('__pyx_memoryview_new')
 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
 * cdef memoryview result = memoryview(o, flags, dtype_is_object)
 * result.typeinfo = typeinfo
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
/* drop the local's reference; __pyx_r holds its own reference on success */
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":627
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
/* cdef inline bint memoryview_check(object o):
 * Returns nonzero iff o is an instance of this module's memoryview
 * extension type (isinstance check via __Pyx_TypeCheck).  Cannot fail. */
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("memoryview_check", 0);
/* "View.MemoryView":628
 * @cname('__pyx_memoryview_check')
 * cdef inline bint memoryview_check(object o):
 * return isinstance(o, memoryview) # <<<<<<<<<<<<<<
 *
 * cdef tuple _unellipsify(object index, int ndim):
 */
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, ((PyObject *)__pyx_memoryview_type));
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* "View.MemoryView":627
 *
 * @cname('__pyx_memoryview_check')
 * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
 * return isinstance(o, memoryview)
 *
 */
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":630
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
/* cdef tuple _unellipsify(object index, int ndim):
 * Normalizes an indexing expression for an ndim-dimensional memoryview:
 *   - wraps a non-tuple index into a 1-tuple;
 *   - expands the first Ellipsis into enough slice(None) entries to pad
 *     the index out to ndim dimensions (later Ellipses become a single
 *     slice(None));
 *   - rejects items that are neither slices nor integer indices with a
 *     TypeError;
 *   - appends trailing slice(None) entries for unspecified dimensions.
 * Returns the 2-tuple (have_slices_or_nslices, normalized_index_tuple),
 * or 0 (NULL) on error. */
static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
PyObject *__pyx_v_tup = NULL;
PyObject *__pyx_v_result = NULL;
int __pyx_v_have_slices;
int __pyx_v_seen_ellipsis;
CYTHON_UNUSED PyObject *__pyx_v_idx = NULL;
PyObject *__pyx_v_item = NULL;
Py_ssize_t __pyx_v_nslices;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
PyObject *(*__pyx_t_6)(PyObject *);
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
int __pyx_t_10;
int __pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("_unellipsify", 0);
/* "View.MemoryView":635
 * full slices.
 * """
 * if not isinstance(index, tuple): # <<<<<<<<<<<<<<
 * tup = (index,)
 * else:
 */
__pyx_t_1 = PyTuple_Check(__pyx_v_index);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":636
 * """
 * if not isinstance(index, tuple):
 * tup = (index,) # <<<<<<<<<<<<<<
 * else:
 * tup = index
 */
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 636; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_index);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
__Pyx_GIVEREF(__pyx_v_index);
__pyx_v_tup = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":638
 * tup = (index,)
 * else:
 * tup = index # <<<<<<<<<<<<<<
 *
 * result = []
 */
__Pyx_INCREF(__pyx_v_index);
__pyx_v_tup = __pyx_v_index;
}
__pyx_L3:;
/* "View.MemoryView":640
 * tup = index
 *
 * result = [] # <<<<<<<<<<<<<<
 * have_slices = False
 * seen_ellipsis = False
 */
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 640; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_result = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":641
 *
 * result = []
 * have_slices = False # <<<<<<<<<<<<<<
 * seen_ellipsis = False
 * for idx, item in enumerate(tup):
 */
__pyx_v_have_slices = 0;
/* "View.MemoryView":642
 * result = []
 * have_slices = False
 * seen_ellipsis = False # <<<<<<<<<<<<<<
 * for idx, item in enumerate(tup):
 * if item is Ellipsis:
 */
__pyx_v_seen_ellipsis = 0;
/* "View.MemoryView":643
 * have_slices = False
 * seen_ellipsis = False
 * for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
 * if item is Ellipsis:
 * if not seen_ellipsis:
 */
/* enumerate() is compiled to a manual counter (__pyx_t_3) plus the
 * list/tuple fast path or the generic iterator protocol below */
__Pyx_INCREF(__pyx_int_0);
__pyx_t_3 = __pyx_int_0;
if (PyList_CheckExact(__pyx_v_tup) || PyTuple_CheckExact(__pyx_v_tup)) {
__pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
__pyx_t_6 = NULL;
} else {
__pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext;
}
for (;;) {
if (!__pyx_t_6 && PyList_CheckExact(__pyx_t_4)) {
if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
} else if (!__pyx_t_6 && PyTuple_CheckExact(__pyx_t_4)) {
if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
} else {
__pyx_t_7 = __pyx_t_6(__pyx_t_4);
if (unlikely(!__pyx_t_7)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
break;
}
__Pyx_GOTREF(__pyx_t_7);
}
__Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7);
__pyx_t_7 = 0;
__Pyx_INCREF(__pyx_t_3);
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3);
__pyx_t_7 = PyNumber_Add(__pyx_t_3, __pyx_int_1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3);
__pyx_t_3 = __pyx_t_7;
__pyx_t_7 = 0;
/* "View.MemoryView":644
 * seen_ellipsis = False
 * for idx, item in enumerate(tup):
 * if item is Ellipsis: # <<<<<<<<<<<<<<
 * if not seen_ellipsis:
 * result.extend([slice(None)] * (ndim - len(tup) + 1))
 */
__pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":645
 * for idx, item in enumerate(tup):
 * if item is Ellipsis:
 * if not seen_ellipsis: # <<<<<<<<<<<<<<
 * result.extend([slice(None)] * (ndim - len(tup) + 1))
 * seen_ellipsis = True
 */
__pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":646
 * if item is Ellipsis:
 * if not seen_ellipsis:
 * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
 * seen_ellipsis = True
 * else:
 */
/* build [slice(None)] * (ndim - len(tup) + 1) by hand: one slice(None)
 * object repeated into a freshly sized list (count clamped at 0) */
__pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)(&PySlice_Type))), __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_9 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) {
__Pyx_INCREF(__pyx_t_7);
PyList_SET_ITEM(__pyx_t_9, __pyx_temp, __pyx_t_7);
__Pyx_GIVEREF(__pyx_t_7);
}
}
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_10 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_9); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "View.MemoryView":647
 * if not seen_ellipsis:
 * result.extend([slice(None)] * (ndim - len(tup) + 1))
 * seen_ellipsis = True # <<<<<<<<<<<<<<
 * else:
 * result.append(slice(None))
 */
__pyx_v_seen_ellipsis = 1;
goto __pyx_L7;
}
/*else*/ {
/* "View.MemoryView":649
 * seen_ellipsis = True
 * else:
 * result.append(slice(None)) # <<<<<<<<<<<<<<
 * have_slices = True
 * else:
 */
__pyx_t_9 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)(&PySlice_Type))), __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 649; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_10 = __Pyx_PyList_Append(__pyx_v_result, __pyx_t_9); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 649; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__pyx_L7:;
/* "View.MemoryView":650
 * else:
 * result.append(slice(None))
 * have_slices = True # <<<<<<<<<<<<<<
 * else:
 * if not isinstance(item, slice) and not PyIndex_Check(item):
 */
__pyx_v_have_slices = 1;
goto __pyx_L6;
}
/*else*/ {
/* "View.MemoryView":652
 * have_slices = True
 * else:
 * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
 * raise TypeError("Cannot index with type '%s'" % type(item))
 *
 */
__pyx_t_1 = PySlice_Check(__pyx_v_item);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
__pyx_t_1 = ((!(__Pyx_PyIndex_Check(__pyx_v_item) != 0)) != 0);
__pyx_t_11 = __pyx_t_1;
} else {
__pyx_t_11 = __pyx_t_2;
}
if (__pyx_t_11) {
/* "View.MemoryView":653
 * else:
 * if not isinstance(item, slice) and not PyIndex_Check(item):
 * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<<
 *
 * have_slices = have_slices or isinstance(item, slice)
 */
__pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 653; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 653; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_9);
__Pyx_GIVEREF(__pyx_t_9);
__pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_t_7, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 653; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_Raise(__pyx_t_9, 0, 0, 0);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 653; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":655
 * raise TypeError("Cannot index with type '%s'" % type(item))
 *
 * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<<
 * result.append(item)
 *
 */
if (!__pyx_v_have_slices) {
__pyx_t_11 = PySlice_Check(__pyx_v_item);
__pyx_t_2 = __pyx_t_11;
} else {
__pyx_t_2 = __pyx_v_have_slices;
}
__pyx_v_have_slices = __pyx_t_2;
/* "View.MemoryView":656
 *
 * have_slices = have_slices or isinstance(item, slice)
 * result.append(item) # <<<<<<<<<<<<<<
 *
 * nslices = ndim - len(result)
 */
__pyx_t_10 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 656; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L6:;
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":658
 * result.append(item)
 *
 * nslices = ndim - len(result) # <<<<<<<<<<<<<<
 * if nslices:
 * result.extend([slice(None)] * nslices)
 */
__pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 658; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5);
/* "View.MemoryView":659
 *
 * nslices = ndim - len(result)
 * if nslices: # <<<<<<<<<<<<<<
 * result.extend([slice(None)] * nslices)
 *
 */
__pyx_t_2 = (__pyx_v_nslices != 0);
if (__pyx_t_2) {
/* "View.MemoryView":660
 * nslices = ndim - len(result)
 * if nslices:
 * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
 *
 * return have_slices or nslices, tuple(result)
 */
/* pad the remaining (unindexed) dimensions with full slices */
__pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)(&PySlice_Type))), __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) {
__Pyx_INCREF(__pyx_t_3);
PyList_SET_ITEM(__pyx_t_4, __pyx_temp, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
}
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_10 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_4); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
goto __pyx_L9;
}
__pyx_L9:;
/* "View.MemoryView":662
 * result.extend([slice(None)] * nslices)
 *
 * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<<
 *
 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
 */
/* Python 'or' semantics: the first element of the returned pair is
 * bool(have_slices) if truthy, otherwise the int nslices */
__Pyx_XDECREF(__pyx_r);
__pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 662; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 662; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (!__pyx_t_2) {
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 662; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_9 = __pyx_t_3;
__pyx_t_3 = 0;
} else {
__pyx_t_9 = __pyx_t_4;
__pyx_t_4 = 0;
}
__pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 662; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 662; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_9);
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_9 = 0;
__pyx_t_4 = 0;
__pyx_r = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":630
 * return isinstance(o, memoryview)
 *
 * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
 * """
 * Replace all ellipses with full slices and fill incomplete indices with
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_tup);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_XDECREF(__pyx_v_item);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":664
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* cdef int i
* for i in range(ndim):
*/
/* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
 * Raises ValueError if any of the ndim suboffsets is >= 0, i.e. if any
 * dimension uses PIL-style indirect (pointer-chasing) access, which the
 * slicing code below does not support.  Returns None on success, 0 (NULL)
 * with an exception set on failure. */
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
int __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
/* "View.MemoryView":666
 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
 * cdef int i
 * for i in range(ndim): # <<<<<<<<<<<<<<
 * if suboffsets[i] >= 0:
 * raise ValueError("Indirect dimensions not supported")
 */
__pyx_t_1 = __pyx_v_ndim;
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
__pyx_v_i = __pyx_t_2;
/* "View.MemoryView":667
 * cdef int i
 * for i in range(ndim):
 * if suboffsets[i] >= 0: # <<<<<<<<<<<<<<
 * raise ValueError("Indirect dimensions not supported")
 *
 */
/* per the buffer protocol, a negative suboffset means "direct access" */
__pyx_t_3 = (((__pyx_v_suboffsets[__pyx_v_i]) >= 0) != 0);
if (__pyx_t_3) {
/* "View.MemoryView":668
 * for i in range(ndim):
 * if suboffsets[i] >= 0:
 * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 668; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 668; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
}
/* "View.MemoryView":664
 * return have_slices or nslices, tuple(result)
 *
 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
 * cdef int i
 * for i in range(ndim):
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":675
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
int __pyx_v_new_ndim;
int __pyx_v_suboffset_dim;
int __pyx_v_dim;
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
__Pyx_memviewslice *__pyx_v_p_src;
struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
__Pyx_memviewslice *__pyx_v_p_dst;
int *__pyx_v_p_suboffset_dim;
Py_ssize_t __pyx_v_start;
Py_ssize_t __pyx_v_stop;
Py_ssize_t __pyx_v_step;
int __pyx_v_have_start;
int __pyx_v_have_stop;
int __pyx_v_have_step;
PyObject *__pyx_v_index = NULL;
struct __pyx_memoryview_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
struct __pyx_memoryview_obj *__pyx_t_4;
char *__pyx_t_5;
int __pyx_t_6;
Py_ssize_t __pyx_t_7;
PyObject *(*__pyx_t_8)(PyObject *);
PyObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
PyObject *__pyx_t_12 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memview_slice", 0);
/* "View.MemoryView":676
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices):
* cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<<
* cdef bint negative_step
* cdef __Pyx_memviewslice src, dst
*/
__pyx_v_new_ndim = 0;
__pyx_v_suboffset_dim = -1;
/* "View.MemoryView":683
*
*
* memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<<
*
* cdef _memoryviewslice memviewsliceobj
*/
memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)));
/* "View.MemoryView":687
* cdef _memoryviewslice memviewsliceobj
*
* assert memview.view.ndim > 0 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 687; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
}
#endif
/* "View.MemoryView":689
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type));
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":690
*
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview # <<<<<<<<<<<<<<
* p_src = &memviewsliceobj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":691
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, &src)
*/
__pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":693
* p_src = &memviewsliceobj.from_slice
* else:
* slice_copy(memview, &src) # <<<<<<<<<<<<<<
* p_src = &src
*
*/
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
/* "View.MemoryView":694
* else:
* slice_copy(memview, &src)
* p_src = &src # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_p_src = (&__pyx_v_src);
}
__pyx_L3:;
/* "View.MemoryView":700
*
*
* dst.memview = p_src.memview # <<<<<<<<<<<<<<
* dst.data = p_src.data
*
*/
__pyx_t_4 = __pyx_v_p_src->memview;
__pyx_v_dst.memview = __pyx_t_4;
/* "View.MemoryView":701
*
* dst.memview = p_src.memview
* dst.data = p_src.data # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_v_p_src->data;
__pyx_v_dst.data = __pyx_t_5;
/* "View.MemoryView":706
*
*
* cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<<
* cdef int *p_suboffset_dim = &suboffset_dim
* cdef Py_ssize_t start, stop, step
*/
__pyx_v_p_dst = (&__pyx_v_dst);
/* "View.MemoryView":707
*
* cdef __Pyx_memviewslice *p_dst = &dst
* cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<<
* cdef Py_ssize_t start, stop, step
* cdef bint have_start, have_stop, have_step
*/
__pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
/* "View.MemoryView":711
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
__pyx_t_6 = 0;
if (PyList_CheckExact(__pyx_v_indices) || PyTuple_CheckExact(__pyx_v_indices)) {
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0;
__pyx_t_8 = NULL;
} else {
__pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext;
}
for (;;) {
if (!__pyx_t_8 && PyList_CheckExact(__pyx_t_3)) {
if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
} else if (!__pyx_t_8 && PyTuple_CheckExact(__pyx_t_3)) {
if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
} else {
__pyx_t_9 = __pyx_t_8(__pyx_t_3);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_v_dim = __pyx_t_6;
__pyx_t_6 = (__pyx_t_6 + 1);
/* "View.MemoryView":712
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
__pyx_t_2 = (__Pyx_PyIndex_Check(__pyx_v_index) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":716
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
* index, 0, 0, # start, stop, step # <<<<<<<<<<<<<<
* 0, 0, 0, # have_{start,stop,step}
* False)
*/
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 716; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":713
* for dim, index in enumerate(indices):
* if PyIndex_Check(index):
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 713; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
/* "View.MemoryView":719
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
__pyx_t_2 = (__pyx_v_index == Py_None);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":720
* False)
* elif index is None:
* p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<<
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
*/
(__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
/* "View.MemoryView":721
* elif index is None:
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<<
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1
*/
(__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
/* "View.MemoryView":722
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<<
* new_ndim += 1
* else:
*/
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1;
/* "View.MemoryView":723
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1 # <<<<<<<<<<<<<<
* else:
* start = index.start or 0
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
goto __pyx_L6;
}
/*else*/ {
/* "View.MemoryView":725
* new_ndim += 1
* else:
* start = index.start or 0 # <<<<<<<<<<<<<<
* stop = index.stop or 0
* step = index.step or 0
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_INCREF(__pyx_int_0);
__pyx_t_12 = __pyx_int_0;
} else {
__pyx_t_12 = __pyx_t_9;
__pyx_t_9 = 0;
}
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_t_12); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__pyx_v_start = __pyx_t_10;
/* "View.MemoryView":726
* else:
* start = index.start or 0
* stop = index.stop or 0 # <<<<<<<<<<<<<<
* step = index.step or 0
*
*/
__pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 726; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_12);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_12); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 726; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__Pyx_INCREF(__pyx_int_0);
__pyx_t_9 = __pyx_int_0;
} else {
__pyx_t_9 = __pyx_t_12;
__pyx_t_12 = 0;
}
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 726; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_stop = __pyx_t_10;
/* "View.MemoryView":727
* start = index.start or 0
* stop = index.stop or 0
* step = index.step or 0 # <<<<<<<<<<<<<<
*
* have_start = index.start is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 727; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 727; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_INCREF(__pyx_int_0);
__pyx_t_12 = __pyx_int_0;
} else {
__pyx_t_12 = __pyx_t_9;
__pyx_t_9 = 0;
}
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_t_12); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 727; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__pyx_v_step = __pyx_t_10;
/* "View.MemoryView":729
* step = index.step or 0
*
* have_start = index.start is not None # <<<<<<<<<<<<<<
* have_stop = index.stop is not None
* have_step = index.step is not None
*/
__pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 729; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_12);
__pyx_t_1 = (__pyx_t_12 != Py_None);
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__pyx_v_have_start = __pyx_t_1;
/* "View.MemoryView":730
*
* have_start = index.start is not None
* have_stop = index.stop is not None # <<<<<<<<<<<<<<
* have_step = index.step is not None
*
*/
__pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 730; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_12);
__pyx_t_1 = (__pyx_t_12 != Py_None);
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__pyx_v_have_stop = __pyx_t_1;
/* "View.MemoryView":731
* have_start = index.start is not None
* have_stop = index.stop is not None
* have_step = index.step is not None # <<<<<<<<<<<<<<
*
* slice_memviewslice(
*/
__pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 731; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_12);
__pyx_t_1 = (__pyx_t_12 != Py_None);
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__pyx_v_have_step = __pyx_t_1;
/* "View.MemoryView":733
* have_step = index.step is not None
*
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 733; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":739
* have_start, have_stop, have_step,
* True)
* new_ndim += 1 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
}
__pyx_L6:;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":741
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type));
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":742
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":743
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func, # <<<<<<<<<<<<<<
* memviewsliceobj.to_dtype_func,
* memview.dtype_is_object)
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 743; __pyx_clineno = __LINE__; goto __pyx_L1_error;} }
/* "View.MemoryView":744
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
* else:
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 744; __pyx_clineno = __LINE__; goto __pyx_L1_error;} }
/* "View.MemoryView":742
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 742; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 742; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/*else*/ {
/* "View.MemoryView":747
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":748
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 747; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
/* "View.MemoryView":747
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 747; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":675
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_XDECREF(__pyx_t_12);
__Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":772
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
/* NOTE(review): Cython-generated code (View.MemoryView utility module) —
 * do not hand-edit logic; regenerate from the .pyx instead.
 *
 * slice_memviewslice: apply ONE indexing operation (an integer index when
 * is_slice==0, or a start:stop:step slice when is_slice!=0) along source
 * dimension `dim`, writing the resulting extent into
 * dst->shape/strides/suboffsets[new_ndim] and advancing the data pointer
 * (or the pending suboffset — see below) by start*stride.
 *
 * Called once per indexing dimension by memview_slice.  Runs without the
 * GIL (declared `nogil` in the .pyx); the GIL is re-acquired only on the
 * error path to record the traceback.  Returns 0 on success, -1 on error
 * (IndexError / ValueError raised via __pyx_memoryview_err_dim).
 */
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
Py_ssize_t __pyx_v_new_shape;
int __pyx_v_negative_step;
int __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":792
 * cdef bint negative_step
 *
 * if not is_slice: # <<<<<<<<<<<<<<
 *
 * if start < 0:
 */
/* Integer-index path: normalize a negative index and bounds-check; an
 * integer index consumes the dimension, so nothing is written to
 * dst->shape/strides for it. */
__pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":794
 * if not is_slice:
 *
 * if start < 0: # <<<<<<<<<<<<<<
 * start += shape
 * if not 0 <= start < shape:
 */
__pyx_t_1 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":795
 *
 * if start < 0:
 * start += shape # <<<<<<<<<<<<<<
 * if not 0 <= start < shape:
 * _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
 */
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
goto __pyx_L4;
}
__pyx_L4:;
/* "View.MemoryView":796
 * if start < 0:
 * start += shape
 * if not 0 <= start < shape: # <<<<<<<<<<<<<<
 * _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
 * else:
 */
__pyx_t_1 = (0 <= __pyx_v_start);
if (__pyx_t_1) {
__pyx_t_1 = (__pyx_v_start < __pyx_v_shape);
}
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":797
 * start += shape
 * if not 0 <= start < shape:
 * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<<
 * else:
 *
 */
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, __pyx_k_Index_out_of_bounds_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L5;
}
__pyx_L5:;
goto __pyx_L3;
}
/*else*/ {
/* Slice path: clamp start/stop to [0, shape] (Python slice semantics —
 * out-of-range slice bounds clamp rather than raise), then compute the
 * new extent for output dimension new_ndim. */
/* "View.MemoryView":800
 * else:
 *
 * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<<
 *
 * if have_step and step == 0:
 */
__pyx_t_2 = (__pyx_v_have_step != 0);
if (__pyx_t_2) {
__pyx_t_1 = (__pyx_v_step < 0);
__pyx_t_4 = __pyx_t_1;
} else {
__pyx_t_4 = __pyx_t_2;
}
__pyx_v_negative_step = __pyx_t_4;
/* "View.MemoryView":802
 * negative_step = have_step != 0 and step < 0
 *
 * if have_step and step == 0: # <<<<<<<<<<<<<<
 * _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
 *
 */
if ((__pyx_v_have_step != 0)) {
__pyx_t_4 = (__pyx_v_step == 0);
__pyx_t_2 = __pyx_t_4;
} else {
__pyx_t_2 = (__pyx_v_have_step != 0);
}
if (__pyx_t_2) {
/* "View.MemoryView":803
 *
 * if have_step and step == 0:
 * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, __pyx_k_Step_may_not_be_zero_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
__pyx_L6:;
/* "View.MemoryView":806
 *
 *
 * if have_start: # <<<<<<<<<<<<<<
 * if start < 0:
 * start += shape
 */
__pyx_t_2 = (__pyx_v_have_start != 0);
if (__pyx_t_2) {
/* "View.MemoryView":807
 *
 * if have_start:
 * if start < 0: # <<<<<<<<<<<<<<
 * start += shape
 * if start < 0:
 */
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":808
 * if have_start:
 * if start < 0:
 * start += shape # <<<<<<<<<<<<<<
 * if start < 0:
 * start = 0
 */
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":809
 * if start < 0:
 * start += shape
 * if start < 0: # <<<<<<<<<<<<<<
 * start = 0
 * elif start >= shape:
 */
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":810
 * start += shape
 * if start < 0:
 * start = 0 # <<<<<<<<<<<<<<
 * elif start >= shape:
 * if negative_step:
 */
__pyx_v_start = 0;
goto __pyx_L9;
}
__pyx_L9:;
goto __pyx_L8;
}
/* "View.MemoryView":811
 * if start < 0:
 * start = 0
 * elif start >= shape: # <<<<<<<<<<<<<<
 * if negative_step:
 * start = shape - 1
 */
__pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":812
 * start = 0
 * elif start >= shape:
 * if negative_step: # <<<<<<<<<<<<<<
 * start = shape - 1
 * else:
 */
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":813
 * elif start >= shape:
 * if negative_step:
 * start = shape - 1 # <<<<<<<<<<<<<<
 * else:
 * start = shape
 */
__pyx_v_start = (__pyx_v_shape - 1);
goto __pyx_L10;
}
/*else*/ {
/* "View.MemoryView":815
 * start = shape - 1
 * else:
 * start = shape # <<<<<<<<<<<<<<
 * else:
 * if negative_step:
 */
__pyx_v_start = __pyx_v_shape;
}
__pyx_L10:;
goto __pyx_L8;
}
__pyx_L8:;
goto __pyx_L7;
}
/*else*/ {
/* No explicit start: defaults mirror Python — shape-1 for a negative
 * step, 0 otherwise. */
/* "View.MemoryView":817
 * start = shape
 * else:
 * if negative_step: # <<<<<<<<<<<<<<
 * start = shape - 1
 * else:
 */
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":818
 * else:
 * if negative_step:
 * start = shape - 1 # <<<<<<<<<<<<<<
 * else:
 * start = 0
 */
__pyx_v_start = (__pyx_v_shape - 1);
goto __pyx_L11;
}
/*else*/ {
/* "View.MemoryView":820
 * start = shape - 1
 * else:
 * start = 0 # <<<<<<<<<<<<<<
 *
 * if have_stop:
 */
__pyx_v_start = 0;
}
__pyx_L11:;
}
__pyx_L7:;
/* "View.MemoryView":822
 * start = 0
 *
 * if have_stop: # <<<<<<<<<<<<<<
 * if stop < 0:
 * stop += shape
 */
__pyx_t_2 = (__pyx_v_have_stop != 0);
if (__pyx_t_2) {
/* "View.MemoryView":823
 *
 * if have_stop:
 * if stop < 0: # <<<<<<<<<<<<<<
 * stop += shape
 * if stop < 0:
 */
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":824
 * if have_stop:
 * if stop < 0:
 * stop += shape # <<<<<<<<<<<<<<
 * if stop < 0:
 * stop = 0
 */
__pyx_v_stop = (__pyx_v_stop + __pyx_v_shape);
/* "View.MemoryView":825
 * if stop < 0:
 * stop += shape
 * if stop < 0: # <<<<<<<<<<<<<<
 * stop = 0
 * elif stop > shape:
 */
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":826
 * stop += shape
 * if stop < 0:
 * stop = 0 # <<<<<<<<<<<<<<
 * elif stop > shape:
 * stop = shape
 */
__pyx_v_stop = 0;
goto __pyx_L14;
}
__pyx_L14:;
goto __pyx_L13;
}
/* "View.MemoryView":827
 * if stop < 0:
 * stop = 0
 * elif stop > shape: # <<<<<<<<<<<<<<
 * stop = shape
 * else:
 */
__pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":828
 * stop = 0
 * elif stop > shape:
 * stop = shape # <<<<<<<<<<<<<<
 * else:
 * if negative_step:
 */
__pyx_v_stop = __pyx_v_shape;
goto __pyx_L13;
}
__pyx_L13:;
goto __pyx_L12;
}
/*else*/ {
/* No explicit stop: -1 for a negative step (exclusive bound below
 * index 0), shape otherwise. */
/* "View.MemoryView":830
 * stop = shape
 * else:
 * if negative_step: # <<<<<<<<<<<<<<
 * stop = -1
 * else:
 */
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":831
 * else:
 * if negative_step:
 * stop = -1 # <<<<<<<<<<<<<<
 * else:
 * stop = shape
 */
__pyx_v_stop = -1;
goto __pyx_L15;
}
/*else*/ {
/* "View.MemoryView":833
 * stop = -1
 * else:
 * stop = shape # <<<<<<<<<<<<<<
 *
 * if not have_step:
 */
__pyx_v_stop = __pyx_v_shape;
}
__pyx_L15:;
}
__pyx_L12:;
/* "View.MemoryView":835
 * stop = shape
 *
 * if not have_step: # <<<<<<<<<<<<<<
 * step = 1
 *
 */
__pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":836
 *
 * if not have_step:
 * step = 1 # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_v_step = 1;
goto __pyx_L16;
}
__pyx_L16:;
/* "View.MemoryView":840
 *
 * with cython.cdivision(True):
 * new_shape = (stop - start) // step # <<<<<<<<<<<<<<
 *
 * if (stop - start) - step * new_shape:
 */
/* cython.cdivision(True): plain C `/` (truncation toward zero) is used
 * here instead of Python floor division; the remainder check below then
 * rounds the element count up when the range isn't an exact multiple of
 * step. */
__pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
/* "View.MemoryView":842
 * new_shape = (stop - start) // step
 *
 * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
 * new_shape += 1
 *
 */
__pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":843
 *
 * if (stop - start) - step * new_shape:
 * new_shape += 1 # <<<<<<<<<<<<<<
 *
 * if new_shape < 0:
 */
__pyx_v_new_shape = (__pyx_v_new_shape + 1);
goto __pyx_L17;
}
__pyx_L17:;
/* "View.MemoryView":845
 * new_shape += 1
 *
 * if new_shape < 0: # <<<<<<<<<<<<<<
 * new_shape = 0
 *
 */
__pyx_t_2 = ((__pyx_v_new_shape < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":846
 *
 * if new_shape < 0:
 * new_shape = 0 # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_v_new_shape = 0;
goto __pyx_L18;
}
__pyx_L18:;
/* Publish the sliced dimension into the destination slice. */
/* "View.MemoryView":849
 *
 *
 * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<<
 * dst.shape[new_ndim] = new_shape
 * dst.suboffsets[new_ndim] = suboffset
 */
(__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
/* "View.MemoryView":850
 *
 * dst.strides[new_ndim] = stride * step
 * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<<
 * dst.suboffsets[new_ndim] = suboffset
 *
 */
(__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
/* "View.MemoryView":851
 * dst.strides[new_ndim] = stride * step
 * dst.shape[new_ndim] = new_shape
 * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<<
 *
 *
 */
(__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
}
__pyx_L3:;
/* Offset bookkeeping: before any indirect (PEP 3118 suboffset) dimension
 * has been seen (*suboffset_dim < 0), the start offset is applied to the
 * base data pointer; afterwards it must be folded into that pending
 * dimension's suboffset, because the pointer is dereferenced there first. */
/* "View.MemoryView":854
 *
 *
 * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
 * dst.data += start * stride
 * else:
 */
__pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":855
 *
 * if suboffset_dim[0] < 0:
 * dst.data += start * stride # <<<<<<<<<<<<<<
 * else:
 * dst.suboffsets[suboffset_dim[0]] += start * stride
 */
__pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
goto __pyx_L19;
}
/*else*/ {
/* "View.MemoryView":857
 * dst.data += start * stride
 * else:
 * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<<
 *
 * if suboffset >= 0:
 */
__pyx_t_3 = (__pyx_v_suboffset_dim[0]);
(__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
}
__pyx_L19:;
/* "View.MemoryView":859
 * dst.suboffsets[suboffset_dim[0]] += start * stride
 *
 * if suboffset >= 0: # <<<<<<<<<<<<<<
 * if not is_slice:
 * if new_ndim == 0:
 */
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":860
 *
 * if suboffset >= 0:
 * if not is_slice: # <<<<<<<<<<<<<<
 * if new_ndim == 0:
 * dst.data = (<char **> dst.data)[0] + suboffset
 */
__pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":861
 * if suboffset >= 0:
 * if not is_slice:
 * if new_ndim == 0: # <<<<<<<<<<<<<<
 * dst.data = (<char **> dst.data)[0] + suboffset
 * else:
 */
__pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0);
if (__pyx_t_2) {
/* Integer index through an indirect dimension: chase the pointer now
 * (only legal while no sliced dimension precedes it). */
/* "View.MemoryView":862
 * if not is_slice:
 * if new_ndim == 0:
 * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<<
 * else:
 * _err_dim(IndexError, "All dimensions preceding dimension %d "
 */
__pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
goto __pyx_L22;
}
/*else*/ {
/* "View.MemoryView":864
 * dst.data = (<char **> dst.data)[0] + suboffset
 * else:
 * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<<
 * "must be indexed and not sliced", dim)
 * else:
 */
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, __pyx_k_All_dimensions_preceding_dimensi, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 864; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L22:;
goto __pyx_L21;
}
/*else*/ {
/* Sliced through an indirect dimension: remember it so later start
 * offsets are added to its suboffset rather than to dst->data. */
/* "View.MemoryView":867
 * "must be indexed and not sliced", dim)
 * else:
 * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<<
 *
 * return 0
 */
(__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
}
__pyx_L21:;
goto __pyx_L20;
}
__pyx_L20:;
/* "View.MemoryView":869
 * suboffset_dim[0] = new_ndim
 *
 * return 0 # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":772
 *
 * @cname('__pyx_memoryview_slice_memviewslice')
 * cdef int slice_memviewslice( # <<<<<<<<<<<<<<
 * __Pyx_memviewslice *dst,
 * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
 */
/* function exit code */
/* Error exit: this function runs nogil, so the GIL must be re-acquired
 * just long enough to record the Python traceback. */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":875
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
/* NOTE(review): Cython-generated code (View.MemoryView utility module) —
 * do not hand-edit logic; regenerate from the .pyx instead.
 *
 * pybuffer_index: resolve one integer index `index` along dimension `dim`
 * of a Py_buffer and return a pointer to the addressed item.  Handles
 * negative-index wraparound, bounds checking (raises IndexError), and a
 * PEP 3118 suboffset dereference for indirect dimensions.  A 0-dim buffer
 * is treated as a flat array of view->len / itemsize items with stride ==
 * itemsize.  Returns NULL on error (declared `except NULL` in the .pyx).
 */
static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_suboffset;
Py_ssize_t __pyx_v_itemsize;
char *__pyx_v_resultp;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("pybuffer_index", 0);
/* "View.MemoryView":877
 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
 * Py_ssize_t dim) except NULL:
 * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<<
 * cdef Py_ssize_t itemsize = view.itemsize
 * cdef char *resultp
 */
/* suboffset < 0 means "direct dimension" (no pointer dereference). */
__pyx_v_suboffset = -1;
/* "View.MemoryView":878
 * Py_ssize_t dim) except NULL:
 * cdef Py_ssize_t shape, stride, suboffset = -1
 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<<
 * cdef char *resultp
 *
 */
__pyx_t_1 = __pyx_v_view->itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":881
 * cdef char *resultp
 *
 * if view.ndim == 0: # <<<<<<<<<<<<<<
 * shape = view.len / itemsize
 * stride = itemsize
 */
__pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":882
 *
 * if view.ndim == 0:
 * shape = view.len / itemsize # <<<<<<<<<<<<<<
 * stride = itemsize
 * else:
 */
/* Cython-emitted division guards: Python semantics require an explicit
 * ZeroDivisionError / OverflowError instead of C undefined behavior; the
 * GIL is acquired around raising because callers may hold no GIL. */
if (unlikely(__pyx_v_itemsize == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 882; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
else if (sizeof(Py_ssize_t) == sizeof(long) && unlikely(__pyx_v_itemsize == -1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 882; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_v_shape = (__pyx_v_view->len / __pyx_v_itemsize);
/* "View.MemoryView":883
 * if view.ndim == 0:
 * shape = view.len / itemsize
 * stride = itemsize # <<<<<<<<<<<<<<
 * else:
 * shape = view.shape[dim]
 */
__pyx_v_stride = __pyx_v_itemsize;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":885
 * stride = itemsize
 * else:
 * shape = view.shape[dim] # <<<<<<<<<<<<<<
 * stride = view.strides[dim]
 * if view.suboffsets != NULL:
 */
__pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
/* "View.MemoryView":886
 * else:
 * shape = view.shape[dim]
 * stride = view.strides[dim] # <<<<<<<<<<<<<<
 * if view.suboffsets != NULL:
 * suboffset = view.suboffsets[dim]
 */
__pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
/* "View.MemoryView":887
 * shape = view.shape[dim]
 * stride = view.strides[dim]
 * if view.suboffsets != NULL: # <<<<<<<<<<<<<<
 * suboffset = view.suboffsets[dim]
 *
 */
__pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":888
 * stride = view.strides[dim]
 * if view.suboffsets != NULL:
 * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<<
 *
 * if index < 0:
 */
__pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
goto __pyx_L4;
}
__pyx_L4:;
}
__pyx_L3:;
/* Normalize a negative index; if it is still negative afterwards the
 * index was out of bounds. */
/* "View.MemoryView":890
 * suboffset = view.suboffsets[dim]
 *
 * if index < 0: # <<<<<<<<<<<<<<
 * index += view.shape[dim]
 * if index < 0:
 */
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":891
 *
 * if index < 0:
 * index += view.shape[dim] # <<<<<<<<<<<<<<
 * if index < 0:
 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 */
__pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
/* "View.MemoryView":892
 * if index < 0:
 * index += view.shape[dim]
 * if index < 0: # <<<<<<<<<<<<<<
 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 *
 */
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":893
 * index += view.shape[dim]
 * if index < 0:
 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
 *
 * if index >= shape:
 */
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 893; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 893; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 893; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 893; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 893; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
goto __pyx_L5;
}
__pyx_L5:;
/* "View.MemoryView":895
 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 *
 * if index >= shape: # <<<<<<<<<<<<<<
 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 *
 */
__pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":896
 *
 * if index >= shape:
 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
 *
 * resultp = bufp + index * stride
 */
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 896; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 896; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 896; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 896; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 896; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":898
 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 *
 * resultp = bufp + index * stride # <<<<<<<<<<<<<<
 * if suboffset >= 0:
 * resultp = (<char **> resultp)[0] + suboffset
 */
__pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
/* "View.MemoryView":899
 *
 * resultp = bufp + index * stride
 * if suboffset >= 0: # <<<<<<<<<<<<<<
 * resultp = (<char **> resultp)[0] + suboffset
 *
 */
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* Indirect (PEP 3118) dimension: the addressed slot holds a pointer
 * that must be dereferenced and offset by suboffset. */
/* "View.MemoryView":900
 * resultp = bufp + index * stride
 * if suboffset >= 0:
 * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<<
 *
 * return resultp
 */
__pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
goto __pyx_L8;
}
__pyx_L8:;
/* "View.MemoryView":902
 * resultp = (<char **> resultp)[0] + suboffset
 *
 * return resultp # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_r = __pyx_v_resultp;
goto __pyx_L0;
/* "View.MemoryView":875
 *
 * @cname('__pyx_pybuffer_index')
 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
 * Py_ssize_t dim) except NULL:
 * cdef Py_ssize_t shape, stride, suboffset = -1
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":908
 * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:
 *
 * In-place transpose of a memoryview slice: reverse the dimension order
 * by swapping shape[] and strides[] entries from both ends toward the
 * middle.  A dimension with a non-negative suboffset is indirect and
 * cannot be transposed this way, so ValueError is raised via
 * __pyx_memoryview_err.  Returns 1 on success, 0 on error (Cython's
 * "except 0" convention).  Runs without the GIL; it is re-acquired only
 * to record the traceback on the error path.
 */
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
  int __pyx_r;
  int __pyx_v_ndim;
  int __pyx_v_i;
  int __pyx_v_j;
  Py_ssize_t *__pyx_v_shape;
  Py_ssize_t *__pyx_v_strides;
  Py_ssize_t __pyx_swap_tmp;
  int __pyx_err_rc;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  /* "View.MemoryView":909-912: cache ndim and the geometry arrays. */
  __pyx_v_ndim = __pyx_v_memslice->memview->view.ndim;
  __pyx_v_shape = __pyx_v_memslice->shape;
  __pyx_v_strides = __pyx_v_memslice->strides;

  /* "View.MemoryView":916-922: pairwise swap dimension i with ndim-1-i.
   * ndim/2 iterations suffice; a middle dimension (odd ndim) stays put. */
  for (__pyx_v_i = 0; __pyx_v_i < (__pyx_v_ndim / 2); __pyx_v_i++) {
    __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);

    __pyx_swap_tmp = __pyx_v_strides[__pyx_v_i];
    __pyx_v_strides[__pyx_v_i] = __pyx_v_strides[__pyx_v_j];
    __pyx_v_strides[__pyx_v_j] = __pyx_swap_tmp;

    __pyx_swap_tmp = __pyx_v_shape[__pyx_v_i];
    __pyx_v_shape[__pyx_v_i] = __pyx_v_shape[__pyx_v_j];
    __pyx_v_shape[__pyx_v_j] = __pyx_swap_tmp;

    /* Indirect dimensions (suboffset >= 0) cannot be transposed by a
     * plain stride swap; raise ValueError (checked after the swap, as
     * in the original generated order). */
    if (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) ||
        ((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0)) {
      __pyx_err_rc = __pyx_memoryview_err(__pyx_builtin_ValueError, __pyx_k_Cannot_transpose_memoryview_with);
      if (unlikely(__pyx_err_rc == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 922; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    }
  }

  /* "View.MemoryView":924: success */
  __pyx_r = 1;
  goto __pyx_L0;

  /* function exit code */
  __pyx_L1_error:;
  {
    /* We are in nogil context: take the GIL just long enough to record
     * the traceback, then release it again. */
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
  }
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}
/* "View.MemoryView":941
 * cdef int (*to_dtype_func)(char *, object) except 0
 *
 * def __dealloc__(self): # <<<<<<<<<<<<<<
 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
 *
 */
/* Python wrapper for _memoryviewslice.__dealloc__: casts the generic
 * PyObject* slot argument to the concrete slice struct and forwards to
 * the implementation below.  tp_dealloc cannot fail, hence void. */
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryviewslice_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* __dealloc__ implementation: drop this slice object's reference on the
 * underlying acquisition via __PYX_XDEC_MEMVIEW (the matching
 * __PYX_INC_MEMVIEW happens in memoryview_fromslice, L"View.MemoryView":983). */
static void __pyx_memoryviewslice_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":942
 *
 * def __dealloc__(self):
 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<<
 *
 * cdef convert_item_to_object(self, char *itemp):
 */
__PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1);
/* "View.MemoryView":941
 * cdef int (*to_dtype_func)(char *, object) except 0
 *
 * def __dealloc__(self): # <<<<<<<<<<<<<<
 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
 *
 */
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":944
 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
 *
 * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
 * if self.to_object_func != NULL:
 * return self.to_object_func(itemp)
 */
/* Convert the raw item at `itemp` to a Python object.  If a typed
 * converter (to_object_func) was registered for this slice, use it;
 * otherwise fall back to the base memoryview's generic converter via
 * the vtable.  Returns a new reference, or NULL (with exception set)
 * on failure. */
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":945
 *
 * cdef convert_item_to_object(self, char *itemp):
 * if self.to_object_func != NULL: # <<<<<<<<<<<<<<
 * return self.to_object_func(itemp)
 * else:
 */
__pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":946
 * cdef convert_item_to_object(self, char *itemp):
 * if self.to_object_func != NULL:
 * return self.to_object_func(itemp) # <<<<<<<<<<<<<<
 * else:
 * return memoryview.convert_item_to_object(self, itemp)
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 946; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/*else*/ {
/* "View.MemoryView":948
 * return self.to_object_func(itemp)
 * else:
 * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<<
 *
 * cdef assign_item_from_object(self, char *itemp, object value):
 */
__Pyx_XDECREF(__pyx_r);
/* Unbound-superclass call: dispatch through the base memoryview vtable. */
__pyx_t_2 = __pyx_vtabptr_memoryview->convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 948; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":944
 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
 *
 * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
 * if self.to_object_func != NULL:
 * return self.to_object_func(itemp)
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":950
 * return memoryview.convert_item_to_object(self, itemp)
 *
 * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
 * if self.to_dtype_func != NULL:
 * self.to_dtype_func(itemp, value)
 */
/* Store Python object `value` into the raw item at `itemp`.  Uses the
 * registered typed converter (to_dtype_func, "except 0" convention:
 * 0 means error) when available, otherwise falls back to the base
 * memoryview's generic assignment via the vtable.  Returns Py_None on
 * success, NULL (with exception set) on failure. */
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":951
 *
 * cdef assign_item_from_object(self, char *itemp, object value):
 * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
 * self.to_dtype_func(itemp, value)
 * else:
 */
__pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":952
 * cdef assign_item_from_object(self, char *itemp, object value):
 * if self.to_dtype_func != NULL:
 * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<<
 * else:
 * memoryview.assign_item_from_object(self, itemp, value)
 */
__pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 952; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":954
 * self.to_dtype_func(itemp, value)
 * else:
 * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<<
 *
 * property base:
 */
/* Superclass call returns an object we don't need; discard it. */
__pyx_t_3 = __pyx_vtabptr_memoryview->assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 954; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L3:;
/* "View.MemoryView":950
 * return memoryview.convert_item_to_object(self, itemp)
 *
 * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
 * if self.to_dtype_func != NULL:
 * self.to_dtype_func(itemp, value)
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":958
 * property base:
 * @cname('__pyx_memoryviewslice__get__base')
 * def __get__(self): # <<<<<<<<<<<<<<
 * return self.from_object
 *
 */
/* Python wrapper for the `base` property getter: casts self to the
 * concrete slice struct and forwards to the implementation. */
static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_memoryviewslice__get__base_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* `base` property implementation: returns a new reference to the object
 * this slice was created from (from_object, set in memoryview_fromslice).
 * Cannot fail. */
static PyObject *__pyx_memoryviewslice__get__base_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":959
 * @cname('__pyx_memoryviewslice__get__base')
 * def __get__(self):
 * return self.from_object # <<<<<<<<<<<<<<
 *
 * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->from_object);
__pyx_r = __pyx_v_self->from_object;
goto __pyx_L0;
/* "View.MemoryView":958
 * property base:
 * @cname('__pyx_memoryviewslice__get__base')
 * def __get__(self): # <<<<<<<<<<<<<<
 * return self.from_object
 *
 */
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":965
 *
 * @cname('__pyx_memoryview_fromslice')
 * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
 * int ndim,
 * object (*to_object_func)(char *),
 */
/* Build a new _memoryviewslice Python object wrapping the given C-level
 * slice.  Copies the slice struct into the new object, takes an extra
 * acquisition reference (released again in __dealloc__), clones the
 * parent's Py_buffer view and then repoints buf/shape/strides/suboffsets
 * at the copied slice's own arrays, and recomputes view.len from
 * itemsize * prod(shape).  Returns None if the slice has no memview
 * (already-released slice), NULL with exception set on error. */
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
int __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_TypeInfo *__pyx_t_4;
Py_buffer __pyx_t_5;
Py_ssize_t __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_fromslice", 0);
/* "View.MemoryView":974
 * cdef int i
 *
 * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
 * return None
 *
 */
__pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":975
 *
 * if <PyObject *> memviewslice.memview == Py_None:
 * return None # <<<<<<<<<<<<<<
 *
 *
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(Py_None);
__pyx_r = Py_None;
goto __pyx_L0;
}
/* "View.MemoryView":980
 *
 *
 * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<<
 *
 * result.from_slice = memviewslice
 */
/* Call _memoryviewslice(None, 0, dtype_is_object): build the 3-tuple of
 * constructor arguments, then invoke the type. */
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 980; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 980; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_INCREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryviewslice_type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 980; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":982
 * result = _memoryviewslice(None, 0, dtype_is_object)
 *
 * result.from_slice = memviewslice # <<<<<<<<<<<<<<
 * __PYX_INC_MEMVIEW(&memviewslice, 1)
 *
 */
/* Struct copy of the whole slice, then an acquisition reference that is
 * balanced by __PYX_XDEC_MEMVIEW in _memoryviewslice.__dealloc__. */
__pyx_v_result->from_slice = __pyx_v_memviewslice;
/* "View.MemoryView":983
 *
 * result.from_slice = memviewslice
 * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<<
 *
 * result.from_object = (<memoryview> memviewslice.memview).base
 */
__PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);
/* "View.MemoryView":985
 * __PYX_INC_MEMVIEW(&memviewslice, 1)
 *
 * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<<
 * result.typeinfo = memviewslice.memview.typeinfo
 *
 */
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 985; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_v_result->from_object);
__Pyx_DECREF(__pyx_v_result->from_object);
__pyx_v_result->from_object = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":986
 *
 * result.from_object = (<memoryview> memviewslice.memview).base
 * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<<
 *
 * result.view = memviewslice.memview.view
 */
__pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
__pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;
/* "View.MemoryView":988
 * result.typeinfo = memviewslice.memview.typeinfo
 *
 * result.view = memviewslice.memview.view # <<<<<<<<<<<<<<
 * result.view.buf = <void *> memviewslice.data
 * result.view.ndim = ndim
 */
/* Start from a struct copy of the parent's Py_buffer, then overwrite
 * the fields that must describe THIS slice. */
__pyx_t_5 = __pyx_v_memviewslice.memview->view;
__pyx_v_result->__pyx_base.view = __pyx_t_5;
/* "View.MemoryView":989
 *
 * result.view = memviewslice.memview.view
 * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<<
 * result.view.ndim = ndim
 * (<__pyx_buffer *> &result.view).obj = Py_None
 */
__pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);
/* "View.MemoryView":990
 * result.view = memviewslice.memview.view
 * result.view.buf = <void *> memviewslice.data
 * result.view.ndim = ndim # <<<<<<<<<<<<<<
 * (<__pyx_buffer *> &result.view).obj = Py_None
 * Py_INCREF(Py_None)
 */
__pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;
/* "View.MemoryView":991
 * result.view.buf = <void *> memviewslice.data
 * result.view.ndim = ndim
 * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<<
 * Py_INCREF(Py_None)
 *
 */
/* The copied view must not claim the parent's obj; own a None instead. */
((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None;
/* "View.MemoryView":992
 * result.view.ndim = ndim
 * (<__pyx_buffer *> &result.view).obj = Py_None
 * Py_INCREF(Py_None) # <<<<<<<<<<<<<<
 *
 * result.flags = PyBUF_RECORDS
 */
Py_INCREF(Py_None);
/* "View.MemoryView":994
 * Py_INCREF(Py_None)
 *
 * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<<
 *
 * result.view.shape = <Py_ssize_t *> result.from_slice.shape
 */
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;
/* "View.MemoryView":996
 * result.flags = PyBUF_RECORDS
 *
 * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<<
 * result.view.strides = <Py_ssize_t *> result.from_slice.strides
 * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
 */
/* Point the view at the arrays embedded in result.from_slice (NOT the
 * stack copy) so their lifetime matches the result object. */
__pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);
/* "View.MemoryView":997
 *
 * result.view.shape = <Py_ssize_t *> result.from_slice.shape
 * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<<
 * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
 *
 */
__pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);
/* "View.MemoryView":998
 * result.view.shape = <Py_ssize_t *> result.from_slice.shape
 * result.view.strides = <Py_ssize_t *> result.from_slice.strides
 * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<<
 *
 * result.view.len = result.view.itemsize
 */
__pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);
/* "View.MemoryView":1000
 * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
 *
 * result.view.len = result.view.itemsize # <<<<<<<<<<<<<<
 * for i in range(ndim):
 * result.view.len *= result.view.shape[i]
 */
/* len = itemsize * prod(shape[0..ndim)) */
__pyx_t_6 = __pyx_v_result->__pyx_base.view.itemsize;
__pyx_v_result->__pyx_base.view.len = __pyx_t_6;
/* "View.MemoryView":1001
 *
 * result.view.len = result.view.itemsize
 * for i in range(ndim): # <<<<<<<<<<<<<<
 * result.view.len *= result.view.shape[i]
 *
 */
__pyx_t_7 = __pyx_v_ndim;
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) {
__pyx_v_i = __pyx_t_8;
/* "View.MemoryView":1002
 * result.view.len = result.view.itemsize
 * for i in range(ndim):
 * result.view.len *= result.view.shape[i] # <<<<<<<<<<<<<<
 *
 * result.to_object_func = to_object_func
 */
__pyx_v_result->__pyx_base.view.len = (__pyx_v_result->__pyx_base.view.len * (__pyx_v_result->__pyx_base.view.shape[__pyx_v_i]));
}
/* "View.MemoryView":1004
 * result.view.len *= result.view.shape[i]
 *
 * result.to_object_func = to_object_func # <<<<<<<<<<<<<<
 * result.to_dtype_func = to_dtype_func
 *
 */
__pyx_v_result->to_object_func = __pyx_v_to_object_func;
/* "View.MemoryView":1005
 *
 * result.to_object_func = to_object_func
 * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<<
 *
 * return result
 */
__pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func;
/* "View.MemoryView":1007
 * result.to_dtype_func = to_dtype_func
 *
 * return result # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_get_slice_from_memoryview')
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":965
 *
 * @cname('__pyx_memoryview_fromslice')
 * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
 * int ndim,
 * object (*to_object_func)(char *),
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1010
 *
 * @cname('__pyx_memoryview_get_slice_from_memoryview')
 * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
 * __Pyx_memviewslice *mslice):
 * cdef _memoryviewslice obj
 */
/* Obtain a __Pyx_memviewslice* for a memoryview.  If memview is a
 * _memoryviewslice, return a pointer to its embedded from_slice (no
 * copy); otherwise fill the caller-provided *mslice via slice_copy and
 * return mslice.  Errors (failed type test) are reported as unraisable
 * since the cdef function has no exception return slot. */
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) {
struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0;
__Pyx_memviewslice *__pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_slice_from_memview", 0);
/* "View.MemoryView":1013
 * __Pyx_memviewslice *mslice):
 * cdef _memoryviewslice obj
 * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
 * obj = memview
 * return &obj.from_slice
 */
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type));
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1014
 * cdef _memoryviewslice obj
 * if isinstance(memview, _memoryviewslice):
 * obj = memview # <<<<<<<<<<<<<<
 * return &obj.from_slice
 * else:
 */
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1014; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":1015
 * if isinstance(memview, _memoryviewslice):
 * obj = memview
 * return &obj.from_slice # <<<<<<<<<<<<<<
 * else:
 * slice_copy(memview, mslice)
 */
/* Pointer into memview itself; valid only while memview stays alive. */
__pyx_r = (&__pyx_v_obj->from_slice);
goto __pyx_L0;
}
/*else*/ {
/* "View.MemoryView":1017
 * return &obj.from_slice
 * else:
 * slice_copy(memview, mslice) # <<<<<<<<<<<<<<
 * return mslice
 *
 */
__pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice);
/* "View.MemoryView":1018
 * else:
 * slice_copy(memview, mslice)
 * return mslice # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_slice_copy')
 */
__pyx_r = __pyx_v_mslice;
goto __pyx_L0;
}
/* "View.MemoryView":1010
 *
 * @cname('__pyx_memoryview_get_slice_from_memoryview')
 * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
 * __Pyx_memviewslice *mslice):
 * cdef _memoryviewslice obj
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 0);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_obj);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1021
 * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst):
 *
 * Flatten a memoryview's Py_buffer description into *dst: record the
 * owning memview and base data pointer, then copy per-dimension
 * shape/strides/suboffsets.  A NULL suboffsets array in the buffer
 * means "no indirect dimensions" and is stored as -1 per dimension.
 * Note: dst->memview is set without taking a reference; the caller is
 * responsible for keeping memview alive.
 */
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) {
  int __pyx_v_dim;
  int __pyx_ndim;
  Py_ssize_t *__pyx_v_shape;
  Py_ssize_t *__pyx_v_strides;
  Py_ssize_t *__pyx_v_suboffsets;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("slice_copy", 0);

  /* "View.MemoryView":1025-1027: cache the buffer geometry arrays. */
  __pyx_v_shape = __pyx_v_memview->view.shape;
  __pyx_v_strides = __pyx_v_memview->view.strides;
  __pyx_v_suboffsets = __pyx_v_memview->view.suboffsets;

  /* "View.MemoryView":1029-1030: owner and base data pointer. */
  __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview);
  __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf);

  /* "View.MemoryView":1032-1038: per-dimension geometry copy. */
  __pyx_ndim = __pyx_v_memview->view.ndim;
  for (__pyx_v_dim = 0; __pyx_v_dim < __pyx_ndim; __pyx_v_dim++) {
    __pyx_v_dst->shape[__pyx_v_dim] = __pyx_v_shape[__pyx_v_dim];
    __pyx_v_dst->strides[__pyx_v_dim] = __pyx_v_strides[__pyx_v_dim];
    __pyx_v_dst->suboffsets[__pyx_v_dim] =
        (__pyx_v_suboffsets == NULL) ? -1 : __pyx_v_suboffsets[__pyx_v_dim];
  }

  __Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":1041
 *
 * @cname('__pyx_memoryview_copy_object')
 * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
 * "Create a new memoryview object"
 * cdef __Pyx_memviewslice memviewslice
 */
/* Create a new memoryview object wrapping the same buffer: snapshot the
 * geometry into a stack slice with slice_copy, then delegate to
 * memoryview_copy_from_slice.  Returns the new object, or NULL with
 * exception set on failure. */
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
__Pyx_memviewslice __pyx_v_memviewslice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy", 0);
/* "View.MemoryView":1044
 * "Create a new memoryview object"
 * cdef __Pyx_memviewslice memviewslice
 * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<<
 * return memoryview_copy_from_slice(memview, &memviewslice)
 *
 */
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));
/* "View.MemoryView":1045
 * cdef __Pyx_memviewslice memviewslice
 * slice_copy(memview, &memviewslice)
 * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_copy_object_from_slice')
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":1041
 *
 * @cname('__pyx_memoryview_copy_object')
 * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
 * "Create a new memoryview object"
 * cdef __Pyx_memviewslice memviewslice
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1048
 *
 * @cname('__pyx_memoryview_copy_object_from_slice')
 * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
 * """
 * Create a new memoryview object from a given memoryview object and slice.
 */
/* Create a new memoryview object from an existing memoryview plus a
 * C-level slice.  Propagates the typed converter function pointers
 * (to_object_func / to_dtype_func) when the source is itself a
 * _memoryviewslice, otherwise passes NULL so the generic converters are
 * used.  Returns NULL with exception set on failure. */
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
PyObject *(*__pyx_v_to_object_func)(char *);
int (*__pyx_v_to_dtype_func)(char *, PyObject *);
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *(*__pyx_t_3)(char *);
int (*__pyx_t_4)(char *, PyObject *);
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);
/* "View.MemoryView":1055
 * cdef int (*to_dtype_func)(char *, object) except 0
 *
 * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
 * to_object_func = (<_memoryviewslice> memview).to_object_func
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 */
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type));
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1056
 *
 * if isinstance(memview, _memoryviewslice):
 * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<<
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 * else:
 */
__pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
__pyx_v_to_object_func = __pyx_t_3;
/* "View.MemoryView":1057
 * if isinstance(memview, _memoryviewslice):
 * to_object_func = (<_memoryviewslice> memview).to_object_func
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<<
 * else:
 * to_object_func = NULL
 */
__pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
__pyx_v_to_dtype_func = __pyx_t_4;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":1059
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 * else:
 * to_object_func = NULL # <<<<<<<<<<<<<<
 * to_dtype_func = NULL
 *
 */
__pyx_v_to_object_func = NULL;
/* "View.MemoryView":1060
 * else:
 * to_object_func = NULL
 * to_dtype_func = NULL # <<<<<<<<<<<<<<
 *
 * return memoryview_fromslice(memviewslice[0], memview.view.ndim,
 */
__pyx_v_to_dtype_func = NULL;
}
__pyx_L3:;
/* "View.MemoryView":1062
 * to_dtype_func = NULL
 *
 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<<
 * to_object_func, to_dtype_func,
 * memview.dtype_is_object)
 */
__Pyx_XDECREF(__pyx_r);
/* "View.MemoryView":1064
 * return memoryview_fromslice(memviewslice[0], memview.view.ndim,
 * to_object_func, to_dtype_func,
 * memview.dtype_is_object) # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1062; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":1048
 *
 * @cname('__pyx_memoryview_copy_object_from_slice')
 * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
 * """
 * Create a new memoryview object from a given memoryview object and slice.
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":1070
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
/* Absolute value of a Py_ssize_t.
 *
 * Generated from the Cython source
 *     cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil
 * A hand-rolled conditional is used instead of labs()/llabs() so the
 * helper works for whatever width Py_ssize_t has on this platform and
 * stays safe to call without the GIL. */
static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
  return (__pyx_v_arg < 0) ? -__pyx_v_arg : __pyx_v_arg;
}
/* "View.MemoryView":1077
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
/* Figure out the best memory access order for a slice.
 *
 * Generated from the Cython source (@cname '__pyx_get_best_slice_order')
 *     cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil
 *
 * c_stride is the stride of the innermost dimension with extent > 1
 * (scanning from the last axis backwards); f_stride is the stride of the
 * outermost such dimension (scanning forwards).  Dimensions of extent 1
 * are skipped because their strides are meaningless for traversal order.
 * Returns 'C' when a C-order walk touches memory at least as densely as
 * a Fortran-order walk, 'F' otherwise.  If no dimension has extent > 1,
 * both strides stay 0 and 'C' is reported. */
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
  Py_ssize_t c_stride = 0;
  Py_ssize_t f_stride = 0;
  int dim;

  /* innermost significant stride */
  for (dim = __pyx_v_ndim - 1; dim >= 0; dim--) {
    if (__pyx_v_mslice->shape[dim] > 1) {
      c_stride = __pyx_v_mslice->strides[dim];
      break;
    }
  }

  /* outermost significant stride */
  for (dim = 0; dim < __pyx_v_ndim; dim++) {
    if (__pyx_v_mslice->shape[dim] > 1) {
      f_stride = __pyx_v_mslice->strides[dim];
      break;
    }
  }

  return (abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride)) ? 'C' : 'F';
}
/* "View.MemoryView":1101
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
/* Recursively copy an ndim-dimensional strided buffer element-by-element.
 *
 * Generated from the Cython source (View.MemoryView, cdivision(True))
 *     cdef void _copy_strided_to_strided(char *src_data, ...)
 *
 * The iteration extent of each level is taken from dst_shape; src is
 * assumed to have been broadcast to a compatible shape by the caller
 * (a broadcast src dimension has stride 0, so src_data simply does not
 * advance).  Strides are in bytes and may be negative.
 *
 * In the 1-D base case, when both strides are positive and exactly one
 * itemsize, the whole run is a single contiguous memcpy; otherwise each
 * item is copied individually.  src and dst are assumed not to overlap
 * (memcpy semantics). */
static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
  Py_ssize_t extent = __pyx_v_dst_shape[0];
  Py_ssize_t src_stride = __pyx_v_src_strides[0];
  Py_ssize_t dst_stride = __pyx_v_dst_strides[0];
  Py_ssize_t k;

  if (__pyx_v_ndim == 1) {
    if (src_stride > 0 && dst_stride > 0 &&
        (size_t) src_stride == __pyx_v_itemsize &&
        (size_t) dst_stride == __pyx_v_itemsize) {
      /* Both sides are unit-stride: one bulk copy covers the whole row. */
      memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize * extent);
    } else {
      /* General strided row: copy one element at a time. */
      for (k = 0; k < extent; k++) {
        memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize);
        __pyx_v_src_data += src_stride;
        __pyx_v_dst_data += dst_stride;
      }
    }
  } else {
    /* Recurse over the leading dimension, peeling one axis per level. */
    for (k = 0; k < extent; k++) {
      _copy_strided_to_strided(__pyx_v_src_data, __pyx_v_src_strides + 1,
                               __pyx_v_dst_data, __pyx_v_dst_strides + 1,
                               __pyx_v_src_shape + 1, __pyx_v_dst_shape + 1,
                               __pyx_v_ndim - 1, __pyx_v_itemsize);
      __pyx_v_src_data += src_stride;
      __pyx_v_dst_data += dst_stride;
    }
  }
}
/* "View.MemoryView":1131
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
/* Public entry point for the strided copy: unpack the two memoryview
 * slices and delegate to the recursive worker.  Declared nogil in the
 * Cython source (View.MemoryView line 1131). */
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
  _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides,
                           __pyx_v_dst->data, __pyx_v_dst->strides,
                           __pyx_v_src->shape, __pyx_v_dst->shape,
                           __pyx_v_ndim, __pyx_v_itemsize);
}
/* "View.MemoryView":1138
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef int i
*/
/* Return the size of the memory occupied by the slice in bytes:
 * itemsize multiplied by the extent of every dimension.
 *
 * Generated from the Cython source (@cname '__pyx_memoryview_slice_get_size')
 *     cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil
 *
 * Note this is the *logical* size of the data, independent of strides;
 * a zero-length dimension yields 0. */
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
  Py_ssize_t nbytes = __pyx_v_src->memview->view.itemsize;
  int dim;

  for (dim = 0; dim < __pyx_v_ndim; dim++)
    nbytes *= __pyx_v_src->shape[dim];

  return nbytes;
}
/* "View.MemoryView":1149
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
/* Fill `strides` with the byte strides of a contiguous array of the
 * given `shape`, starting from an innermost stride of `stride` bytes.
 *
 * Generated from the Cython source (@cname '__pyx_fill_contig_strides_array')
 *     cdef Py_ssize_t fill_contig_strides_array(...) nogil
 *
 * order == 'F' makes the first dimension vary fastest (Fortran layout);
 * any other value fills in C layout, where the last dimension varies
 * fastest.  Returns the accumulated stride one past the slowest axis,
 * i.e. the total extent of the array in bytes. */
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
  int dim;

  if (__pyx_v_order == 'F') {
    /* Fortran: strides grow from the first axis outward. */
    for (dim = 0; dim < __pyx_v_ndim; dim++) {
      __pyx_v_strides[dim] = __pyx_v_stride;
      __pyx_v_stride *= __pyx_v_shape[dim];
    }
  } else {
    /* C: strides grow from the last axis backward. */
    for (dim = __pyx_v_ndim - 1; dim >= 0; dim--) {
      __pyx_v_strides[dim] = __pyx_v_stride;
      __pyx_v_stride *= __pyx_v_shape[dim];
    }
  }

  return __pyx_v_stride;
}
/* "View.MemoryView":1170
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
/* Copy the data of `src` into a freshly malloc()ed buffer and describe
 * that buffer through `tmpslice`, laid out contiguously in `order`
 * ('C' or 'F').
 *
 * Generated from the Cython source (@cname '__pyx_memoryview_copy_data_to_temp')
 *     cdef void *copy_data_to_temp(...) nogil except NULL
 *
 * Returns the new buffer (owned by the caller, free() when done), or
 * NULL after raising MemoryError; the exception and traceback are
 * recorded under the GIL since this function itself runs nogil.
 * tmpslice shares src's owning memoryview object but points at the new
 * allocation; its suboffsets are all -1 (no indirect dimensions) and
 * any extent-1 dimension gets stride 0 so it broadcasts. */
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
  int dim;
  void *buf;
  size_t itemsize = (size_t) __pyx_v_src->memview->view.itemsize;
  size_t nbytes = (size_t) __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;

  buf = malloc(nbytes);
  if (!buf) {
    /* _err() raises MemoryError under the GIL and reports -1. */
    if (unlikely(__pyx_memoryview_err(__pyx_builtin_MemoryError, NULL) == -1)) {
      __pyx_filename = __pyx_f[2]; __pyx_lineno = 1186; __pyx_clineno = __LINE__;
      goto fail;
    }
  }

  /* Describe the new buffer: same logical shape, direct (non-indirect)
     dimensions, contiguous strides in the requested order. */
  __pyx_v_tmpslice->data = (char *) buf;
  __pyx_v_tmpslice->memview = __pyx_v_src->memview;
  for (dim = 0; dim < __pyx_v_ndim; dim++) {
    __pyx_v_tmpslice->shape[dim] = __pyx_v_src->shape[dim];
    __pyx_v_tmpslice->suboffsets[dim] = -1;
  }
  (void) __pyx_fill_contig_strides_array(&(__pyx_v_tmpslice->shape[0]),
                                         &(__pyx_v_tmpslice->strides[0]),
                                         itemsize, __pyx_v_ndim, __pyx_v_order);

  /* An extent-1 dimension never advances the pointer; zero its stride
     so the copy broadcasts correctly. */
  for (dim = 0; dim < __pyx_v_ndim; dim++) {
    if (__pyx_v_tmpslice->shape[dim] == 1)
      __pyx_v_tmpslice->strides[dim] = 0;
  }

  if (__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim)) {
    /* Source already contiguous in this order: single bulk copy. */
    memcpy(buf, __pyx_v_src->data, nbytes);
  } else {
    copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, itemsize);
  }

  return buf;

fail:
  /* We run without the GIL, so acquire it to record the traceback. */
  {
  #ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
  #endif
  __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
  #ifdef WITH_THREAD
  PyGILState_Release(__pyx_gilstate_save);
  #endif
  }
  return NULL;
}
/* "View.MemoryView":1213
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
/* _err_extents: raise
 *     ValueError("got differing extents in dimension %d (got %d and %d)"
 *                % (i, extent1, extent2))
 * for a shape mismatch between two slices.  Declared "except -1 with gil"
 * in the .pyx source: the GIL is acquired on entry and the function
 * always returns -1 with a Python exception set. */
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "with gil" block: callers may hold no GIL, so take it here. */
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_extents", 0);
/* "View.MemoryView":1216
 * Py_ssize_t extent2) except -1 with gil:
 * raise ValueError("got differing extents in dimension %d (got %d and %d)" %
 * (i, extent1, extent2)) # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_err_dim')
 */
/* Box the three C integers and pack them into a 3-tuple for %-formatting. */
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
/* PyTuple_SET_ITEM steals the references; GIVEREF transfers ownership
   to the tuple for refnanny accounting. */
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":1215
 * cdef int _err_extents(int i, Py_ssize_t extent1,
 * Py_ssize_t extent2) except -1 with gil:
 * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<<
 * (i, extent1, extent2))
 *
 */
/* Apply the %-format, wrap the message in a 1-tuple, call ValueError
   and raise the resulting exception object. */
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* The raise always reaches the error exit; there is no success path. */
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":1213
 *
 * @cname('__pyx_memoryview_err_extents')
 * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
 * Py_ssize_t extent2) except -1 with gil:
 * raise ValueError("got differing extents in dimension %d (got %d and %d)" %
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_RefNannyFinishContext();
/* Release the GIL taken at entry before returning to nogil code. */
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1219
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
/* _err_dim: raise error(msg.decode('ascii') % dim) — format a C string
 * message with a dimension index and raise it as the given exception
 * class.  Declared "except -1 with gil": acquires the GIL on entry and
 * always returns -1 with a Python exception set. */
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "with gil" block: callers may hold no GIL, so take it here. */
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_dim", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1220
 * @cname('__pyx_memoryview_err_dim')
 * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil:
 * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_err')
 */
/* Decode the C message as ASCII, %-format it with the boxed dim, then
   call error(...) with a 1-tuple argument and raise the result. */
__pyx_t_1 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyUnicode_Format(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_v_error, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* The raise always reaches the error exit; there is no success path. */
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":1219
 *
 * @cname('__pyx_memoryview_err_dim')
 * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
 * raise error(msg.decode('ascii') % dim)
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
/* Drop the borrowed-in `error` reference taken by INCREF at entry. */
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
/* Release the GIL taken at entry before returning to nogil code. */
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1223
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
/* _err: raise error(msg.decode('ascii')) when msg is non-NULL, else
 * re-raise the bare error class/instance.  Declared "except -1 with gil":
 * acquires the GIL on entry and always returns -1 with a Python
 * exception set. */
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "with gil" block: callers may hold no GIL, so take it here. */
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1224
 * @cname('__pyx_memoryview_err')
 * cdef int _err(object error, char *msg) except -1 with gil:
 * if msg != NULL: # <<<<<<<<<<<<<<
 * raise error(msg.decode('ascii'))
 * else:
 */
__pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1225
 * cdef int _err(object error, char *msg) except -1 with gil:
 * if msg != NULL:
 * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<<
 * else:
 * raise error
 */
/* Decode the C message as ASCII, call error(...) with it and raise. */
__pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1225; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1225; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_v_error, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1225; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 1225; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/*else*/ {
/* "View.MemoryView":1227
 * raise error(msg.decode('ascii'))
 * else:
 * raise error # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_copy_contents')
 */
/* No message: raise the error object itself (class or instance). */
__Pyx_Raise(__pyx_v_error, 0, 0, 0);
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 1227; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":1223
 *
 * @cname('__pyx_memoryview_err')
 * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
 * if msg != NULL:
 * raise error(msg.decode('ascii'))
 */
/* function exit code — both branches raise, so only the error exit exists. */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
/* Drop the reference taken by INCREF(error) at entry. */
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
/* Release the GIL taken at entry before returning to nogil code. */
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1230
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
/* memoryview_copy_contents (View.MemoryView:1230, Cython-generated):
 * copy the data of `src` into `dst`, broadcasting leading dimensions,
 * checking extents, handling overlapping memory via a temporary buffer,
 * and using a single memcpy when both slices are contiguous in the same
 * order. Returns 0 on success, -1 on error (exception set, GIL acquired
 * just for the traceback since this runs `nogil`). */
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
void *__pyx_v_tmpdata;
size_t __pyx_v_itemsize;
int __pyx_v_i;
char __pyx_v_order;
int __pyx_v_broadcasting;
int __pyx_v_direct_copy;
__Pyx_memviewslice __pyx_v_tmp;
int __pyx_v_ndim;
int __pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
void *__pyx_t_6;
int __pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":1238 — cdef void *tmpdata = NULL */
__pyx_v_tmpdata = NULL;
/* "View.MemoryView":1239 — itemsize = src.memview.view.itemsize */
__pyx_t_1 = __pyx_v_src.memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1241 — order = get_best_order(&src, src_ndim) */
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);
/* "View.MemoryView":1242-1243 — broadcasting = direct_copy = False */
__pyx_v_broadcasting = 0;
__pyx_v_direct_copy = 0;
/* "View.MemoryView":1246-1249 — pad the lower-dimensional slice with
 * leading extent-1 dims so both have the same rank. */
__pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0);
if (__pyx_t_2) {
__pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);
goto __pyx_L3;
}
__pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0);
if (__pyx_t_2) {
__pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":1251 — ndim = max(src_ndim, dst_ndim) */
__pyx_t_3 = __pyx_v_dst_ndim;
__pyx_t_4 = __pyx_v_src_ndim;
if (((__pyx_t_3 > __pyx_t_4) != 0)) {
__pyx_t_5 = __pyx_t_3;
} else {
__pyx_t_5 = __pyx_t_4;
}
__pyx_v_ndim = __pyx_t_5;
/* "View.MemoryView":1253-1262 — per-dimension shape check: a src extent
 * of 1 broadcasts (stride forced to 0); any other mismatch raises via
 * _err_extents. Indirect (suboffset >= 0) src dims are rejected. */
__pyx_t_5 = __pyx_v_ndim;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_5; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0);
if (__pyx_t_2) {
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
__pyx_v_broadcasting = 1;
/* stride 0 repeats the single src element across dst's extent */
(__pyx_v_src.strides[__pyx_v_i]) = 0;
goto __pyx_L7;
}
/*else*/ {
__pyx_t_4 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1259; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L7:;
goto __pyx_L6;
}
__pyx_L6:;
__pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0);
if (__pyx_t_2) {
__pyx_t_4 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, __pyx_k_Dimension_d_is_not_direct, __pyx_v_i); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1262; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L8;
}
__pyx_L8:;
}
/* "View.MemoryView":1264-1270 — overlapping src/dst: copy src out to a
 * freshly-allocated temporary (tmpdata owns the memory) and copy from
 * that instead. */
__pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
__pyx_t_2 = ((!(__pyx_memviewslice_is_contig((&__pyx_v_src), __pyx_v_order, __pyx_v_ndim) != 0)) != 0);
if (__pyx_t_2) {
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim);
goto __pyx_L10;
}
__pyx_L10:;
__pyx_t_6 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1269; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_tmpdata = __pyx_t_6;
__pyx_v_src = __pyx_v_tmp;
goto __pyx_L9;
}
__pyx_L9:;
/* "View.MemoryView":1272-1285 — fast path: if not broadcasting and both
 * slices are contiguous in the same ('C' or 'F') order, a single memcpy
 * suffices. Object refcounts are released before and re-acquired after
 * the raw copy. */
__pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0);
if (__pyx_t_2) {
__pyx_t_2 = (__pyx_memviewslice_is_contig((&__pyx_v_src), 'C', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
__pyx_v_direct_copy = __pyx_memviewslice_is_contig((&__pyx_v_dst), 'C', __pyx_v_ndim);
goto __pyx_L12;
}
__pyx_t_2 = (__pyx_memviewslice_is_contig((&__pyx_v_src), 'F', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
__pyx_v_direct_copy = __pyx_memviewslice_is_contig((&__pyx_v_dst), 'F', __pyx_v_ndim);
goto __pyx_L12;
}
__pyx_L12:;
__pyx_t_2 = (__pyx_v_direct_copy != 0);
if (__pyx_t_2) {
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim));
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* NOTE(review): tmpdata is NULL on this path (direct copy implies no
 * overlap handling happened), so returning without free() is safe. */
__pyx_r = 0;
goto __pyx_L0;
}
goto __pyx_L11;
}
__pyx_L11:;
/* "View.MemoryView":1287-1291 — if both slices are best traversed in
 * Fortran order, transpose both so the strided copy below walks them in
 * C order. */
__pyx_t_2 = (__pyx_v_order == 'F');
if (__pyx_t_2) {
__pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim));
}
__pyx_t_7 = (__pyx_t_2 != 0);
if (__pyx_t_7) {
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1291; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L14;
}
__pyx_L14:;
/* "View.MemoryView":1293-1298 — general element-wise strided copy. */
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* free(NULL) is a no-op when no temporary was needed */
free(__pyx_v_tmpdata);
__pyx_r = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
/* NOTE(review): this error path does not free tmpdata; if
 * transpose_memslice fails after copy_data_to_temp succeeded, the
 * temporary buffer leaks. This matches the Cython-generated code —
 * confirm against the Cython version in use before changing. */
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1301
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *slice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
/* broadcast_leading (View.MemoryView:1301, nogil): grow `slice` from
 * `ndim` to `ndim_other` dimensions by shifting the existing dims to the
 * tail and prepending broadcastable extent-1 dims at the front. */
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_slice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
int shift = __pyx_v_ndim_other - __pyx_v_ndim;
int k;
/* Shift the existing dims toward the end. Walk backwards so an entry
 * is copied out before its destination slot overwrites it. */
for (k = __pyx_v_ndim - 1; k >= 0; k--) {
__pyx_v_slice->shape[k + shift] = __pyx_v_slice->shape[k];
__pyx_v_slice->strides[k + shift] = __pyx_v_slice->strides[k];
__pyx_v_slice->suboffsets[k + shift] = __pyx_v_slice->suboffsets[k];
}
/* New leading dims: extent 1 (broadcast), an arbitrary-but-valid stride
 * (extent 1 means it is never used to step), and -1 = direct access. */
for (k = 0; k < shift; k++) {
__pyx_v_slice->shape[k] = 1;
__pyx_v_slice->strides[k] = __pyx_v_slice->strides[0];
__pyx_v_slice->suboffsets[k] = -1;
}
}
/* "View.MemoryView":1323
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
/* refcount_copying (View.MemoryView:1323, nogil): before/after a raw
 * copy into `dst`, adjust the refcount of every contained object.
 * `inc` false releases references (pre-copy), true re-acquires them
 * (post-copy). No-op for non-object dtypes. */
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
/* Non-object dtypes hold no references: nothing to do. */
if (!__pyx_v_dtype_is_object)
return;
/* Delegate to the with-GIL helper, which walks every PyObject* slot. */
__pyx_memoryview_refcount_objects_in_slice_with_gil(
__pyx_v_dst->data, __pyx_v_dst->shape,
__pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc);
}
/* "View.MemoryView":1332
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
/* refcount_objects_in_slice_with_gil (View.MemoryView:1332): thin
 * wrapper that acquires the GIL (required for Py_INCREF/Py_DECREF),
 * then delegates to refcount_objects_in_slice. Callable from nogil
 * contexts such as refcount_copying. */
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
__Pyx_RefNannyDeclarations
#ifdef WITH_THREAD
/* `with gil` block: ensure the GIL is held for the duration. */
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0);
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc);
/* function exit code */
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
/* "View.MemoryView":1338
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
/* refcount_objects_in_slice (View.MemoryView:1338): recursively walk an
 * ndim-dimensional strided buffer of PyObject* and INCREF (inc true) or
 * DECREF (inc false) each element. Caller must hold the GIL. */
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
Py_ssize_t idx;
Py_ssize_t extent;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("refcount_objects_in_slice", 0);
extent = __pyx_v_shape[0];
for (idx = 0; idx < extent; idx++) {
if (__pyx_v_ndim == 1) {
/* Innermost dimension: data points directly at a PyObject* slot. */
PyObject *item = ((PyObject **)__pyx_v_data)[0];
if (__pyx_v_inc) {
Py_INCREF(item);
} else {
Py_DECREF(item);
}
} else {
/* Recurse into the sub-slice, dropping the leading dimension. */
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc);
}
/* Advance to the next element/sub-slice along the leading dim. */
__pyx_v_data += __pyx_v_strides[0];
}
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":1358
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
/* slice_assign_scalar (View.MemoryView:1358, nogil): fill every element
 * of `dst` with the `itemsize`-byte value at `item`. For object dtypes
 * the existing references are dropped before the raw overwrite and the
 * (duplicated) new references are acquired afterwards. */
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
/* Release references held by dst's current contents (object dtype only). */
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* Raw recursive fill of every element with *item. */
__pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item);
/* Re-acquire references for the newly-written contents. */
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* function exit code */
}
/* "View.MemoryView":1368
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
/* _slice_assign_scalar (View.MemoryView:1368, nogil): recursive worker
 * for slice_assign_scalar. At ndim == 1 it memcpy's `item` into every
 * element; otherwise it recurses into each sub-slice of the leading
 * dimension, advancing by that dimension's stride. */
static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
Py_ssize_t step = __pyx_v_strides[0];
Py_ssize_t count = __pyx_v_shape[0];
Py_ssize_t n;
char *cursor = __pyx_v_data;
if (__pyx_v_ndim == 1) {
/* Innermost dimension: blit the scalar into each element. */
for (n = 0; n < count; n++) {
memcpy(cursor, __pyx_v_item, __pyx_v_itemsize);
cursor += step;
}
} else {
/* Outer dimension: recurse with the leading dim stripped off. */
for (n = 0; n < count; n++) {
__pyx_memoryview__slice_assign_scalar(cursor, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item);
cursor += step;
}
}
}
/* tp_new slot for the array type: allocate via tp_alloc (or
 * object.__new__ for abstract subtypes, which raises cleanly), seed the
 * object fields with None, then run __cinit__ eagerly. */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
PyObject *self;
struct __pyx_array_obj *arr;
if (unlikely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) != 0)) {
self = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
} else {
self = (*t->tp_alloc)(t, 0);
}
if (unlikely(!self)) return 0;
arr = (struct __pyx_array_obj *) self;
/* Initialize object-typed fields to None so dealloc is always safe. */
Py_INCREF(Py_None); arr->mode = Py_None;
Py_INCREF(Py_None); arr->_format = Py_None;
/* __cinit__ runs here; on failure the half-built object is released. */
if (unlikely(__pyx_array___cinit__(self, a, k) < 0)) {
Py_DECREF(self);
self = 0;
}
return self;
}
/* tp_dealloc slot for the array type: optionally run tp_finalize
 * (PEP 442, CPython >= 3.4), call the Cython __dealloc__ with the
 * refcount temporarily bumped so the object cannot be resurrected-freed
 * mid-call, clear owned fields, then free the memory. */
static void __pyx_tp_dealloc_array(PyObject *o) {
struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
#if PY_VERSION_HEX >= 0x030400a1
/* Give tp_finalize one shot; if it resurrects the object, stop here. */
if (unlikely(Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
{
/* Preserve any pending exception across __dealloc__, and keep the
 * refcount at 1 while user code runs (guards double-dealloc). */
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o);
__pyx_array___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
/* Drop owned references and release the object memory. */
Py_CLEAR(p->mode);
Py_CLEAR(p->_format);
(*Py_TYPE(o)->tp_free)(o);
}
/* sq_item slot for the array type: implement integer indexing by boxing
 * the index and delegating to the mapping-protocol subscript. */
static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
PyObject *result;
PyObject *index = PyInt_FromSsize_t(i);
if (!index) return 0;
result = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, index);
Py_DECREF(index);
return result;
}
/* mp_ass_subscript slot for the array type: `obj[i] = v` forwards to
 * __setitem__; `del obj[i]` (v == NULL) is not supported. */
static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
if (!v) {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
return __pyx_array___setitem__(o, i, v);
}
/* tp_getattro slot for the array type: try normal attribute lookup
 * first; on a plain AttributeError fall back to __getattr__. Any other
 * exception propagates untouched. */
static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
PyObject *attr = PyObject_GenericGetAttr(o, n);
if (!attr && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
attr = __pyx_array___getattr__(o, n);
}
return attr;
}
/* Getter for the array.memview property: returns a new memoryview over
 * the array's buffer (see get_memview in View.MemoryView). */
static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
return get_memview(o);
}
/* Method table for the array type: only __getattr__ is exposed
 * (METH_COEXIST lets it coexist with the tp_getattro slot). */
static PyMethodDef __pyx_methods_array[] = {
{__Pyx_NAMESTR("__getattr__"), (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, __Pyx_DOCSTR(0)},
{0, 0, 0, 0}
};
/* Property table for the array type: read-only `memview` property. */
static struct PyGetSetDef __pyx_getsets_array[] = {
{(char *)"memview", __pyx_getprop___pyx_array_memview, 0, 0, 0},
{0, 0, 0, 0, 0}
};
/* Sequence protocol: only item access is supported; everything else
 * goes through the mapping protocol below. */
static PySequenceMethods __pyx_tp_as_sequence_array = {
0, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_array, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
/* Mapping protocol: subscript get/set (deletion raises). */
static PyMappingMethods __pyx_tp_as_mapping_array = {
0, /*mp_length*/
__pyx_array___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
};
/* Buffer protocol: new-style getbuffer only (Python >= 2.6); the
 * legacy Python 2 buffer slots are left empty. */
static PyBufferProcs __pyx_tp_as_buffer_array = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
#if PY_VERSION_HEX >= 0x02060000
__pyx_array_getbuffer, /*bf_getbuffer*/
#endif
#if PY_VERSION_HEX >= 0x02060000
0, /*bf_releasebuffer*/
#endif
};
/* PyTypeObject for the internal Cython `array` type: a C-contiguous
 * buffer-owning container used to back memoryview copies. Not tracked
 * by the cycle GC (no Py_TPFLAGS_HAVE_GC); supports the buffer,
 * sequence-item, and mapping protocols defined above. */
static PyTypeObject __pyx_type___pyx_array = {
PyVarObject_HEAD_INIT(0, 0)
__Pyx_NAMESTR("radiotool.algorithms.par_build_table.array"), /*tp_name*/
sizeof(struct __pyx_array_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_array, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#else
0, /*reserved*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_array, /*tp_as_sequence*/
&__pyx_tp_as_mapping_array, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
__pyx_tp_getattro_array, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_array, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_array, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_array, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_array, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
#if PY_VERSION_HEX >= 0x02060000
0, /*tp_version_tag*/
#endif
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
/* tp_new slot for the Enum (memoryview flag sentinel) type: allocate
 * and seed the single `name` field with None. Args are ignored here;
 * __init__ fills in the name. */
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
PyObject *self;
struct __pyx_MemviewEnum_obj *e;
if (unlikely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) != 0)) {
self = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
} else {
self = (*t->tp_alloc)(t, 0);
}
if (unlikely(!self)) return 0;
e = (struct __pyx_MemviewEnum_obj *) self;
Py_INCREF(Py_None); e->name = Py_None;
return self;
}
/* tp_dealloc slot for the Enum type: optional tp_finalize (PEP 442),
 * untrack from the cycle GC, drop the owned `name` reference, free. */
static void __pyx_tp_dealloc_Enum(PyObject *o) {
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
#if PY_VERSION_HEX >= 0x030400a1
/* If tp_finalize resurrects the object, abandon deallocation. */
if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
/* GC-tracked type: must untrack before tearing down fields. */
PyObject_GC_UnTrack(o);
Py_CLEAR(p->name);
(*Py_TYPE(o)->tp_free)(o);
}
/* tp_traverse slot for the Enum type: report the single owned
 * reference (`name`) to the cycle collector. */
static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) {
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
if (p->name) {
int r = (*v)(p->name, a);
if (r) return r;
}
return 0;
}
/* tp_clear slot for the Enum type: break potential cycles by swapping
 * `name` to None before releasing the old value (the reset-then-decref
 * order keeps the field valid if the decref re-enters). */
static int __pyx_tp_clear_Enum(PyObject *o) {
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
PyObject *old = ((PyObject*)p->name);
p->name = Py_None; Py_INCREF(Py_None);
Py_XDECREF(old);
return 0;
}
/* Method table for the Enum type: no methods beyond the slots. */
static PyMethodDef __pyx_methods_Enum[] = {
{0, 0, 0, 0}
};
/* PyTypeObject for the internal Cython `Enum` type: a named sentinel
 * (e.g. generic/strided/indirect) used by the memoryview machinery.
 * GC-tracked (HAVE_GC) since it owns one object reference. */
static PyTypeObject __pyx_type___pyx_MemviewEnum = {
PyVarObject_HEAD_INIT(0, 0)
__Pyx_NAMESTR("radiotool.algorithms.par_build_table.Enum"), /*tp_name*/
sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_Enum, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#else
0, /*reserved*/
#endif
__pyx_MemviewEnum___repr__, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_Enum, /*tp_traverse*/
__pyx_tp_clear_Enum, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_Enum, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_MemviewEnum___init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_Enum, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
#if PY_VERSION_HEX >= 0x02060000
0, /*tp_version_tag*/
#endif
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview;
/* tp_new for memoryview: allocate the instance (via object.__new__ for
 * abstract subtypes), install the vtable, initialize owned slots to None
 * so tp_clear/tp_dealloc are always safe, then run __cinit__.  On
 * __cinit__ failure the half-built object is dropped and NULL returned. */
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryview_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryview_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_memoryview;
p->obj = Py_None; Py_INCREF(Py_None);
p->_size = Py_None; Py_INCREF(Py_None);
p->_array_interface = Py_None; Py_INCREF(Py_None);
p->view.obj = NULL; /* Py_buffer owner; released by __dealloc__/tp_clear */
if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) {
Py_DECREF(o); o = 0;
}
return o;
}
/* tp_dealloc for memoryview.  Order matters: give tp_finalize a chance
 * (PEP 442; the object may be resurrected), untrack from GC, then run the
 * Cython __dealloc__ with the refcount temporarily bumped so that code
 * touching the object cannot trigger a recursive dealloc.  Pending
 * exception state is saved/restored around __dealloc__. */
static void __pyx_tp_dealloc_memoryview(PyObject *o) {
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
#if PY_VERSION_HEX >= 0x030400a1
if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o); /* guard against re-entrant dealloc while __dealloc__ runs */
__pyx_memoryview___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->obj);
Py_CLEAR(p->_size);
Py_CLEAR(p->_array_interface);
(*Py_TYPE(o)->tp_free)(o);
}
/* GC traversal slot (tp_traverse) for memoryview: visit each owned
 * PyObject reference, including the buffer owner held in view.obj.
 * Visit order matches the member declaration order. */
static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) {
    struct __pyx_memoryview_obj *self = (struct __pyx_memoryview_obj *)o;
    PyObject *members[4];
    int i;
    members[0] = self->obj;
    members[1] = self->_size;
    members[2] = self->_array_interface;
    members[3] = self->view.obj;
    for (i = 0; i < 4; i++) {
        if (members[i]) {
            int err = (*v)(members[i], a);
            if (err)
                return err;
        }
    }
    return 0;
}
/* GC clear slot (tp_clear) for memoryview: break cycles by replacing each
 * owned object slot with None (releasing the old reference only after the
 * swap), then dropping the Py_buffer owner via Py_CLEAR. */
static int __pyx_tp_clear_memoryview(PyObject *o) {
    struct __pyx_memoryview_obj *self = (struct __pyx_memoryview_obj *)o;
    PyObject *old;

    old = (PyObject *)self->obj;
    Py_INCREF(Py_None);
    self->obj = Py_None;
    Py_XDECREF(old);

    old = (PyObject *)self->_size;
    Py_INCREF(Py_None);
    self->_size = Py_None;
    Py_XDECREF(old);

    old = (PyObject *)self->_array_interface;
    Py_INCREF(Py_None);
    self->_array_interface = Py_None;
    Py_XDECREF(old);

    Py_CLEAR(self->view.obj);
    return 0;
}
/* Sequence __getitem__ shim (sq_item): box the C index as a Python int
 * and delegate to the mapping-protocol subscript implementation. */
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
    PyObject *result;
    PyObject *index = PyInt_FromSsize_t(i);
    if (!index)
        return 0;
    result = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, index);
    Py_DECREF(index);
    return result;
}
/* Mapping assignment slot (mp_ass_subscript): v == NULL means deletion,
 * which memoryviews do not support; otherwise forward to __setitem__. */
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
    if (!v) {
        PyErr_Format(PyExc_NotImplementedError,
            "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
        return -1;
    }
    return __pyx_memoryview___setitem__(o, i, v);
}
/* Property getter thunks for the memoryview type (wired into
 * __pyx_getsets_memoryview below): each adapts the CPython getter
 * signature to the corresponding Cython property implementation. */
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_memoryview_transpose(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_memoryview__get__base(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_memoryview_get_shape(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_memoryview_get_strides(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_memoryview_get_suboffsets(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_memoryview_get_ndim(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_memoryview_get_itemsize(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_memoryview_get_nbytes(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_memoryview_get_size(o);
}
/* Method table for memoryview: contiguity queries and copy helpers. */
static PyMethodDef __pyx_methods_memoryview[] = {
{__Pyx_NAMESTR("is_c_contig"), (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, __Pyx_DOCSTR(0)},
{__Pyx_NAMESTR("is_f_contig"), (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, __Pyx_DOCSTR(0)},
{__Pyx_NAMESTR("copy"), (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, __Pyx_DOCSTR(0)},
{__Pyx_NAMESTR("copy_fortran"), (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, __Pyx_DOCSTR(0)},
{0, 0, 0, 0}
};
/* Read-only properties exposed on memoryview (getter only; no setter,
 * closure, or docstring). */
static struct PyGetSetDef __pyx_getsets_memoryview[] = {
{(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, 0, 0},
{(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, 0, 0},
{(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, 0, 0},
{(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, 0, 0},
{(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, 0, 0},
{(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, 0, 0},
{(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, 0, 0},
{(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, 0, 0},
{(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, 0, 0},
{0, 0, 0, 0, 0}
};
/* Sequence protocol for memoryview: only len() and integer indexing
 * (which delegates to the mapping subscript) are provided. */
static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
__pyx_memoryview___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_memoryview, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
/* Mapping protocol for memoryview: len(), subscript, and item assignment
 * (deletion rejected in the assignment slot). */
static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
__pyx_memoryview___len__, /*mp_length*/
__pyx_memoryview___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
};
/* Buffer protocol for memoryview: only the new-style getbuffer is
 * implemented; the legacy Python 2 buffer slots are left empty and the
 * #if guards account for layout differences between interpreter versions. */
static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
#if PY_VERSION_HEX >= 0x02060000
__pyx_memoryview_getbuffer, /*bf_getbuffer*/
#endif
#if PY_VERSION_HEX >= 0x02060000
0, /*bf_releasebuffer*/
#endif
};
/* Static PyTypeObject for Cython's internal memoryview type, wiring the
 * slot functions and protocol tables defined above.  Slots are positional;
 * #if blocks track layout changes across Python versions. */
static PyTypeObject __pyx_type___pyx_memoryview = {
PyVarObject_HEAD_INIT(0, 0)
__Pyx_NAMESTR("radiotool.algorithms.par_build_table.memoryview"), /*tp_name*/
sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_memoryview, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#else
0, /*reserved*/
#endif
__pyx_memoryview___repr__, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
&__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
__pyx_memoryview___str__, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_memoryview, /*tp_traverse*/
__pyx_tp_clear_memoryview, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_memoryview, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_memoryview, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_memoryview, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
#if PY_VERSION_HEX >= 0x02060000
0, /*tp_version_tag*/
#endif
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;
/* tp_new for _memoryviewslice: delegate base construction to the
 * memoryview tp_new, then override the vtable with the subclass one and
 * initialize the subclass-only slots (from_object, from_slice). */
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryviewslice_obj *p;
PyObject *o = __pyx_tp_new_memoryview(t, a, k);
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryviewslice_obj *)o);
p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
p->from_object = Py_None; Py_INCREF(Py_None);
p->from_slice.memview = NULL; /* C-level slice struct; no owner yet */
return o;
}
/* tp_dealloc for _memoryviewslice: finalize (PEP 442), untrack, run the
 * subclass __dealloc__ with the refcount guarded, release from_object,
 * then re-track before chaining to the base dealloc — which expects a
 * tracked object and performs its own PyObject_GC_UnTrack. */
static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
#if PY_VERSION_HEX >= 0x030400a1
if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
++Py_REFCNT(o); /* guard against re-entrant dealloc while __dealloc__ runs */
__pyx_memoryviewslice___dealloc__(o);
--Py_REFCNT(o);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->from_object);
PyObject_GC_Track(o); /* base dealloc untracks again */
__pyx_tp_dealloc_memoryview(o);
}
/* GC traversal slot for _memoryviewslice: first walk the base memoryview
 * members, then the subclass-owned from_object reference. */
static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) {
    struct __pyx_memoryviewslice_obj *self = (struct __pyx_memoryviewslice_obj *)o;
    int err = __pyx_tp_traverse_memoryview(o, v, a);
    if (err)
        return err;
    if (self->from_object) {
        err = (*v)(self->from_object, a);
        if (err)
            return err;
    }
    return 0;
}
/* GC clear slot for _memoryviewslice: clear the base memoryview members,
 * swap from_object for None (releasing the old reference after the swap),
 * and release the C-level slice's memview reference. */
static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
    struct __pyx_memoryviewslice_obj *self = (struct __pyx_memoryviewslice_obj *)o;
    PyObject *old_from_object;
    __pyx_tp_clear_memoryview(o);
    old_from_object = (PyObject *)self->from_object;
    Py_INCREF(Py_None);
    self->from_object = Py_None;
    Py_XDECREF(old_from_object);
    __PYX_XDEC_MEMVIEW(&self->from_slice, 1);
    return 0;
}
/* Property getter thunk for _memoryviewslice.base. */
static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_memoryviewslice__get__base(o);
}
/* Method table for _memoryviewslice: no own methods, sentinel only
 * (methods are inherited from the memoryview base type). */
static PyMethodDef __pyx_methods__memoryviewslice[] = {
{0, 0, 0, 0}
};
/* Read-only properties on _memoryviewslice: overrides "base" only. */
static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
{(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, 0, 0},
{0, 0, 0, 0, 0}
};
/* Static PyTypeObject for _memoryviewslice (subclass of memoryview; tp_base
 * is patched in at module init).  repr/str slots are filled only on PyPy,
 * where slot inheritance needs the explicit entries; CPython inherits them
 * from the base type. */
static PyTypeObject __pyx_type___pyx_memoryviewslice = {
PyVarObject_HEAD_INIT(0, 0)
__Pyx_NAMESTR("radiotool.algorithms.par_build_table._memoryviewslice"), /*tp_name*/
sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#else
0, /*reserved*/
#endif
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___repr__, /*tp_repr*/
#else
0, /*tp_repr*/
#endif
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___str__, /*tp_str*/
#else
0, /*tp_str*/
#endif
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
__Pyx_DOCSTR("Internal class for passing memoryview slices to Python"), /*tp_doc*/
__pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
__pyx_tp_clear__memoryviewslice, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods__memoryviewslice, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets__memoryviewslice, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new__memoryviewslice, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
#if PY_VERSION_HEX >= 0x02060000
0, /*tp_version_tag*/
#endif
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
};
/* Module-level function table: exposes build_table(...) as the module's
 * single public entry point. */
static PyMethodDef __pyx_methods[] = {
{__Pyx_NAMESTR("build_table"), (PyCFunction)__pyx_pw_9radiotool_10algorithms_15par_build_table_1build_table, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)},
{0, 0, 0, 0}
};
/* Python 3 module definition (Python 2 uses Py_InitModule4 instead).
 * m_size = -1: module state kept in globals, module not reinitializable. */
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef __pyx_moduledef = {
#if PY_VERSION_HEX < 0x03020000
{ PyObject_HEAD_INIT(NULL) NULL, 0, NULL },
#else
PyModuleDef_HEAD_INIT,
#endif
__Pyx_NAMESTR("par_build_table"),
0, /* m_doc */
-1, /* m_size */
__pyx_methods /* m_methods */,
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
/* Interned-string table, materialized once by __Pyx_InitStrings().  Entry
 * fields: target global, C literal, byte length, then flags (encoding /
 * is_unicode / intern / is_identifier) that select bytes vs str vs unicode
 * objects per Python version.  Values must not be edited by hand. */
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1},
{&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0},
{&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0},
{&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1},
{&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0},
{&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
{&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0},
{&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
{&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0},
{&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0},
{&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1},
{&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0},
{&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
{&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1},
{&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1},
{&__pyx_n_b_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 0, 1},
{&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
{&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1},
{&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1},
{&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0},
{&__pyx_n_s_d, __pyx_k_d, sizeof(__pyx_k_d), 0, 0, 1, 1},
{&__pyx_n_s_decode, __pyx_k_decode, sizeof(__pyx_k_decode), 0, 0, 1, 1},
{&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1},
{&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1},
{&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
{&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1},
{&__pyx_n_s_first_pause, __pyx_k_first_pause, sizeof(__pyx_k_first_pause), 0, 0, 1, 1},
{&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1},
{&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
{&__pyx_n_b_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 0, 1},
{&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1},
{&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0},
{&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
{&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1},
{&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_max_beats, __pyx_k_max_beats, sizeof(__pyx_k_max_beats), 0, 0, 1, 1},
{&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1},
{&__pyx_n_s_min_beats, __pyx_k_min_beats, sizeof(__pyx_k_min_beats), 0, 0, 1, 1},
{&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
{&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1},
{&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1},
{&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1},
{&__pyx_n_s_penalty, __pyx_k_penalty, sizeof(__pyx_k_penalty), 0, 0, 1, 1},
{&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
{&__pyx_n_s_pyx_releasebuffer, __pyx_k_pyx_releasebuffer, sizeof(__pyx_k_pyx_releasebuffer), 0, 0, 1, 1},
{&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
{&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
{&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1},
{&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1},
{&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1},
{&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0},
{&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_n_s_trans_cost, __pyx_k_trans_cost, sizeof(__pyx_k_trans_cost), 0, 0, 1, 1},
{&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0},
{&__pyx_kp_s_unable_to_allocate_shape_or_stri, __pyx_k_unable_to_allocate_shape_or_stri, sizeof(__pyx_k_unable_to_allocate_shape_or_stri), 0, 0, 1, 0},
{&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1},
{&__pyx_n_s_xrange, __pyx_k_xrange, sizeof(__pyx_k_xrange), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
/* Resolve and cache the builtins this module uses (range, xrange,
 * MemoryError, ...).  On Python 3, xrange is aliased to range.  Returns 0
 * on success, -1 with the Python error state set on failure; the
 * __pyx_filename/lineno bookkeeping maps failures back to .pyx lines. */
static int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#if PY_MAJOR_VERSION >= 3
__pyx_builtin_xrange = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_builtin_xrange = __Pyx_GetBuiltinName(__pyx_n_s_xrange); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
__pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 363; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
return 0;
__pyx_L1_error:;
return -1;
}
/* Build and cache the constant tuples (__pyx_tuple_*) used as call
 * arguments throughout the module — mostly single-element tuples wrapping
 * error-message strings and typecodes.  The interleaved block comments
 * are Cython's source mapping back to the originating .pyx/.pxd lines.
 * Returns 0 on success, -1 with the error state set on failure. */
static int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "radiotool/algorithms/par_build_table.pyx":512
*
* # double arrays for use throughout the computation
* cdef array dtemplate = array('d') # <<<<<<<<<<<<<<
* cdef array array1, array2, array3, array4, array5, array6, array7, array8
* cdef double[:] mv1, mv2, mv3, mv4, mv5, mv6, f, g
*/
__pyx_tuple_ = PyTuple_Pack(1, __pyx_n_s_d); if (unlikely(!__pyx_tuple_)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple_);
__Pyx_GIVEREF(__pyx_tuple_);
/* "radiotool/algorithms/par_build_table.pyx":532
* mv6 = array8
*
* cdef array ar, template = array('i') # <<<<<<<<<<<<<<
* ar = clone(template, penalty.shape[1], False)
* cdef int[:] global_path = ar
*/
__pyx_tuple__2 = PyTuple_Pack(1, __pyx_n_s_i); if (unlikely(!__pyx_tuple__2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 532; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
/* "View.MemoryView":124
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if self.itemsize <= 0:
*/
__pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__3);
__Pyx_GIVEREF(__pyx_tuple__3);
/* "View.MemoryView":127
*
* if self.itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* encode = getattr(format, 'encode', None)
*/
__pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
/* "View.MemoryView":131
* encode = getattr(format, 'encode', None)
* if encode:
* format = encode('ASCII') # <<<<<<<<<<<<<<
* self._format = format
* self.format = self._format
*/
__pyx_tuple__5 = PyTuple_Pack(1, __pyx_n_s_ASCII); if (unlikely(!__pyx_tuple__5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
/* "View.MemoryView":141
* free(self._shape)
* free(self._strides)
* raise MemoryError("unable to allocate shape or strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_or_stri); if (unlikely(!__pyx_tuple__6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "View.MemoryView":166
* decode = getattr(mode, 'decode', None)
* if decode:
* mode = decode('ASCII') # <<<<<<<<<<<<<<
* self.mode = mode
*
*/
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_n_s_ASCII); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "View.MemoryView":174
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "View.MemoryView":190
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "View.MemoryView":453
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__10)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 453; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
/* "View.MemoryView":529
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([self.view.strides[i] for i in xrange(self.view.ndim)])
*/
__pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__11)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 529; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
/* "View.MemoryView":646
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_tuple__12 = PyTuple_Pack(1, Py_None); if (unlikely(!__pyx_tuple__12)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__12);
__Pyx_GIVEREF(__pyx_tuple__12);
/* "View.MemoryView":649
* seen_ellipsis = True
* else:
* result.append(slice(None)) # <<<<<<<<<<<<<<
* have_slices = True
* else:
*/
__pyx_tuple__13 = PyTuple_Pack(1, Py_None); if (unlikely(!__pyx_tuple__13)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 649; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__13);
__Pyx_GIVEREF(__pyx_tuple__13);
/* "View.MemoryView":660
* nslices = ndim - len(result)
* if nslices:
* result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
*
* return have_slices or nslices, tuple(result)
*/
__pyx_tuple__14 = PyTuple_Pack(1, Py_None); if (unlikely(!__pyx_tuple__14)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__14);
__Pyx_GIVEREF(__pyx_tuple__14);
/* "View.MemoryView":668
* for i in range(ndim):
* if suboffsets[i] >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__15)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 668; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__15);
__Pyx_GIVEREF(__pyx_tuple__15);
/* "View.MemoryView":282
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__16)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 282; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__16);
__Pyx_GIVEREF(__pyx_tuple__16);
/* "View.MemoryView":283
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__17)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__17);
__Pyx_GIVEREF(__pyx_tuple__17);
/* "View.MemoryView":284
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__18)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__18);
__Pyx_GIVEREF(__pyx_tuple__18);
/* "View.MemoryView":287
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__19)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 287; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__19);
__Pyx_GIVEREF(__pyx_tuple__19);
/* "View.MemoryView":288
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__20)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__20);
__Pyx_GIVEREF(__pyx_tuple__20);
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
/* Initialize module-level globals: ensure the GIL/threads are set up,
 * intern the string table, and cache the small int constants 0/1/-1.
 * Returns 0 on success, -1 with the error state set on failure. */
static int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#ifdef WITH_THREAD
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
return 0;
__pyx_L1_error:;
return -1;
}
/* Module initialization entry point for radiotool.algorithms.par_build_table.
   Cython-generated: registers the module, its constants, the memoryview
   support types, and runs the module-level execution code.
   Python 2 entry point returns void; Python 3 returns the module (or NULL). */
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initpar_build_table(void); /*proto*/
PyMODINIT_FUNC initpar_build_table(void)
#else
PyMODINIT_FUNC PyInit_par_build_table(void); /*proto*/
PyMODINIT_FUNC PyInit_par_build_table(void)
#endif
{
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannyDeclarations
/* Optional refcount-debugging hook; fatal if CYTHON_REFNANNY was compiled
   in but the refnanny module cannot be imported. */
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_par_build_table(void)", 0);
if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#ifdef __Pyx_CyFunction_USED
if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
/* Py2 uses Py_InitModule4 (borrowed ref, hence the XINCREF);
   Py3 creates the module from the static moduledef. */
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4(__Pyx_NAMESTR("par_build_table"), __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#if CYTHON_COMPILING_IN_PYPY
Py_INCREF(__pyx_b);
#endif
if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
/*--- Initialize various global constants etc. ---*/
if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
if (__pyx_module_is_main_radiotool__algorithms__par_build_table) {
if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
}
/* Register the fully-qualified module name in sys.modules (Py3 only). */
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (!PyDict_GetItemString(modules, "radiotool.algorithms.par_build_table")) {
if (unlikely(PyDict_SetItemString(modules, "radiotool.algorithms.par_build_table", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
}
#endif
/*--- Builtin init code ---*/
if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Constants init code ---*/
if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Global init code ---*/
/* Memoryview layout-enum globals start as None; replaced with Enum
   instances in the execution code below. */
generic = Py_None; Py_INCREF(Py_None);
strided = Py_None; Py_INCREF(Py_None);
indirect = Py_None; Py_INCREF(Py_None);
contiguous = Py_None; Py_INCREF(Py_None);
indirect_contiguous = Py_None; Py_INCREF(Py_None);
/*--- Variable export code ---*/
/*--- Function export code ---*/
/*--- Type init code ---*/
/* Ready the array / MemviewEnum / memoryview / memoryviewslice types and
   wire up the memoryview vtables before PyType_Ready. */
if (PyType_Ready(&__pyx_type___pyx_array) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_type___pyx_array.tp_print = 0;
__pyx_array_type = &__pyx_type___pyx_array;
if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 275; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_type___pyx_MemviewEnum.tp_print = 0;
__pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
__pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
__pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
__pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
__pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
__pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
__pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
__pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 308; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_type___pyx_memoryview.tp_print = 0;
if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 308; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_memoryview_type = &__pyx_type___pyx_memoryview;
/* _memoryviewslice inherits the memoryview vtable, overriding the two
   item-conversion slots. */
__pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
__pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
__pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
__pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
__pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 930; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_type___pyx_memoryviewslice.tp_print = 0;
if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 930; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
/*--- Type import code ---*/
__pyx_ptype_7cpython_5array_array = __Pyx_ImportType("array", "array", sizeof(arrayobject), 0); if (unlikely(!__pyx_ptype_7cpython_5array_array)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 58; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Variable import code ---*/
/*--- Function import code ---*/
/*--- Execution code ---*/
/* Module-level statements from the .pyx and View.MemoryView follow. */
/* "radiotool/algorithms/par_build_table.pyx":1
 * #cython: infer_types=True # <<<<<<<<<<<<<<
 * #cython: boundscheck=False
 * #cython: wraparound=False
 */
__pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":207
 * info.obj = self
 *
 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
 *
 * def __dealloc__(array self):
 */
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_array_type);
/* "View.MemoryView":282
 * return self.name
 *
 * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
 * cdef strided = Enum("<strided and direct>") # default
 * cdef indirect = Enum("<strided and indirect>")
 */
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 282; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(generic);
__Pyx_DECREF_SET(generic, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":283
 *
 * cdef generic = Enum("<strided and direct or indirect>")
 * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
 * cdef indirect = Enum("<strided and indirect>")
 *
 */
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(strided);
__Pyx_DECREF_SET(strided, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":284
 * cdef generic = Enum("<strided and direct or indirect>")
 * cdef strided = Enum("<strided and direct>") # default
 * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect);
__Pyx_DECREF_SET(indirect, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":287
 *
 *
 * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
 * cdef indirect_contiguous = Enum("<contiguous and indirect>")
 *
 */
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 287; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(contiguous);
__Pyx_DECREF_SET(contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":288
 *
 * cdef contiguous = Enum("<contiguous and direct>")
 * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect_contiguous);
__Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":504
 * info.obj = self
 *
 * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 504; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 504; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryview_type);
/* "View.MemoryView":961
 * return self.from_object
 *
 * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 961; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 961; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryviewslice_type);
/* "View.MemoryView":1368
 *
 * @cname('__pyx_memoryview__slice_assign_scalar')
 * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
 * Py_ssize_t *strides, int ndim,
 * size_t itemsize, void *item) nogil:
 */
goto __pyx_L0;
/* Error path: report the failing location; on Py3 distinguish "module
   object exists" (add traceback, drop module) from "creation failed". */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
if (__pyx_m) {
__Pyx_AddTraceback("init radiotool.algorithms.par_build_table", __pyx_clineno, __pyx_lineno, __pyx_filename);
Py_DECREF(__pyx_m); __pyx_m = 0;
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init radiotool.algorithms.par_build_table");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if PY_MAJOR_VERSION < 3
return;
#else
return __pyx_m;
#endif
}
/* Runtime support code */
#if CYTHON_REFNANNY
/* Import module *modname* and return the pointer stored in its
   RefNannyAPI attribute, or NULL (with an exception set) on failure. */
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
    void *api = NULL;
    PyObject *module = PyImport_ImportModule((char *)modname);
    if (module) {
        PyObject *attr = PyObject_GetAttrString(module, (char *)"RefNannyAPI");
        if (attr) {
            api = PyLong_AsVoidPtr(attr);
            Py_DECREF(attr);
        }
        Py_DECREF(module);
    }
    return (__Pyx_RefNannyAPIStruct *)api;
}
#endif /* CYTHON_REFNANNY */
/* Look up *name* on the builtins module (__pyx_b).
   Returns a new reference, or NULL with NameError set if absent. */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
    PyObject *value = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
    if (value != NULL)
        return value;
    PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
        "name '%U' is not defined", name);
#else
        "name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
    return NULL;
}
/* Runtime byte-order probe: store 1 in an unsigned int and inspect the
   byte at the lowest address. Returns nonzero on little-endian hosts. */
static CYTHON_INLINE int __Pyx_IsLittleEndian(void) {
    union {
        unsigned int value;
        unsigned char bytes[sizeof(unsigned int)];
    } probe;
    probe.value = 1u;
    return probe.bytes[0] != 0;
}
/* Initialize a buffer-format parsing context over *type*.
   *stack* is caller-provided scratch (one element per struct nesting
   level); ctx->head points at the current stack top. All parser state
   (pack mode, pending counts, current type char) starts in its neutral
   '@'/1/0 configuration. */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
                              __Pyx_BufFmt_StackElem* stack,
                              __Pyx_TypeInfo* type) {
  stack[0].field = &ctx->root;
  stack[0].parent_offset = 0;
  ctx->root.type = type;
  ctx->root.name = "buffer dtype";
  ctx->root.offset = 0;
  ctx->head = stack;
  ctx->head->field = &ctx->root;
  ctx->fmt_offset = 0;
  ctx->head->parent_offset = 0;
  ctx->new_packmode = '@';
  ctx->enc_packmode = '@';
  ctx->new_count = 1;
  ctx->enc_count = 0;
  ctx->enc_type = 0;
  ctx->is_complex = 0;
  ctx->is_valid_array = 0;
  ctx->struct_alignment = 0;
  /* Descend through leading sub-array ('S') wrappers so head points at
     the first scalar/struct field to be matched. */
  while (type->typegroup == 'S') {
    ++ctx->head;
    ctx->head->field = type->fields;
    ctx->head->parent_offset = 0;
    type = type->fields->type;
  }
}
/* Parse a run of decimal digits at *ts into an int.
 * On success advances *ts past the digits and returns the value;
 * returns -1 (with *ts unchanged) if the first character is not a digit.
 * NOTE(review): no overflow guard — counts in buffer format strings are
 * assumed small. */
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
    int count;
    const char* t = *ts;
    if (*t < '0' || *t > '9') {
        return -1;
    } else {
        count = *t++ - '0';
        /* BUG FIX: the upper bound must be inclusive ("<= '9'"). The
           original "< '9'" stopped at any non-leading '9', so e.g. "19"
           parsed as 1 and left *ts pointing at the '9'. */
        while (*t >= '0' && *t <= '9') {
            count *= 10;
            count += *t++ - '0';
        }
    }
    *ts = t;
    return count;
}
/* Like __Pyx_BufFmt_ParseNumber, but sets a ValueError when no number
   is present. Returns the parsed value, or -1 with an exception set. */
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
    int value = __Pyx_BufFmt_ParseNumber(ts);
    if (value != -1)
        return value;
    /* First char was not a digit */
    PyErr_Format(PyExc_ValueError,
                 "Does not understand character buffer dtype format string ('%c')", **ts);
    return -1;
}
/* Set a ValueError naming a format character the parser cannot handle. */
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
  PyErr_Format(PyExc_ValueError,
               "Unexpected format string character: '%c'", ch);
}
/* Human-readable name for a struct-module format character, used when
   composing buffer dtype mismatch error messages. is_complex selects the
   complex variant for the floating-point characters. */
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
    /* Floating-point characters are the only ones affected by is_complex. */
    if (ch == 'f')
        return is_complex ? "'complex float'" : "'float'";
    if (ch == 'd')
        return is_complex ? "'complex double'" : "'double'";
    if (ch == 'g')
        return is_complex ? "'complex long double'" : "'long double'";
    switch (ch) {
        case 0:   return "end";
        case 'c': return "'char'";
        case 'b': return "'signed char'";
        case 'B': return "'unsigned char'";
        case 'h': return "'short'";
        case 'H': return "'unsigned short'";
        case 'i': return "'int'";
        case 'I': return "'unsigned int'";
        case 'l': return "'long'";
        case 'L': return "'unsigned long'";
        case 'q': return "'long long'";
        case 'Q': return "'unsigned long long'";
        case 'T': return "a struct";
        case 'O': return "Python object";
        case 'P': return "a pointer";
        case 's':
        case 'p': return "a string";
        default:  return "unparseable format string";
    }
}
/* "Standard" (platform-independent, struct-module '<'/'>'/'='/'!' mode)
   size in bytes for format character ch; complex floats count double.
   Returns 0 with an exception set for 'g' (no standard size) and for
   unknown characters. */
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p':
            return 1;
        case 'h': case 'H':
            return 2;
        case 'i': case 'I': case 'l': case 'L':
            return 4;
        case 'q': case 'Q':
            return 8;
        case 'f':
            return is_complex ? 8 : 4;
        case 'd':
            return is_complex ? 16 : 8;
        case 'g':
            /* long double has no portable standard size. */
            PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
            return 0;
        case 'O': case 'P':
            return sizeof(void*);
        default:
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
    }
}
/* Native ('@'/'^' mode) size in bytes of the C type behind format
   character ch on this platform; a complex variant occupies two scalars.
   Returns 0 with an exception set for unknown characters. */
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
    switch (ch) {
        case 'c': case 'b': case 'B': case 's': case 'p':
            return 1;
        case 'h': case 'H':
            return sizeof(short);
        case 'i': case 'I':
            return sizeof(int);
        case 'l': case 'L':
            return sizeof(long);
#ifdef HAVE_LONG_LONG
        case 'q': case 'Q':
            return sizeof(PY_LONG_LONG);
#endif
        case 'f':
            return sizeof(float) * (is_complex ? 2 : 1);
        case 'd':
            return sizeof(double) * (is_complex ? 2 : 1);
        case 'g':
            return sizeof(long double) * (is_complex ? 2 : 1);
        case 'O': case 'P':
            return sizeof(void*);
        default:
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
    }
}
/* Alignment probe structs: in "struct { char c; T x; }" the member x is
   placed at T's alignment boundary, so sizeof(struct) - sizeof(T) yields
   the alignment requirement of T (used by
   __Pyx_BufFmt_TypeCharToAlignment). */
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
/* Native alignment requirement of the C type for format character ch,
   measured via the __Pyx_st_* probe structs above. Returns 0 with an
   exception set for unknown characters. */
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p':
            return 1;
        case 'h': case 'H':
            return sizeof(__Pyx_st_short) - sizeof(short);
        case 'i': case 'I':
            return sizeof(__Pyx_st_int) - sizeof(int);
        case 'l': case 'L':
            return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
        case 'q': case 'Q':
            return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
        case 'f':
            return sizeof(__Pyx_st_float) - sizeof(float);
        case 'd':
            return sizeof(__Pyx_st_double) - sizeof(double);
        case 'g':
            return sizeof(__Pyx_st_longdouble) - sizeof(long double);
        case 'P': case 'O':
            return sizeof(__Pyx_st_void_p) - sizeof(void*);
        default:
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
    }
}
/* Padding probe structs: "struct { T x; char c; }" measures the padding
   inserted at the end of a struct to align it on its first member.
   This is probably the same as the alignment probes above, but there is
   no guarantee. */
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
/* Tail padding of a struct whose first member has format character ch,
   measured via the __Pyx_pad_* probe structs above. Returns 0 with an
   exception set for unknown characters. */
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p':
            return 1;
        case 'h': case 'H':
            return sizeof(__Pyx_pad_short) - sizeof(short);
        case 'i': case 'I':
            return sizeof(__Pyx_pad_int) - sizeof(int);
        case 'l': case 'L':
            return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
        case 'q': case 'Q':
            return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
        case 'f':
            return sizeof(__Pyx_pad_float) - sizeof(float);
        case 'd':
            return sizeof(__Pyx_pad_double) - sizeof(double);
        case 'g':
            return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
        case 'P': case 'O':
            return sizeof(__Pyx_pad_void_p) - sizeof(void*);
        default:
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
    }
}
/* Classify format character ch into a type group:
   'H' char, 'I' signed integer, 'U' unsigned integer, 'R' real float,
   'C' complex float, 'O' Python object, 'P' pointer.
   Returns 0 with an exception set for unknown characters. */
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
    if (ch == 'c')
        return 'H';
    if (ch == 'b' || ch == 'h' || ch == 'i' ||
        ch == 'l' || ch == 'q' || ch == 's' || ch == 'p')
        return 'I';
    if (ch == 'B' || ch == 'H' || ch == 'I' || ch == 'L' || ch == 'Q')
        return 'U';
    if (ch == 'f' || ch == 'd' || ch == 'g')
        return is_complex ? 'C' : 'R';
    if (ch == 'O')
        return 'O';
    if (ch == 'P')
        return 'P';
    __Pyx_BufFmt_RaiseUnexpectedChar(ch);
    return 0;
}
/* Set a ValueError describing a dtype mismatch between the expected
   field (from ctx->head) and the format character actually encountered
   (ctx->enc_type). Two message forms: a top-level mismatch (or premature
   end when head is NULL), and a mismatch inside a named struct field. */
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
  if (ctx->head == NULL || ctx->head->field == &ctx->root) {
    const char* expected;
    const char* quote;
    if (ctx->head == NULL) {
      /* Parser already consumed the whole dtype: we expected the end. */
      expected = "end";
      quote = "";
    } else {
      expected = ctx->head->field->type->name;
      quote = "'";
    }
    PyErr_Format(PyExc_ValueError,
                 "Buffer dtype mismatch, expected %s%s%s but got %s",
                 quote, expected, quote,
                 __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
  } else {
    /* Nested case: name both the field and its parent struct. */
    __Pyx_StructField* field = ctx->head->field;
    __Pyx_StructField* parent = (ctx->head - 1)->field;
    PyErr_Format(PyExc_ValueError,
                 "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
                 field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
                 parent->type->name, field->name);
  }
}
/* Consume the pending "type chunk" (ctx->enc_type repeated
   ctx->enc_count times) accumulated by __Pyx_BufFmt_CheckString and
   match it against the expected dtype fields at ctx->head, advancing
   ctx->fmt_offset. Returns 0 on success, -1 with an exception set on
   any mismatch. */
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
  char group;
  size_t size, offset, arraysize = 1;
  /* Nothing pending: nothing to match. */
  if (ctx->enc_type == 0) return 0;
  /* Expected field is a C array: validate dimensions first. */
  if (ctx->head->field->type->arraysize[0]) {
    int i, ndim = 0;
    /* 's'/'p' handle strings as a 1-d char array whose length must equal
       the first declared dimension. */
    if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
      ctx->is_valid_array = ctx->head->field->type->ndim == 1;
      ndim = 1;
      if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
        PyErr_Format(PyExc_ValueError,
                     "Expected a dimension of size %zu, got %zu",
                     ctx->head->field->type->arraysize[0], ctx->enc_count);
        return -1;
      }
    }
    /* is_valid_array is set by __pyx_buffmt_parse_array for "(...)". */
    if (!ctx->is_valid_array) {
      PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
                   ctx->head->field->type->ndim, ndim);
      return -1;
    }
    for (i = 0; i < ctx->head->field->type->ndim; i++) {
      arraysize *= ctx->head->field->type->arraysize[i];
    }
    ctx->is_valid_array = 0;
    ctx->enc_count = 1;
  }
  group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
  do {
    __Pyx_StructField* field = ctx->head->field;
    __Pyx_TypeInfo* type = field->type;
    /* Size depends on pack mode: native ('@'/'^') vs standard. */
    if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
      size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
    } else {
      size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
    }
    /* Native mode also applies natural alignment to fmt_offset. */
    if (ctx->enc_packmode == '@') {
      size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
      size_t align_mod_offset;
      if (align_at == 0) return -1;
      align_mod_offset = ctx->fmt_offset % align_at;
      if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
      if (ctx->struct_alignment == 0)
          ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
                                                                 ctx->is_complex);
    }
    if (type->size != size || type->typegroup != group) {
      /* A complex field declared as a struct of two scalars: descend into
         its sub-fields and retry. */
      if (type->typegroup == 'C' && type->fields != NULL) {
        size_t parent_offset = ctx->head->parent_offset + field->offset;
        ++ctx->head;
        ctx->head->field = type->fields;
        ctx->head->parent_offset = parent_offset;
        continue;
      }
      /* char-group values match on size alone; anything else is an error. */
      if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
      } else {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return -1;
      }
    }
    /* Offsets in the format string must line up with the C layout. */
    offset = ctx->head->parent_offset + field->offset;
    if (ctx->fmt_offset != offset) {
      PyErr_Format(PyExc_ValueError,
                   "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
                   (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
      return -1;
    }
    ctx->fmt_offset += size;
    if (arraysize)
      ctx->fmt_offset += (arraysize - 1) * size;
    --ctx->enc_count; /* Consume from buffer string */
    /* Advance head to the next expected field, popping finished structs
       and descending into newly entered ones. */
    while (1) {
      if (field == &ctx->root) {
        ctx->head = NULL;
        if (ctx->enc_count != 0) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return -1;
        }
        break; /* breaks both loops as ctx->enc_count == 0 */
      }
      ctx->head->field = ++field;
      if (field->type == NULL) {
        --ctx->head;
        field = ctx->head->field;
        continue;
      } else if (field->type->typegroup == 'S') {
        size_t parent_offset = ctx->head->parent_offset + field->offset;
        if (field->type->fields->type == NULL) continue; /* empty struct */
        field = field->type->fields;
        ++ctx->head;
        ctx->head->field = field;
        ctx->head->parent_offset = parent_offset;
        break;
      } else {
        break;
      }
    }
  } while (ctx->enc_count);
  ctx->enc_type = 0;
  ctx->is_complex = 0;
  return 0;
}
/* Parse a "(d1,d2,...)" array-dimension suffix at *tsp (which points at
 * the opening '('), validating each dimension against the expected
 * dtype's arraysize. On success advances *tsp past the ')' and returns
 * Py_None (borrowed, used only as a non-NULL success flag); on failure
 * returns NULL with an exception set. */
static CYTHON_INLINE PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
    const char *ts = *tsp;
    int i = 0, number;
    int ndim = ctx->head->field->type->ndim;
    ++ts;  /* skip '(' */
    /* "n(...)" — repeated arrays — is not supported. */
    if (ctx->new_count != 1) {
        PyErr_SetString(PyExc_ValueError,
                        "Cannot handle repeated arrays in format string");
        return NULL;
    }
    /* Flush any pending scalar chunk before matching dimensions. */
    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
    while (*ts && *ts != ')') {
        switch (*ts) {
            /* BUG FIX: the original executed a bare 'continue' here
               without advancing ts, so any whitespace between the
               parentheses looped forever. Skip the character first. */
            case ' ': case '\f': case '\r': case '\n': case '\t': case '\v':
                ++ts;
                continue;
            default:
                break; /* not a 'break' in the loop */
        }
        number = __Pyx_BufFmt_ExpectNumber(&ts);
        if (number == -1) return NULL;
        /* Each parsed dimension must equal the declared one. */
        if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
            return PyErr_Format(PyExc_ValueError,
                        "Expected a dimension of size %zu, got %d",
                        ctx->head->field->type->arraysize[i], number);
        if (*ts != ',' && *ts != ')')
            return PyErr_Format(PyExc_ValueError,
                                "Expected a comma in format string, got '%c'", *ts);
        if (*ts == ',') ts++;
        i++;
    }
    if (i != ndim)
        return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
                            ctx->head->field->type->ndim, i);
    if (!*ts) {
        PyErr_SetString(PyExc_ValueError,
                        "Unexpected end of format string, expected ')'");
        return NULL;
    }
    /* Tells __Pyx_BufFmt_ProcessTypeChunk the dimensions were checked. */
    ctx->is_valid_array = 1;
    ctx->new_count = 1;
    *tsp = ++ts;  /* skip ')' */
    return Py_None;
}
/* Validate the struct-module format string *ts* against the expected
   dtype in *ctx*. Scalar runs are accumulated into ctx (enc_type /
   enc_count) and flushed through __Pyx_BufFmt_ProcessTypeChunk.
   Returns a pointer past the consumed portion on success (at the NUL, or
   just past a closing '}' for substructs), or NULL with an exception set. */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
  int got_Z = 0;  /* saw a 'Z' complex prefix for the next float char */
  while (1) {
    switch(*ts) {
      case 0:
        /* End of format string: everything pending must be consumed and
           the expected-field stack must be exhausted. */
        if (ctx->enc_type != 0 && ctx->head == NULL) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return NULL;
        }
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        if (ctx->head != NULL) {
          __Pyx_BufFmt_RaiseExpected(ctx);
          return NULL;
        }
        return ts;
      case ' ':
      case 10:
      case 13:
        /* Skip space / LF / CR between codes. */
        ++ts;
        break;
      case '<':
        /* Explicit little-endian only allowed on little-endian hosts. */
        if (!__Pyx_IsLittleEndian()) {
          PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
          return NULL;
        }
        ctx->new_packmode = '=';
        ++ts;
        break;
      case '>':
      case '!':
        /* Explicit big-endian only allowed on big-endian hosts. */
        if (__Pyx_IsLittleEndian()) {
          PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
          return NULL;
        }
        ctx->new_packmode = '=';
        ++ts;
        break;
      case '=':
      case '@':
      case '^':
        ctx->new_packmode = *ts++;
        break;
      case 'T': /* substruct */
        {
          /* "T{...}" — recurse into the braced sub-format, repeated
             struct_count times if preceded by a count. */
          const char* ts_after_sub;
          size_t i, struct_count = ctx->new_count;
          size_t struct_alignment = ctx->struct_alignment;
          ctx->new_count = 1;
          ++ts;
          if (*ts != '{') {
            PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
            return NULL;
          }
          if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
          ctx->enc_type = 0; /* Erase processed last struct element */
          ctx->enc_count = 0;
          ctx->struct_alignment = 0;
          ++ts;
          ts_after_sub = ts;
          for (i = 0; i != struct_count; ++i) {
            ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
            if (!ts_after_sub) return NULL;
          }
          ts = ts_after_sub;
          if (struct_alignment) ctx->struct_alignment = struct_alignment;
        }
        break;
      case '}': /* end of substruct; either repeat or move on */
        {
          /* Flush and re-apply the enclosing struct's tail padding. */
          size_t alignment = ctx->struct_alignment;
          ++ts;
          if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
          ctx->enc_type = 0; /* Erase processed last struct element */
          if (alignment && ctx->fmt_offset % alignment) {
            ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
          }
        }
        return ts;
      case 'x':
        /* Pad bytes: advance the offset, match nothing. */
        if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
        ctx->fmt_offset += ctx->new_count;
        ctx->new_count = 1;
        ctx->enc_count = 0;
        ctx->enc_type = 0;
        ctx->enc_packmode = ctx->new_packmode;
        ++ts;
        break;
      case 'Z':
        /* Complex prefix; must be followed by a float character. */
        got_Z = 1;
        ++ts;
        if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
          __Pyx_BufFmt_RaiseUnexpectedChar('Z');
          return NULL;
        } /* fall through */
      case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
      case 'l': case 'L': case 'q': case 'Q':
      case 'f': case 'd': case 'g':
      case 'O': case 's': case 'p':
        /* Scalar code: extend the current run if compatible, otherwise
           flush it and start a new one. */
        if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
            ctx->enc_packmode == ctx->new_packmode) {
          ctx->enc_count += ctx->new_count;
        } else {
          if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
          ctx->enc_count = ctx->new_count;
          ctx->enc_packmode = ctx->new_packmode;
          ctx->enc_type = *ts;
          ctx->is_complex = got_Z;
        }
        ++ts;
        ctx->new_count = 1;
        got_Z = 0;
        break;
      case ':':
        /* ":name:" field annotation — skipped. NOTE(review): assumes a
           matching closing ':' exists; a malformed string would overrun. */
        ++ts;
        while(*ts != ':') ++ts;
        ++ts;
        break;
      case '(':
        /* Array dimensions for the pending code. */
        if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
        break;
      default:
        {
          /* A repeat count for the next code. */
          int number = __Pyx_BufFmt_ExpectNumber(&ts);
          if (number == -1) return NULL;
          ctx->new_count = (size_t)number;
        }
    }
  }
}
/* Put *buf into the sentinel "empty" state used for None/absent buffer
   arguments: NULL data and owner, all-zero shape/strides, and the
   shared -1 suboffsets array. */
static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) {
  buf->obj = NULL;
  buf->buf = NULL;
  buf->suboffsets = __Pyx_minusones;
  buf->shape = __Pyx_zeros;
  buf->strides = __Pyx_zeros;
}
/* Acquire a buffer from *obj* into *buf* and validate it against the
   expected dtype: dimensionality, format string (unless cast != 0), and
   item size. None/NULL yields a zeroed buffer and success. Returns 0 on
   success, -1 with an exception set (and *buf* zeroed) on failure. */
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(
        Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags,
        int nd, int cast, __Pyx_BufFmt_StackElem* stack)
{
  if (obj == Py_None || obj == NULL) {
    __Pyx_ZeroBuffer(buf);
    return 0;
  }
  buf->buf = NULL;
  if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail;
  if (buf->ndim != nd) {
    PyErr_Format(PyExc_ValueError,
                 "Buffer has wrong number of dimensions (expected %d, got %d)",
                 nd, buf->ndim);
    goto fail;
  }
  /* Full format-string check is skipped for casted buffers. */
  if (!cast) {
    __Pyx_BufFmt_Context ctx;
    __Pyx_BufFmt_Init(&ctx, stack, dtype);
    if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
  }
  if ((unsigned)buf->itemsize != dtype->size) {
    PyErr_Format(PyExc_ValueError,
      "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)",
      buf->itemsize, (buf->itemsize > 1) ? "s" : "",
      dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
    goto fail;
  }
  /* Normalize a missing suboffsets array to the -1 sentinel; undone in
     __Pyx_SafeReleaseBuffer. */
  if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
  return 0;
fail:;
  __Pyx_ZeroBuffer(buf);
  return -1;
}
/* Release a buffer filled by __Pyx_GetBufferAndValidate. A NULL buf
   marks the zeroed sentinel state (None buffer) — nothing to release.
   The __Pyx_minusones suboffsets sentinel is undone first. */
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
  if (!info->buf)
    return;
  if (info->suboffsets == __Pyx_minusones)
    info->suboffsets = NULL;
  __Pyx_ReleaseBuffer(info);
}
/* Fill *memviewslice from the Py_buffer owned by *memview*: copy shape,
   strides (synthesizing C-contiguous strides when the buffer provides
   none) and suboffsets (-1 when absent), then record the owner and data
   pointer and bump the acquisition count. Returns 0 on success, -1 with
   an exception set on failure (slice left zeroed). */
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
                        int ndim,
                        __Pyx_memviewslice *memviewslice,
                        int memview_is_new_reference)
{
    __Pyx_RefNannyDeclarations
    int i, retval=-1;
    Py_buffer *buf = &memview->view;
    __Pyx_RefNannySetupContext("init_memviewslice", 0);
    if (!buf) {
        PyErr_SetString(PyExc_ValueError,
            "buf is NULL.");
        goto fail;
    } else if (memviewslice->memview || memviewslice->data) {
        PyErr_SetString(PyExc_ValueError,
            "memviewslice is already initialized!");
        goto fail;
    }
    if (buf->strides) {
        for (i = 0; i < ndim; i++) {
            memviewslice->strides[i] = buf->strides[i];
        }
    } else {
        /* No strides: buffer is C-contiguous; build strides from the
           innermost dimension outward. */
        Py_ssize_t stride = buf->itemsize;
        for (i = ndim - 1; i >= 0; i--) {
            memviewslice->strides[i] = stride;
            stride *= buf->shape[i];
        }
    }
    for (i = 0; i < ndim; i++) {
        memviewslice->shape[i] = buf->shape[i];
        if (buf->suboffsets) {
            memviewslice->suboffsets[i] = buf->suboffsets[i];
        } else {
            memviewslice->suboffsets[i] = -1;
        }
    }
    memviewslice->memview = memview;
    memviewslice->data = (char *)buf->buf;
    /* First acquisition on an existing reference keeps the owner alive. */
    if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
        Py_INCREF(memview);
    }
    retval = 0;
    goto no_fail;
fail:
    memviewslice->memview = 0;
    memviewslice->data = 0;
    retval = -1;
no_fail:
    __Pyx_RefNannyFinishContext();
    return retval;
}
/* Format a printf-style message and abort the process via Py_FatalError.
 * Used when memoryview acquisition counts are found corrupted and no
 * Python exception can be raised safely. Does not return. */
static CYTHON_INLINE void __pyx_fatalerror(const char *fmt, ...) {
    va_list vargs;
    char msg[200];
/* BUG FIX: the original called va_start unconditionally here AND again
   inside the #ifdef below — two va_start calls on the same va_list
   without an intervening va_end is undefined behavior. Keep only the
   conditional form. */
#ifdef HAVE_STDARG_PROTOTYPES
    va_start(vargs, fmt);
#else
    va_start(vargs);
#endif
    vsnprintf(msg, 200, fmt, vargs);
    /* Clean up before the no-return call; the original's va_end after
       Py_FatalError was unreachable. */
    va_end(vargs);
    Py_FatalError(msg);
}
/* Post-increment *acquisition_count while holding *lock*.
   Returns the value it held before the increment. */
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
                                   PyThread_type_lock lock)
{
    int previous;
    PyThread_acquire_lock(lock, 1);
    previous = (*acquisition_count)++;
    PyThread_release_lock(lock);
    return previous;
}
/* Post-decrement *acquisition_count while holding *lock*.
   Returns the value it held before the decrement. */
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
                                   PyThread_type_lock lock)
{
    int previous;
    PyThread_acquire_lock(lock, 1);
    previous = (*acquisition_count)--;
    PyThread_release_lock(lock);
    return previous;
}
/* Acquire one reference on the memoryview object backing `memslice`.
 * A NULL or Py_None memview means an uninitialized slice and is a no-op.
 * The first acquisition (count 0 -> 1) additionally takes a Python-level
 * reference, grabbing the GIL if the caller does not hold it.  A negative
 * acquisition count indicates refcount corruption and aborts the process. */
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
    int first_time;
    struct __pyx_memoryview_obj *memview = memslice->memview;
    if (!memview || (PyObject *) memview == Py_None)
        return; /* allow uninitialized memoryview assignment */
    if (__pyx_get_slice_count(memview) < 0)
        __pyx_fatalerror("Acquisition count is %d (line %d)",
                         __pyx_get_slice_count(memview), lineno);
    /* post-increment: zero means we are the first acquirer */
    first_time = __pyx_add_acquisition_count(memview) == 0;
    if (first_time) {
        if (have_gil) {
            Py_INCREF((PyObject *) memview);
        } else {
            PyGILState_STATE _gilstate = PyGILState_Ensure();
            Py_INCREF((PyObject *) memview);
            PyGILState_Release(_gilstate);
        }
    }
}
/* Release one acquisition of the memoryview backing `memslice`.
 * NULL memview is a no-op; a Py_None sentinel is simply forgotten.
 * The final release (count 1 -> 0) drops the Python-level reference via
 * Py_CLEAR, grabbing the GIL when the caller does not hold it.  The
 * slice's data and memview pointers are always cleared so the slice
 * cannot be used afterwards. */
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
                                             int have_gil, int lineno) {
    int last_time;
    struct __pyx_memoryview_obj *memview = memslice->memview;
    if (!memview ) {
        return;
    } else if ((PyObject *) memview == Py_None) {
        /* slice was assigned None: no reference to release */
        memslice->memview = NULL;
        return;
    }
    if (__pyx_get_slice_count(memview) <= 0)
        __pyx_fatalerror("Acquisition count is %d (line %d)",
                         __pyx_get_slice_count(memview), lineno);
    /* post-decrement: one means we were the last holder */
    last_time = __pyx_sub_acquisition_count(memview) == 1;
    memslice->data = NULL;
    if (last_time) {
        if (have_gil) {
            Py_CLEAR(memslice->memview);
        } else {
            PyGILState_STATE _gilstate = PyGILState_Ensure();
            Py_CLEAR(memslice->memview);
            PyGILState_Release(_gilstate);
        }
    } else {
        memslice->memview = NULL;
    }
}
/* Install (type, value, tb) as the pending exception, stealing the
 * caller's references.  In CPython this writes the thread-state slots
 * directly (avoiding the PyErr_Restore call overhead); elsewhere it
 * falls back to PyErr_Restore. */
static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) {
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    PyThreadState *tstate = PyThreadState_GET();
    tmp_type = tstate->curexc_type;
    tmp_value = tstate->curexc_value;
    tmp_tb = tstate->curexc_traceback;
    tstate->curexc_type = type;
    tstate->curexc_value = value;
    tstate->curexc_traceback = tb;
    /* Release the previous exception only after the new one is installed,
     * so the slots never hold freed objects if a decref runs arbitrary code. */
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
#else
    PyErr_Restore(type, value, tb);
#endif
}
/* Fetch and clear the pending exception, transferring ownership of the
 * three references to the caller (any of them may come back NULL).
 * CPython: steal each thread-state slot directly; otherwise defer to
 * PyErr_Fetch. */
static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_COMPILING_IN_CPYTHON
    PyThreadState *tstate = PyThreadState_GET();
    *type = tstate->curexc_type;
    tstate->curexc_type = 0;
    *value = tstate->curexc_value;
    tstate->curexc_value = 0;
    *tb = tstate->curexc_traceback;
    tstate->curexc_traceback = 0;
#else
    PyErr_Fetch(type, value, tb);
#endif
}
/* Report an exception that cannot be propagated to the caller (e.g. one
 * raised inside a function declared not to raise).  The pending exception
 * is fetched; with full_traceback it is temporarily re-raised and printed,
 * then it is handed to PyErr_WriteUnraisable with `name` as the context
 * object.  clineno/lineno/filename are currently unused. */
static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno,
                                  CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename,
                                  int full_traceback) {
    PyObject *old_exc, *old_val, *old_tb;
    PyObject *ctx;
    __Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
    if (full_traceback) {
        /* incref a copy so PyErr_PrintEx can consume it and we can still
         * restore the original below */
        Py_XINCREF(old_exc);
        Py_XINCREF(old_val);
        Py_XINCREF(old_tb);
        __Pyx_ErrRestore(old_exc, old_val, old_tb);
        PyErr_PrintEx(1);
    }
#if PY_MAJOR_VERSION < 3
    ctx = PyString_FromString(name);
#else
    ctx = PyUnicode_FromString(name);
#endif
    __Pyx_ErrRestore(old_exc, old_val, old_tb);
    if (!ctx) {
        /* could not build the context string: report against None */
        PyErr_WriteUnraisable(Py_None);
    } else {
        PyErr_WriteUnraisable(ctx);
        Py_DECREF(ctx);
    }
}
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
#if PY_VERSION_HEX >= 0x02060000
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
#endif
result = (*call)(func, arg, kw);
#if PY_VERSION_HEX >= 0x02060000
Py_LeaveRecursiveCall();
#endif
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* Raise a TypeError describing a wrong number of positional arguments.
 * `exact` forces the "exactly N" wording; otherwise "at least"/"at most"
 * is chosen from whether too few or too many arguments were found. */
static void __Pyx_RaiseArgtupleInvalid(
    const char* func_name,
    int exact,
    Py_ssize_t num_min,
    Py_ssize_t num_max,
    Py_ssize_t num_found)
{
    const char *qualifier;
    Py_ssize_t expected;
    if (num_found < num_min) {
        qualifier = "at least";
        expected = num_min;
    } else {
        qualifier = "at most";
        expected = num_max;
    }
    if (exact)
        qualifier = "exactly";
    PyErr_Format(PyExc_TypeError,
                 "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
                 func_name, qualifier, expected,
                 (expected == 1) ? "" : "s", num_found);
}
/* Raise TypeError for a keyword argument that was also passed
 * positionally.  Python 3 formats the unicode name directly via %U;
 * Python 2 converts it with PyString_AsString first. */
static void __Pyx_RaiseDoubleKeywordsError(
    const char* func_name,
    PyObject* kw_name)
{
    PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
        "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
        "%s() got multiple values for keyword argument '%s'", func_name,
        PyString_AsString(kw_name));
#endif
}
/* Match keyword arguments in `kwds` against the expected names.
 * `argnames` is a NULL-terminated array of pointers to interned name
 * objects; the first `num_pos_args` entries were already filled from
 * positional arguments.  Recognized keywords are stored into `values`;
 * unknown ones go into `kwds2` when given (the function has **kwargs)
 * or raise TypeError.  Returns 0 on success, -1 with an exception set. */
static int __Pyx_ParseOptionalKeywords(
    PyObject *kwds,
    PyObject **argnames[],
    PyObject *kwds2,
    PyObject *values[],
    Py_ssize_t num_pos_args,
    const char* function_name)
{
    PyObject *key = 0, *value = 0;
    Py_ssize_t pos = 0;
    PyObject*** name;
    PyObject*** first_kw_arg = argnames + num_pos_args;
    while (PyDict_Next(kwds, &pos, &key, &value)) {
        /* fast path: names are interned, so identity comparison usually hits */
        name = first_kw_arg;
        while (*name && (**name != key)) name++;
        if (*name) {
            values[name-argnames] = value;
            continue;
        }
        /* slow path: compare by string contents */
        name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
        if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
            while (*name) {
                /* length pre-check before the full comparison (skipped on
                 * PyPy where the macro may not be available) */
                if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
                        && _PyString_Eq(**name, key)) {
                    values[name-argnames] = value;
                    break;
                }
                name++;
            }
            if (*name) continue;
            else {
                /* unknown keyword: check whether it duplicates a
                 * positionally-bound parameter */
                PyObject*** argname = argnames;
                while (argname != first_kw_arg) {
                    if ((**argname == key) || (
                            (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
                            && _PyString_Eq(**argname, key))) {
                        goto arg_passed_twice;
                    }
                    argname++;
                }
            }
        } else
#endif
        if (likely(PyUnicode_Check(key))) {
            while (*name) {
                int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
                    (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
                    PyUnicode_Compare(**name, key);
                /* PyUnicode_Compare can fail; a negative result with a
                 * pending exception means error, not "less than" */
                if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
                if (cmp == 0) {
                    values[name-argnames] = value;
                    break;
                }
                name++;
            }
            if (*name) continue;
            else {
                PyObject*** argname = argnames;
                while (argname != first_kw_arg) {
                    int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
                        (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
                        PyUnicode_Compare(**argname, key);
                    if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
                    if (cmp == 0) goto arg_passed_twice;
                    argname++;
                }
            }
        } else
            goto invalid_keyword_type;
        if (kwds2) {
            if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
        } else {
            goto invalid_keyword;
        }
    }
    return 0;
arg_passed_twice:
    __Pyx_RaiseDoubleKeywordsError(function_name, key);
    goto bad;
invalid_keyword_type:
    PyErr_Format(PyExc_TypeError,
        "%.200s() keywords must be strings", function_name);
    goto bad;
invalid_keyword:
    PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
        "%.200s() got an unexpected keyword argument '%.200s'",
        function_name, PyString_AsString(key));
#else
        "%s() got an unexpected keyword argument '%U'",
        function_name, key);
#endif
bad:
    return -1;
}
/* Raise UnboundLocalError for a local variable read before assignment. */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
    PyErr_Format(PyExc_UnboundLocalError,
                 "local variable '%s' referenced before assignment",
                 varname);
}
/* Raise TypeError reporting that argument `name` expected `type` but
 * received an object of a different type. */
static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) {
    PyErr_Format(
        PyExc_TypeError,
        "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
        name, type->tp_name, Py_TYPE(obj)->tp_name);
}
/* Validate the type of argument `name`.
 * none_allowed: accept Py_None.  exact: require the exact type (with a
 * Py2 special case treating str/unicode as the basestring family)
 * instead of an instance/subclass check.  Returns 1 on success;
 * otherwise raises TypeError (or SystemError for a NULL `type`) and
 * returns 0. */
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
                                           const char *name, int exact)
{
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (none_allowed && obj == Py_None) return 1;
    else if (exact) {
        if (likely(Py_TYPE(obj) == type)) return 1;
#if PY_MAJOR_VERSION == 2
        else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
    }
    else {
        if (likely(PyObject_TypeCheck(obj, type))) return 1;
    }
    __Pyx_RaiseArgumentTypeInvalid(name, obj, type);
    return 0;
}
#if PY_MAJOR_VERSION < 3
/* Python 2 implementation of the `raise` statement.
 * Accepts either an exception class (value/tb optional) or an exception
 * instance (value must then be absent).  Normalizes the pieces and
 * installs them as the pending exception via __Pyx_ErrRestore, which
 * steals the references acquired here.  `cause` (raise ... from ...) is
 * unused on Python 2. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
                        CYTHON_UNUSED PyObject *cause) {
    Py_XINCREF(type);
    if (!value || value == Py_None)
        value = NULL;
    else
        Py_INCREF(value);
    if (!tb || tb == Py_None)
        tb = NULL;
    else {
        Py_INCREF(tb);
        if (!PyTraceBack_Check(tb)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: arg 3 must be a traceback or None");
            goto raise_error;
        }
    }
#if PY_VERSION_HEX < 0x02050000
    if (PyClass_Check(type)) {
#else
    if (PyType_Check(type)) {
#endif
        /* raising a class: normalize into an instance */
#if CYTHON_COMPILING_IN_PYPY
        if (!value) {
            Py_INCREF(Py_None);
            value = Py_None;
        }
#endif
        PyErr_NormalizeException(&type, &value, &tb);
    } else {
        /* raising an instance: the class is derived from it */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto raise_error;
        }
        value = type;
#if PY_VERSION_HEX < 0x02050000
        if (PyInstance_Check(type)) {
            type = (PyObject*) ((PyInstanceObject*)type)->in_class;
            Py_INCREF(type);
        } else {
            type = 0;
            PyErr_SetString(PyExc_TypeError,
                "raise: exception must be an old-style class or instance");
            goto raise_error;
        }
#else
        type = (PyObject*) Py_TYPE(type);
        Py_INCREF(type);
        if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: exception class must be a subclass of BaseException");
            goto raise_error;
        }
#endif
    }
    __Pyx_ErrRestore(type, value, tb);
    return;
raise_error:
    Py_XDECREF(value);
    Py_XDECREF(type);
    Py_XDECREF(tb);
    return;
}
#else /* Python 3+ */
/* Python 3 implementation of the `raise` statement, including
 * `raise ... from cause`.  Accepts an exception instance or an exception
 * class (instantiating it from `value` when needed), attaches the cause,
 * sets the pending exception, and installs `tb` as the current traceback. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
    PyObject* owned_instance = NULL;
    if (tb == Py_None) {
        tb = 0;
    } else if (tb && !PyTraceBack_Check(tb)) {
        PyErr_SetString(PyExc_TypeError,
            "raise: arg 3 must be a traceback or None");
        goto bad;
    }
    if (value == Py_None)
        value = 0;
    if (PyExceptionInstance_Check(type)) {
        /* raising an instance: a separate value is not allowed */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto bad;
        }
        value = type;
        type = (PyObject*) Py_TYPE(value);
    } else if (PyExceptionClass_Check(type)) {
        PyObject *instance_class = NULL;
        if (value && PyExceptionInstance_Check(value)) {
            /* value is already an instance; keep it if compatible with type */
            instance_class = (PyObject*) Py_TYPE(value);
            if (instance_class != type) {
                if (PyObject_IsSubclass(instance_class, type)) {
                    type = instance_class;
                } else {
                    instance_class = NULL;
                }
            }
        }
        if (!instance_class) {
            /* instantiate the class, treating a tuple value as *args */
            PyObject *args;
            if (!value)
                args = PyTuple_New(0);
            else if (PyTuple_Check(value)) {
                Py_INCREF(value);
                args = value;
            } else
                args = PyTuple_Pack(1, value);
            if (!args)
                goto bad;
            owned_instance = PyObject_Call(type, args, NULL);
            Py_DECREF(args);
            if (!owned_instance)
                goto bad;
            value = owned_instance;
            if (!PyExceptionInstance_Check(value)) {
                PyErr_Format(PyExc_TypeError,
                             "calling %R should have returned an instance of "
                             "BaseException, not %R",
                             type, Py_TYPE(value));
                goto bad;
            }
        }
    } else {
        PyErr_SetString(PyExc_TypeError,
            "raise: exception class must be a subclass of BaseException");
        goto bad;
    }
#if PY_VERSION_HEX >= 0x03030000
    if (cause) {
#else
    if (cause && cause != Py_None) {
#endif
        PyObject *fixed_cause;
        if (cause == Py_None) {
            /* raise ... from None: explicitly suppress context chaining */
            fixed_cause = NULL;
        } else if (PyExceptionClass_Check(cause)) {
            fixed_cause = PyObject_CallObject(cause, NULL);
            if (fixed_cause == NULL)
                goto bad;
        } else if (PyExceptionInstance_Check(cause)) {
            fixed_cause = cause;
            Py_INCREF(fixed_cause);
        } else {
            PyErr_SetString(PyExc_TypeError,
                            "exception causes must derive from "
                            "BaseException");
            goto bad;
        }
        PyException_SetCause(value, fixed_cause); /* steals fixed_cause */
    }
    PyErr_SetObject(type, value);
    if (tb) {
        /* install the explicit traceback over whatever PyErr_SetObject set */
        PyThreadState *tstate = PyThreadState_GET();
        PyObject* tmp_tb = tstate->curexc_traceback;
        if (tb != tmp_tb) {
            Py_INCREF(tb);
            tstate->curexc_traceback = tb;
            Py_XDECREF(tmp_tb);
        }
    }
bad:
    Py_XDECREF(owned_instance);
    return;
}
#endif
/* getattr(o, n): take the optimized string-attribute lookup when `n` is
 * a str (Py3: unicode, Py2: bytes str) on CPython, else the generic
 * PyObject_GetAttr path.  Returns a new reference or NULL with an
 * exception set. */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_COMPILING_IN_CPYTHON
#if PY_MAJOR_VERSION >= 3
    if (likely(PyUnicode_Check(n)))
#else
    if (likely(PyString_Check(n)))
#endif
        return __Pyx_PyObject_GetAttrStr(o, n);
#endif
    return PyObject_GetAttr(o, n);
}
/* getattr(o, n, d): return the attribute, or a new reference to the
 * default `d` when the lookup raises AttributeError.  Any other
 * exception propagates as NULL. */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
    PyObject *attr = __Pyx_GetAttr(o, n);
    if (attr)
        return attr;
    if (!PyErr_ExceptionMatches(PyExc_AttributeError))
        return NULL;
    PyErr_Clear();
    Py_INCREF(d);
    return d;
}
/* Equality/inequality comparison of two objects expected to be bytes.
 * `equals` is Py_EQ or Py_NE; returns 1/0, or -1 on error.  Exact bytes
 * pairs are compared with length and first-byte fast checks before
 * memcmp; mixed or non-bytes operands fall back to rich comparison. */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
    return PyObject_RichCompareBool(s1, s2, equals);
#else
    if (s1 == s2) {
        /* identical objects are equal by definition */
        return (equals == Py_EQ);
    } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
        const char *ps1, *ps2;
        Py_ssize_t length = PyBytes_GET_SIZE(s1);
        if (length != PyBytes_GET_SIZE(s2))
            return (equals == Py_NE);
        ps1 = PyBytes_AS_STRING(s1);
        ps2 = PyBytes_AS_STRING(s2);
        if (ps1[0] != ps2[0]) {
            /* cheap first-byte check avoids memcmp setup cost */
            return (equals == Py_NE);
        } else if (length == 1) {
            return (equals == Py_EQ);
        } else {
            int result = memcmp(ps1, ps2, (size_t)length);
            return (equals == Py_EQ) ? (result == 0) : (result != 0);
        }
    } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
        return (equals == Py_NE);
    } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
        return (equals == Py_NE);
    } else {
        int result;
        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
        if (!py_result)
            return -1;
        result = __Pyx_PyObject_IsTrue(py_result);
        Py_DECREF(py_result);
        return result;
    }
#endif
}
/* Equality/inequality comparison of two objects expected to be unicode.
 * `equals` is Py_EQ or Py_NE; returns 1/0, or -1 on error.  On Python 2
 * a str operand is coerced to unicode first (or both-str pairs are
 * delegated to __Pyx_PyBytes_Equals).  Exact unicode pairs are compared
 * by length, kind, first character, then memcmp of the raw data; other
 * combinations fall back to rich comparison. */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
    return PyObject_RichCompareBool(s1, s2, equals);
#else
#if PY_MAJOR_VERSION < 3
    PyObject* owned_ref = NULL;  /* holds any str->unicode coercion result */
#endif
    int s1_is_unicode, s2_is_unicode;
    if (s1 == s2) {
        goto return_eq;
    }
    s1_is_unicode = PyUnicode_CheckExact(s1);
    s2_is_unicode = PyUnicode_CheckExact(s2);
#if PY_MAJOR_VERSION < 3
    if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
        owned_ref = PyUnicode_FromObject(s2);
        if (unlikely(!owned_ref))
            return -1;
        s2 = owned_ref;
        s2_is_unicode = 1;
    } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
        owned_ref = PyUnicode_FromObject(s1);
        if (unlikely(!owned_ref))
            return -1;
        s1 = owned_ref;
        s1_is_unicode = 1;
    } else if (((!s2_is_unicode) & (!s1_is_unicode))) {
        return __Pyx_PyBytes_Equals(s1, s2, equals);
    }
#endif
    if (s1_is_unicode & s2_is_unicode) {
        Py_ssize_t length;
        int kind;
        void *data1, *data2;
#if CYTHON_PEP393_ENABLED
        /* ensure both strings use the canonical (compact) representation */
        if (unlikely(PyUnicode_READY(s1) < 0) || unlikely(PyUnicode_READY(s2) < 0))
            return -1;
#endif
        length = __Pyx_PyUnicode_GET_LENGTH(s1);
        if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
            goto return_ne;
        }
        kind = __Pyx_PyUnicode_KIND(s1);
        if (kind != __Pyx_PyUnicode_KIND(s2)) {
            /* NOTE(review): differing storage kinds are treated as unequal
             * here without canonicalization — valid for exact strings since
             * PyUnicode_READY canonicalizes the representation above */
            goto return_ne;
        }
        data1 = __Pyx_PyUnicode_DATA(s1);
        data2 = __Pyx_PyUnicode_DATA(s2);
        if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
            goto return_ne;
        } else if (length == 1) {
            goto return_eq;
        } else {
            int result = memcmp(data1, data2, length * kind);
#if PY_MAJOR_VERSION < 3
            Py_XDECREF(owned_ref);
#endif
            return (equals == Py_EQ) ? (result == 0) : (result != 0);
        }
    } else if ((s1 == Py_None) & s2_is_unicode) {
        goto return_ne;
    } else if ((s2 == Py_None) & s1_is_unicode) {
        goto return_ne;
    } else {
        int result;
        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
        if (!py_result)
            return -1;
        result = __Pyx_PyObject_IsTrue(py_result);
        Py_DECREF(py_result);
        return result;
    }
return_eq:
#if PY_MAJOR_VERSION < 3
    Py_XDECREF(owned_ref);
#endif
    return (equals == Py_EQ);
return_ne:
#if PY_MAJOR_VERSION < 3
    Py_XDECREF(owned_ref);
#endif
    return (equals == Py_NE);
#endif
}
/* Decode the [start, stop) slice of a NUL-terminated C string into a
 * unicode object.  Negative indices are interpreted relative to
 * strlen(cstring) (and clamped at 0 for start).  `decode_func` is an
 * optional codec-specific fast path; otherwise PyUnicode_Decode is used
 * with `encoding`/`errors`.  An empty slice yields an empty unicode. */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
         const char* cstring, Py_ssize_t start, Py_ssize_t stop,
         const char* encoding, const char* errors,
         PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
    Py_ssize_t length;
    if (unlikely((start < 0) | (stop < 0))) {
        /* only compute strlen when a negative index requires it */
        length = strlen(cstring);
        if (start < 0) {
            start += length;
            if (start < 0)
                start = 0;
        }
        if (stop < 0)
            stop += length;
    }
    length = stop - start;
    if (unlikely(length <= 0))
        return PyUnicode_FromUnicode(NULL, 0);
    cstring += start;
    if (decode_func) {
        return decode_func(cstring, length, errors);
    } else {
        return PyUnicode_Decode(cstring, length, encoding, errors);
    }
}
/* Unpacking error: the iterable produced more than `expected` values. */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
    PyErr_Format(
        PyExc_ValueError,
        "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)",
        expected);
}
/* Unpacking error: the iterable produced only `index` value(s). */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
    const char *plural = (index == 1) ? "" : "s";
    PyErr_Format(PyExc_ValueError,
                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
                 index, plural);
}
/* Raised when None is unpacked as though it were an iterable. */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
    PyErr_SetString(PyExc_TypeError,
                    "'NoneType' object is not iterable");
}
/* Return 1 when `obj` is an instance of `type` (or a subclass);
 * otherwise raise TypeError (or SystemError for a NULL `type`) and
 * return 0. */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
    int matches;
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    matches = PyObject_TypeCheck(obj, type);
    if (!matches)
        PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
                     Py_TYPE(obj)->tp_name, type->tp_name);
    return matches ? 1 : 0;
}
/* Save the currently *handled* exception (the sys.exc_info state, not a
 * pending one), returning new references in (type, value, tb); any of
 * them may be NULL.  CPython: read the tstate->exc_* slots directly;
 * elsewhere use PyErr_GetExcInfo. */
static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_COMPILING_IN_CPYTHON
    PyThreadState *tstate = PyThreadState_GET();
    *type = tstate->exc_type;
    *value = tstate->exc_value;
    *tb = tstate->exc_traceback;
    Py_XINCREF(*type);
    Py_XINCREF(*value);
    Py_XINCREF(*tb);
#else
    PyErr_GetExcInfo(type, value, tb);
#endif
}
/* Restore a previously saved handled-exception state (counterpart of
 * __Pyx_ExceptionSave), stealing the caller's references.  The old slot
 * contents are released only after the new ones are installed. */
static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb) {
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    PyThreadState *tstate = PyThreadState_GET();
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = type;
    tstate->exc_value = value;
    tstate->exc_traceback = tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
#else
    PyErr_SetExcInfo(type, value, tb);
#endif
}
/* Catch the pending exception, as done on entry to an `except` block:
 * fetch it, normalize it, hand new references to the caller, and make
 * it the currently *handled* exception (sys.exc_info).  Returns 0 on
 * success, -1 on failure with the outputs zeroed. */
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) {
    PyObject *local_type, *local_value, *local_tb;
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    PyThreadState *tstate = PyThreadState_GET();
    /* steal the pending exception from the thread state */
    local_type = tstate->curexc_type;
    local_value = tstate->curexc_value;
    local_tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
#else
    PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
    PyErr_NormalizeException(&local_type, &local_value, &local_tb);
    /* normalization itself may have raised */
#if CYTHON_COMPILING_IN_CPYTHON
    if (unlikely(tstate->curexc_type))
#else
    if (unlikely(PyErr_Occurred()))
#endif
        goto bad;
#if PY_MAJOR_VERSION >= 3
    if (local_tb) {
        if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
            goto bad;
    }
#endif
    /* extra increfs: one set of references goes to the caller, the other
     * set is stored in the handled-exception slots below */
    Py_XINCREF(local_tb);
    Py_XINCREF(local_type);
    Py_XINCREF(local_value);
    *type = local_type;
    *value = local_value;
    *tb = local_tb;
#if CYTHON_COMPILING_IN_CPYTHON
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = local_type;
    tstate->exc_value = local_value;
    tstate->exc_traceback = local_tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
#else
    PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
    return 0;
bad:
    *type = 0;
    *value = 0;
    *tb = 0;
    Py_XDECREF(local_type);
    Py_XDECREF(local_value);
    Py_XDECREF(local_tb);
    return -1;
}
/* Generic o[j] lookup that takes ownership of the boxed index `j`.
 * A NULL `j` (failed boxing upstream) propagates as NULL. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
    PyObject *item = NULL;
    if (j) {
        item = PyObject_GetItem(o, j);
        Py_DECREF(j);
    }
    return item;
}
/* list[i] fast path for a known list.  wraparound: adjust negative i;
 * boundscheck: verify the index, otherwise trust the caller.  Out-of-
 * bounds access falls through to the generic path (which raises). */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
                                                          int wraparound, int boundscheck) {
#if CYTHON_COMPILING_IN_CPYTHON
    if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o);
    if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) {
        PyObject *r = PyList_GET_ITEM(o, i);
        Py_INCREF(r);
        return r;
    }
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* tuple[i] fast path for a known tuple; mirrors the list variant above. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
                                                           int wraparound, int boundscheck) {
#if CYTHON_COMPILING_IN_CPYTHON
    if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o);
    if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) {
        PyObject *r = PyTuple_GET_ITEM(o, i);
        Py_INCREF(r);
        return r;
    }
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* o[i] with an integer index: dispatch to the list/tuple fast paths, a
 * direct sq_item call (handling negative-index wraparound via
 * sq_length), or the generic boxed-index lookup.  is_list lets the
 * compiler assert the list case statically. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
                                                     int is_list, int wraparound, int boundscheck) {
#if CYTHON_COMPILING_IN_CPYTHON
    if (is_list || PyList_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
        if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) {
            PyObject *r = PyList_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
    }
    else if (PyTuple_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
        if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) {
            PyObject *r = PyTuple_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
    } else {
        PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
        if (likely(m && m->sq_item)) {
            if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
                Py_ssize_t l = m->sq_length(o);
                if (likely(l >= 0)) {
                    i += l;
                } else {
                    /* unknown length: only OverflowError is recoverable
                     * (let sq_item handle the negative index itself) */
                    if (PyErr_ExceptionMatches(PyExc_OverflowError))
                        PyErr_Clear();
                    else
                        return NULL;
                }
            }
            return m->sq_item(o, i);
        }
    }
#else
    if (is_list || PySequence_Check(o)) {
        return PySequence_GetItem(o, i);
    }
#endif
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* Store the C vtable pointer of an extension type in its type dict under
 * the '__pyx_vtable__' key, wrapped in a PyCapsule (PyCObject before
 * Python 2.7 / on 3.0).  Returns 0 on success, -1 with an exception set. */
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION==3&&PY_MINOR_VERSION==0)
    PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
    PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
    if (!ob)
        goto bad;
    if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
        goto bad;
    Py_DECREF(ob);
    return 0;
bad:
    Py_XDECREF(ob);
    return -1;
}
#if PY_MAJOR_VERSION < 3
/* Python 2 buffer acquisition: try the native buffer protocol (2.6+),
 * then the known Cython-defined buffer-capable types, and on pre-2.6
 * interpreters a '__pyx_getbuffer' CObject stashed in the type dict.
 * Returns 0 on success, -1 with a TypeError (or the callee's error). */
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
#if PY_VERSION_HEX >= 0x02060000
    if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
#endif
    if (PyObject_TypeCheck(obj, __pyx_ptype_7cpython_5array_array)) return __pyx_pw_7cpython_5array_5array_1__getbuffer__(obj, view, flags);
    if (PyObject_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
    if (PyObject_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
#if PY_VERSION_HEX < 0x02060000
    if (obj->ob_type->tp_dict) {
        PyObject *getbuffer_cobj = PyObject_GetItem(
            obj->ob_type->tp_dict, __pyx_n_s_pyx_getbuffer);
        if (getbuffer_cobj) {
            getbufferproc func = (getbufferproc) PyCObject_AsVoidPtr(getbuffer_cobj);
            Py_DECREF(getbuffer_cobj);
            if (!func)
                goto fail;
            return func(obj, view, flags);
        } else {
            PyErr_Clear();
        }
    }
#endif
    PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
#if PY_VERSION_HEX < 0x02060000
fail:
#endif
    return -1;
}
/* Python 2 buffer release, mirroring __Pyx_GetBuffer's dispatch.
 * Always ends by dropping the view's owner reference and NULLing
 * view->obj; a release failure is reported as unraisable. */
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
    PyObject *obj = view->obj;
    if (!obj) return;
#if PY_VERSION_HEX >= 0x02060000
    if (PyObject_CheckBuffer(obj)) {
        PyBuffer_Release(view);
        return;
    }
#endif
    if (PyObject_TypeCheck(obj, __pyx_ptype_7cpython_5array_array)) { __pyx_pw_7cpython_5array_5array_3__releasebuffer__(obj, view); return; }
#if PY_VERSION_HEX < 0x02060000
    if (obj->ob_type->tp_dict) {
        PyObject *releasebuffer_cobj = PyObject_GetItem(
            obj->ob_type->tp_dict, __pyx_n_s_pyx_releasebuffer);
        if (releasebuffer_cobj) {
            releasebufferproc func = (releasebufferproc) PyCObject_AsVoidPtr(releasebuffer_cobj);
            Py_DECREF(releasebuffer_cobj);
            if (!func)
                goto fail;
            func(obj, view);
            return;
        } else {
            PyErr_Clear();
        }
    }
#endif
    /* no releasebuffer found: nothing to call, just drop the reference */
    goto nofail;
#if PY_VERSION_HEX < 0x02060000
fail:
#endif
    PyErr_WriteUnraisable(obj);
nofail:
    Py_DECREF(obj);
    view->obj = NULL;
}
#endif /* PY_MAJOR_VERSION < 3 */
/* Structural comparison of two __Pyx_TypeInfo descriptors; returns 1
 * when compatible, 0 otherwise (NULL inputs compare as incompatible).
 * Descriptors whose typegroup is 'H' on either side compare by size
 * alone; otherwise size, typegroup, signedness, ndim, per-dimension
 * array sizes and — for typegroup 'S' — flags and the recursive field
 * layout must all match. */
static int
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
{
    int i;
    if (!a || !b)
        return 0;
    if (a == b)
        return 1;
    if (a->size != b->size || a->typegroup != b->typegroup ||
            a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
        if (a->typegroup == 'H' || b->typegroup == 'H') {
            return a->size == b->size;
        } else {
            return 0;
        }
    }
    if (a->ndim) {
        for (i = 0; i < a->ndim; i++)
            if (a->arraysize[i] != b->arraysize[i])
                return 0;
    }
    if (a->typegroup == 'S') {
        if (a->flags != b->flags)
            return 0;
        if (a->fields || b->fields) {
            if (!(a->fields && b->fields))
                return 0;
            for (i = 0; a->fields[i].type && b->fields[i].type; i++) {
                __Pyx_StructField *field_a = a->fields + i;
                __Pyx_StructField *field_b = b->fields + i;
                if (field_a->offset != field_b->offset ||
                        !__pyx_typeinfo_cmp(field_a->type, field_b->type))
                    return 0;
            }
            /* equal only if both field lists ended at the same index */
            return !a->fields[i].type && !b->fields[i].type;
        }
    }
    return 1;
}
/* Validate the stride of buffer axis `dim` against the memoryview axis
 * spec bits in `spec`.  Axes of extent <= 1 always pass (any stride is
 * fine there).  Returns 1 on success, 0 after raising ValueError. */
static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
    if (buf->shape[dim] <= 1)
        return 1;
    if (buf->strides) {
        if (spec & __Pyx_MEMVIEW_CONTIG) {
            if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
                /* indirect contiguity: the stride must step by pointers */
                if (buf->strides[dim] != sizeof(void *)) {
                    PyErr_Format(PyExc_ValueError,
                                 "Buffer is not indirectly contiguous "
                                 "in dimension %d.", dim);
                    goto fail;
                }
            } else if (buf->strides[dim] != buf->itemsize) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer and memoryview are not contiguous "
                                "in the same dimension.");
                goto fail;
            }
        }
        if (spec & __Pyx_MEMVIEW_FOLLOW) {
            /* "follow" axes only need |stride| >= itemsize */
            Py_ssize_t stride = buf->strides[dim];
            if (stride < 0)
                stride = -stride;
            if (stride < buf->itemsize) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer and memoryview are not contiguous "
                                "in the same dimension.");
                goto fail;
            }
        }
    } else {
        /* NULL strides means the exporter claims C contiguity */
        if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) {
            PyErr_Format(PyExc_ValueError,
                         "C-contiguous buffer is not contiguous in "
                         "dimension %d", dim);
            goto fail;
        } else if (spec & (__Pyx_MEMVIEW_PTR)) {
            PyErr_Format(PyExc_ValueError,
                         "C-contiguous buffer is not indirect in "
                         "dimension %d", dim);
            goto fail;
        } else if (buf->suboffsets) {
            PyErr_SetString(PyExc_ValueError,
                            "Buffer exposes suboffsets but no strides");
            goto fail;
        }
    }
    return 1;
fail:
    return 0;
}
/* Validate the suboffset (direct vs. indirect access) requirements of
 * buffer axis `dim` against the axis spec bits in `spec`.
 * Returns 1 on success, 0 after raising ValueError. */
static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
    if ((spec & __Pyx_MEMVIEW_DIRECT) &&
            buf->suboffsets && buf->suboffsets[dim] >= 0) {
        /* a non-negative suboffset marks the axis as indirect */
        PyErr_Format(PyExc_ValueError,
                     "Buffer not compatible with direct access "
                     "in dimension %d.", dim);
        return 0;
    }
    if ((spec & __Pyx_MEMVIEW_PTR) &&
            (!buf->suboffsets || buf->suboffsets[dim] < 0)) {
        PyErr_Format(PyExc_ValueError,
                     "Buffer is not indirectly accessible "
                     "in dimension %d.", dim);
        return 0;
    }
    return 1;
}
/* Verify that `buf` is Fortran- or C-contiguous, as requested by
 * c_or_f_flag; with neither flag set the check passes trivially.
 * Axes of extent <= 1 are skipped (their stride is irrelevant).
 * Returns 1 on success, 0 after raising ValueError. */
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
    int dim;
    Py_ssize_t expected = 1;  /* expected stride, counted in items */
    if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
        /* Fortran order: the first axis varies fastest */
        for (dim = 0; dim < ndim; dim++) {
            if (buf->shape[dim] > 1 &&
                    buf->strides[dim] != expected * buf->itemsize) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer not fortran contiguous.");
                return 0;
            }
            expected *= buf->shape[dim];
        }
    } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
        /* C order: the last axis varies fastest */
        for (dim = ndim - 1; dim >= 0; dim--) {
            if (buf->shape[dim] > 1 &&
                    buf->strides[dim] != expected * buf->itemsize) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer not C contiguous.");
                return 0;
            }
            expected *= buf->shape[dim];
        }
    }
    return 1;
}
/* Build a memoryview slice from `original_obj`: reuse an existing
 * memoryview when its typeinfo matches `dtype`, otherwise create a new
 * one acquiring the buffer with `buf_flags`.  The buffer is then checked
 * against ndim, the dtype's format/itemsize, the per-axis specs, and the
 * requested overall contiguity, before the slice fields are filled in by
 * __Pyx_init_memviewslice.  Returns 0 on success, -1 with an exception
 * set (releasing any newly created memoryview). */
static int __Pyx_ValidateAndInit_memviewslice(
                int *axes_specs,
                int c_or_f_flag,
                int buf_flags,
                int ndim,
                __Pyx_TypeInfo *dtype,
                __Pyx_BufFmt_StackElem stack[],
                __Pyx_memviewslice *memviewslice,
                PyObject *original_obj)
{
    struct __pyx_memoryview_obj *memview, *new_memview;
    __Pyx_RefNannyDeclarations
    Py_buffer *buf;
    int i, spec = 0, retval = -1;
    __Pyx_BufFmt_Context ctx;
    int from_memoryview = __pyx_memoryview_check(original_obj);
    __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
    if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
                                                    original_obj)->typeinfo)) {
        /* compatible existing memoryview: borrow it, skip format re-check */
        memview = (struct __pyx_memoryview_obj *) original_obj;
        new_memview = NULL;
    } else {
        memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
            original_obj, buf_flags, 0, dtype);
        new_memview = memview;
        if (unlikely(!memview))
            goto fail;
    }
    buf = &memview->view;
    if (buf->ndim != ndim) {
        PyErr_Format(PyExc_ValueError,
                     "Buffer has wrong number of dimensions (expected %d, got %d)",
                     ndim, buf->ndim);
        goto fail;
    }
    if (new_memview) {
        /* validate the exporter's format string against the expected dtype */
        __Pyx_BufFmt_Init(&ctx, stack, dtype);
        if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
    }
    if ((unsigned) buf->itemsize != dtype->size) {
        PyErr_Format(PyExc_ValueError,
                     "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
                     "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
                     buf->itemsize,
                     (buf->itemsize > 1) ? "s" : "",
                     dtype->name,
                     dtype->size,
                     (dtype->size > 1) ? "s" : "");
        goto fail;
    }
    for (i = 0; i < ndim; i++) {
        spec = axes_specs[i];
        if (!__pyx_check_strides(buf, i, ndim, spec))
            goto fail;
        if (!__pyx_check_suboffsets(buf, i, ndim, spec))
            goto fail;
    }
    if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))
        goto fail;
    if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
                                         new_memview != NULL) == -1)) {
        goto fail;
    }
    retval = 0;
    goto no_fail;
fail:
    Py_XDECREF(new_memview);
    retval = -1;
no_fail:
    __Pyx_RefNannyFinishContext();
    return retval;
}
/* Convert `obj` into a 2-D direct+strided memoryview slice of double
 * (the "dsds" in the name encodes direct/strided for both axes).
 * Py_None passes through as a sentinel slice; on failure the returned
 * slice has NULL memview/data and an exception is set. */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *obj) {
    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem stack[1];
    int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
    int retcode;
    if (obj == Py_None) {
        /* "None slice" sentinel: memview == Py_None, data == NULL */
        result.memview = (struct __pyx_memoryview_obj *) Py_None;
        return result;
    }
    retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
                                                 PyBUF_RECORDS, 2,
                                                 &__Pyx_TypeInfo_double, stack,
                                                 &result, obj);
    if (unlikely(retcode == -1))
        goto __pyx_fail;
    return result;
__pyx_fail:
    result.memview = NULL;
    result.data = NULL;
    return result;
}
/* __PYX_VERIFY_RETURN_INT(target_type, func_type, func):
 * helper used by the __Pyx_PyInt_As_* converters below.  Converts the
 * PyObject `x` (in scope at the expansion site, along with `is_unsigned`)
 * via `func`, which yields a `func_type`.  When target_type is narrower
 * than func_type, it verifies the value survives a round-trip cast and
 * raises OverflowError otherwise.  Expands to a `return` statement. */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func) \
    { \
        func_type value = func(x); \
        if (sizeof(target_type) < sizeof(func_type)) { \
            if (unlikely(value != (func_type) (target_type) value)) { \
                func_type zero = 0; \
                PyErr_SetString(PyExc_OverflowError, \
                    (is_unsigned && unlikely(value < zero)) ? \
                    "can't convert negative value to " #target_type : \
                    "value too large to convert to " #target_type); \
                return (target_type) -1; \
            } \
        } \
        return (target_type) value; \
    }
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#endif
#endif
/* Convert a Python object to a C int with overflow checking.
 * Fast paths: Py2 int, then PyLong (with single-digit shortcuts when
 * CPython long internals are available), then a generic __index__/int()
 * coercion with a byte-array fallback for values wider than long long.
 * Returns (int)-1 with an exception set on error; callers must check
 * PyErr_Occurred() to distinguish a legitimate -1. */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
    const int neg_one = (int) -1, const_zero = 0;
    /* compile-time detection of target signedness (int is signed, so this
     * folds to 0 here; the template is shared across integer types) */
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(int) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG)
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                PyErr_SetString(PyExc_OverflowError,
                                "can't convert negative value to int");
                return (int) -1;
            }
            return (int) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
            /* single-digit fast path using the PyLong representation */
            if (sizeof(digit) <= sizeof(int)) {
                switch (Py_SIZE(x)) {
                    case  0: return 0;
                    case  1: return (int) ((PyLongObject*)x)->ob_digit[0];
                }
            }
#endif
#endif
            if (unlikely(Py_SIZE(x) < 0)) {
                PyErr_SetString(PyExc_OverflowError,
                                "can't convert negative value to int");
                return (int) -1;
            }
            if (sizeof(int) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong)
            } else if (sizeof(int) <= sizeof(unsigned long long)) {
                __PYX_VERIFY_RETURN_INT(int, unsigned long long, PyLong_AsUnsignedLongLong)
            }
        } else {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
            if (sizeof(digit) <= sizeof(int)) {
                switch (Py_SIZE(x)) {
                    case  0: return 0;
                    case  1: return +(int) ((PyLongObject*)x)->ob_digit[0];
                    case -1: return -(int) ((PyLongObject*)x)->ob_digit[0];
                }
            }
#endif
#endif
            if (sizeof(int) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong)
            } else if (sizeof(int) <= sizeof(long long)) {
                __PYX_VERIFY_RETURN_INT(int, long long, PyLong_AsLongLong)
            }
        }
        {
            /* int wider than long long: decode via the raw byte array */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            int val;
            PyObject *v = __Pyx_PyNumber_Int(x);
#if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
#endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (int) -1;
        }
    } else {
        /* arbitrary object: coerce to an integer, then recurse */
        int val;
        PyObject *tmp = __Pyx_PyNumber_Int(x);
        if (!tmp) return (int) -1;
        val = __Pyx_PyInt_As_int(tmp);
        Py_DECREF(tmp);
        return val;
    }
}
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
const int neg_one = (int) -1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
} else if (sizeof(int) <= sizeof(unsigned long long)) {
return PyLong_FromUnsignedLongLong((unsigned long long) value);
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(long long)) {
return PyLong_FromLongLong((long long) value);
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
const long neg_one = (long) -1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
} else if (sizeof(long) <= sizeof(unsigned long long)) {
return PyLong_FromUnsignedLongLong((unsigned long long) value);
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(long long)) {
return PyLong_FromLongLong((long long) value);
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
static PyObject *__pyx_memview_get_int(const char *itemp) {
return (PyObject *) __Pyx_PyInt_From_int(*(int *) itemp);
}
static int __pyx_memview_set_int(const char *itemp, PyObject *obj) {
int value = __Pyx_PyInt_As_int(obj);
if ((value == (int)-1) && PyErr_Occurred())
return 0;
*(int *) itemp = value;
return 1;
}
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice *mvs,
char order, int ndim)
{
int i, index, step, start;
Py_ssize_t itemsize = mvs->memview->view.itemsize;
if (order == 'F') {
step = 1;
start = 0;
} else {
step = -1;
start = ndim - 1;
}
for (i = 0; i < ndim; i++) {
index = start + step * i;
if (mvs->suboffsets[index] >= 0 || mvs->strides[index] != itemsize)
return 0;
itemsize *= mvs->shape[index];
}
return 1;
}
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
void **out_start, void **out_end,
int ndim, size_t itemsize)
{
char *start, *end;
int i;
start = end = slice->data;
for (i = 0; i < ndim; i++) {
Py_ssize_t stride = slice->strides[i];
Py_ssize_t extent = slice->shape[i];
if (extent == 0) {
*out_start = *out_end = start;
return;
} else {
if (stride > 0)
end += stride * (extent - 1);
else
start += stride * (extent - 1);
}
}
*out_start = start;
*out_end = end + itemsize;
}
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize)
{
void *start1, *end1, *start2, *end2;
__pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
__pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
return (start1 < end2) && (start2 < end1);
}
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object)
{
__Pyx_RefNannyDeclarations
int i;
__Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
Py_buffer *buf = &from_memview->view;
PyObject *shape_tuple = NULL;
PyObject *temp_int = NULL;
struct __pyx_array_obj *array_obj = NULL;
struct __pyx_memoryview_obj *memview_obj = NULL;
__Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
for (i = 0; i < ndim; i++) {
if (from_mvs->suboffsets[i] >= 0) {
PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
"indirect dimensions (axis %d)", i);
goto fail;
}
}
shape_tuple = PyTuple_New(ndim);
if (unlikely(!shape_tuple)) {
goto fail;
}
__Pyx_GOTREF(shape_tuple);
for(i = 0; i < ndim; i++) {
temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
if(unlikely(!temp_int)) {
goto fail;
} else {
PyTuple_SET_ITEM(shape_tuple, i, temp_int);
temp_int = NULL;
}
}
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
if (unlikely(!array_obj)) {
goto fail;
}
__Pyx_GOTREF(array_obj);
memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
(PyObject *) array_obj, contig_flag,
dtype_is_object,
from_mvs->memview->typeinfo);
if (unlikely(!memview_obj))
goto fail;
if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
goto fail;
if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
dtype_is_object) < 0))
goto fail;
goto no_fail;
fail:
__Pyx_XDECREF(new_mvs.memview);
new_mvs.memview = NULL;
new_mvs.data = NULL;
no_fail:
__Pyx_XDECREF(shape_tuple);
__Pyx_XDECREF(temp_int);
__Pyx_XDECREF(array_obj);
__Pyx_RefNannyFinishContext();
return new_mvs;
}
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION == 0)
cobj = PyCapsule_New(p, sig, NULL);
#else
cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
return cobj;
}
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_VERSION_HEX < 0x03030000
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
#if PY_VERSION_HEX >= 0x02050000
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if (strchr(__Pyx_MODULE_NAME, '.')) {
#if PY_VERSION_HEX < 0x03030000
PyObject *py_level = PyInt_FromLong(1);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
#endif
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0; /* try absolute import on failure */
}
#endif
if (!module) {
#if PY_VERSION_HEX < 0x03030000
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
#else
if (level>0) {
PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4.");
goto bad;
}
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, NULL);
#endif
bad:
#if PY_VERSION_HEX < 0x03030000
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#endif
#endif
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
const char neg_one = (char) -1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(char) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG)
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to char");
return (char) -1;
}
return (char) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
if (sizeof(digit) <= sizeof(char)) {
switch (Py_SIZE(x)) {
case 0: return 0;
case 1: return (char) ((PyLongObject*)x)->ob_digit[0];
}
}
#endif
#endif
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to char");
return (char) -1;
}
if (sizeof(char) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, PyLong_AsUnsignedLong)
} else if (sizeof(char) <= sizeof(unsigned long long)) {
__PYX_VERIFY_RETURN_INT(char, unsigned long long, PyLong_AsUnsignedLongLong)
}
} else {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
if (sizeof(digit) <= sizeof(char)) {
switch (Py_SIZE(x)) {
case 0: return 0;
case 1: return +(char) ((PyLongObject*)x)->ob_digit[0];
case -1: return -(char) ((PyLongObject*)x)->ob_digit[0];
}
}
#endif
#endif
if (sizeof(char) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT(char, long, PyLong_AsLong)
} else if (sizeof(char) <= sizeof(long long)) {
__PYX_VERIFY_RETURN_INT(char, long long, PyLong_AsLongLong)
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
char val;
PyObject *v = __Pyx_PyNumber_Int(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (char) -1;
}
} else {
char val;
PyObject *tmp = __Pyx_PyNumber_Int(x);
if (!tmp) return (char) -1;
val = __Pyx_PyInt_As_char(tmp);
Py_DECREF(tmp);
return val;
}
}
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#endif
#endif
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
const long neg_one = (long) -1, const_zero = 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG)
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
if (sizeof(digit) <= sizeof(long)) {
switch (Py_SIZE(x)) {
case 0: return 0;
case 1: return (long) ((PyLongObject*)x)->ob_digit[0];
}
}
#endif
#endif
if (unlikely(Py_SIZE(x) < 0)) {
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong)
} else if (sizeof(long) <= sizeof(unsigned long long)) {
__PYX_VERIFY_RETURN_INT(long, unsigned long long, PyLong_AsUnsignedLongLong)
}
} else {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
if (sizeof(digit) <= sizeof(long)) {
switch (Py_SIZE(x)) {
case 0: return 0;
case 1: return +(long) ((PyLongObject*)x)->ob_digit[0];
case -1: return -(long) ((PyLongObject*)x)->ob_digit[0];
}
}
#endif
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong)
} else if (sizeof(long) <= sizeof(long long)) {
__PYX_VERIFY_RETURN_INT(long, long long, PyLong_AsLongLong)
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_Int(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_Int(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
}
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *obj) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
PyBUF_RECORDS, 1,
&__Pyx_TypeInfo_double, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_int(PyObject *obj) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
PyBUF_RECORDS, 1,
&__Pyx_TypeInfo_int, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
#if PY_VERSION_HEX < 0x02050000
return PyErr_Warn(NULL, message);
#else
return PyErr_WarnEx(NULL, message, 1);
#endif
}
return 0;
}
#ifndef __PYX_HAVE_RT_ImportModule
#define __PYX_HAVE_RT_ImportModule
static PyObject *__Pyx_ImportModule(const char *name) {
PyObject *py_name = 0;
PyObject *py_module = 0;
py_name = __Pyx_PyIdentifier_FromString(name);
if (!py_name)
goto bad;
py_module = PyImport_Import(py_name);
Py_DECREF(py_name);
return py_module;
bad:
Py_XDECREF(py_name);
return 0;
}
#endif
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name,
size_t size, int strict)
{
PyObject *py_module = 0;
PyObject *result = 0;
PyObject *py_name = 0;
char warning[200];
Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
PyObject *py_basicsize;
#endif
py_module = __Pyx_ImportModule(module_name);
if (!py_module)
goto bad;
py_name = __Pyx_PyIdentifier_FromString(class_name);
if (!py_name)
goto bad;
result = PyObject_GetAttr(py_module, py_name);
Py_DECREF(py_name);
py_name = 0;
Py_DECREF(py_module);
py_module = 0;
if (!result)
goto bad;
if (!PyType_Check(result)) {
PyErr_Format(PyExc_TypeError,
"%.200s.%.200s is not a type object",
module_name, class_name);
goto bad;
}
#ifndef Py_LIMITED_API
basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
if (!py_basicsize)
goto bad;
basicsize = PyLong_AsSsize_t(py_basicsize);
Py_DECREF(py_basicsize);
py_basicsize = 0;
if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
goto bad;
#endif
if (!strict && (size_t)basicsize > size) {
PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility",
module_name, class_name);
#if PY_VERSION_HEX < 0x02050000
if (PyErr_Warn(NULL, warning) < 0) goto bad;
#else
if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
#endif
}
else if ((size_t)basicsize != size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s has the wrong size, try recompiling",
module_name, class_name);
goto bad;
}
return (PyTypeObject *)result;
bad:
Py_XDECREF(py_module);
Py_XDECREF(result);
return NULL;
}
#endif
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = (start + end) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0, /*int argcount,*/
0, /*int kwonlyargcount,*/
0, /*int nlocals,*/
0, /*int stacksize,*/
0, /*int flags,*/
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line, /*int firstlineno,*/
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_globals = 0;
PyFrameObject *py_frame = 0;
py_code = __pyx_find_code_object(c_line ? c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? c_line : py_line, py_code);
}
py_globals = PyModule_GetDict(__pyx_m);
if (!py_globals) goto bad;
py_frame = PyFrame_New(
PyThreadState_GET(), /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
py_globals, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
py_frame->f_lineno = py_line;
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else /* Python 3+ has unicode identifiers */
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, strlen(c_str));
}
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
#if PY_VERSION_HEX < 0x03030000
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif /*__PYX_DEFAULT_STRING_ENCODING_IS_ASCII*/
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
#else /* PY_VERSION_HEX < 0x03030000 */
if (PyUnicode_READY(o) == -1) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (PyUnicode_IS_ASCII(o)) {
*length = PyUnicode_GET_DATA_SIZE(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */
return PyUnicode_AsUTF8AndSize(o, length);
#endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */
#endif /* PY_VERSION_HEX < 0x03030000 */
} else
#endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT */
#if !CYTHON_COMPILING_IN_PYPY
#if PY_VERSION_HEX >= 0x02060000
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
PyNumberMethods *m;
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (PyInt_Check(x) || PyLong_Check(x))
#else
if (PyLong_Check(x))
#endif
return Py_INCREF(x), x;
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = PyNumber_Int(x);
}
else if (m && m->nb_long) {
name = "long";
res = PyNumber_Long(x);
}
#else
if (m && m->nb_int) {
name = "int";
res = PyNumber_Long(x);
}
#endif
if (res) {
#if PY_MAJOR_VERSION < 3
if (!PyInt_Check(res) && !PyLong_Check(res)) {
#else
if (!PyLong_Check(res)) {
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
name, name, Py_TYPE(res)->tp_name);
Py_DECREF(res);
return NULL;
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#endif
#endif
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b)))
return PyInt_AS_LONG(b);
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
switch (Py_SIZE(b)) {
case -1: return -(sdigit)((PyLongObject*)b)->ob_digit[0];
case 0: return 0;
case 1: return ((PyLongObject*)b)->ob_digit[0];
}
#endif
#endif
#if PY_VERSION_HEX < 0x02060000
return PyInt_AsSsize_t(b);
#else
return PyLong_AsSsize_t(b);
#endif
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
#if PY_VERSION_HEX < 0x02050000
if (ival <= LONG_MAX)
return PyInt_FromLong((long)ival);
else {
unsigned char *bytes = (unsigned char *) &ival;
int one = 1; int little = (int)*(unsigned char*)&one;
return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0);
}
#else
return PyInt_FromSize_t(ival);
#endif
}
#endif /* Py_PYTHON_H */
|
2.norace2.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define N 20
int main() {
int A[N][N];
for (int i = 1; i < N; i++)
#pragma omp simd
for (int j = 1; j < N; j++)
A[i][j] = A[i][j] + i + j;
}
// CHECK: Region is Data Race Free.
// END
|
GB_add_phase0.c | //------------------------------------------------------------------------------
// GB_add_phase0: find vectors of C to compute for C=A+B or C<M>=A+B
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// The eWise add of two matrices, C=A+B, C<M>=A+B, or C<!M>=A+B starts with
// this phase, which determines which vectors of C need to be computed.
// This phase is also used for GB_masker.
// On input, A and B are the two matrices being added, and M is the optional
// mask matrix (not complemented). The complemented mask is handled in GB_mask,
// not here.
// The A matrix can be sparse, hypersparse, slice, or hyperslice. The B matrix
// can only be sparse or hypersparse. See GB_wait, which can pass in A as any
// of the four formats. In this case, no mask is present.
// On output, an integer (Cnvec) a boolean (Ch_to_Mh) and up to 3 arrays are
// returned, either NULL or of size Cnvec. Let n = A->vdim be the vector
// dimension of A, B, M and C.
// Ch: the list of vectors to compute. If not NULL, Ch [k] = j is the
// kth vector in C to compute, which will become the hyperlist C->h of C.
// Note that some of these vectors may turn out to be empty, because of
// the mask, or because the vector j appeared in A or B, but is empty.
// It is pruned at the end of GB_add_phase2. If Ch is NULL then it is an
// implicit list of size n, and Ch [k] == k for all k = 0:n-1. In this
// case, C will be a standard matrix, not hypersparse. Thus, the kth
// vector is j = (Ch == NULL) ? k : Ch [k].
// Ch is freed by GB_add if phase1 fails. phase2 either frees it or
// transplants it into C.
// Ch_is_Mh: true if the mask M is present, hypersparse, and not
// complemented, false otherwise. In this case Ch is a deep copy of Mh.
// Only GB_add uses this option; it is not used by GB_masker (Ch_is_Mh
// is always false for GB_masker). This is determined by passing in
// p_Ch_is_Mh as a NULL or non-NULL pointer.
// C_to_A: if A is hypersparse, then C_to_A [k] = kA if the kth vector, j
// = (Ch == NULL) ? k : Ch [k] appears in A, as j = Ah [kA]. If j does
// not appear in A, then C_to_A [k] = -1. If A is not hypersparse, then
// C_to_A is returned as NULL.
// C_to_B: if B is hypersparse, then C_to_B [k] = kB if the kth vector, j
// = (Ch == NULL) ? k : Ch [k] appears in B, as j = Bh [kB]. If j does
// not appear in B, then C_to_B [k] = -1. If B is not hypersparse, then
// C_to_B is returned as NULL.
// C_to_M: if M is hypersparse, and Ch_is_Mh is false, then C_to_M [k] =
// kM if the kth vector, j = (Ch == NULL) ? k : Ch [k] appears in M, as j
// = Mh [kM]. If j does not appear in M, then C_to_M [k] = -1. If M is
// not hypersparse, then C_to_M is returned as NULL.
#include "GB_add.h"
#define GB_FREE_WORK \
{ \
GB_FREE_MEMORY (kA_start, ntasks+1, sizeof (int64_t)) ; \
GB_FREE_MEMORY (kB_start, ntasks+1, sizeof (int64_t)) ; \
GB_FREE_MEMORY (kC_start, ntasks+1, sizeof (int64_t)) ; \
}
//------------------------------------------------------------------------------
// GB_allocate_result
//------------------------------------------------------------------------------
static inline bool GB_allocate_result
(
int64_t Cnvec,
int64_t *restrict *Ch_handle,
int64_t *restrict *C_to_M_handle,
int64_t *restrict *C_to_A_handle,
int64_t *restrict *C_to_B_handle
)
{
bool ok = true ;
if (Ch_handle != NULL)
{
GB_MALLOC_MEMORY (*Ch_handle, Cnvec, sizeof (int64_t)) ;
ok = (*Ch_handle != NULL) ;
}
if (C_to_M_handle != NULL)
{
GB_MALLOC_MEMORY (*C_to_M_handle, Cnvec, sizeof (int64_t)) ;
ok = ok && (*C_to_M_handle != NULL) ;
}
if (C_to_A_handle != NULL)
{
GB_MALLOC_MEMORY (*C_to_A_handle, Cnvec, sizeof (int64_t)) ;
ok = ok && (*C_to_A_handle != NULL) ;
}
if (C_to_B_handle != NULL)
{
GB_MALLOC_MEMORY (*C_to_B_handle, Cnvec, sizeof (int64_t)) ;
ok = ok && (*C_to_B_handle != NULL) ;
}
if (!ok)
{
// out of memory
if (Ch_handle != NULL)
{
GB_FREE_MEMORY (*Ch_handle, Cnvec, sizeof (int64_t)) ;
}
if (C_to_M_handle != NULL)
{
GB_FREE_MEMORY (*C_to_M_handle, Cnvec, sizeof (int64_t)) ;
}
if (C_to_A_handle != NULL)
{
GB_FREE_MEMORY (*C_to_A_handle, Cnvec, sizeof (int64_t)) ;
}
if (C_to_B_handle != NULL)
{
GB_FREE_MEMORY (*C_to_B_handle, Cnvec, sizeof (int64_t)) ;
}
}
return (ok) ;
}
//------------------------------------------------------------------------------
// GB_add_phase0: find the vectors of C for C<M>=A+B
//------------------------------------------------------------------------------

// On success, returns Cnvec (# of vectors in C), the hyperlist Ch (NULL if C
// will be standard sparse), and the mappings C_to_M, C_to_A, C_to_B (each
// NULL when the corresponding matrix is standard).  The caller owns and must
// free all returned arrays.  See the comment block above for the exact
// semantics of each mapping.

GrB_Info GB_add_phase0          // find vectors in C for C=A+B or C<M>=A+B
(
    int64_t *p_Cnvec,                   // # of vectors to compute in C
    int64_t *restrict *Ch_handle,       // Ch: size Cnvec, or NULL
    int64_t *restrict *C_to_M_handle,   // C_to_M: size Cnvec, or NULL
    int64_t *restrict *C_to_A_handle,   // C_to_A: size Cnvec, or NULL
    int64_t *restrict *C_to_B_handle,   // C_to_B: of size Cnvec, or NULL
    bool *p_Ch_is_Mh,                   // if true, then Ch == Mh
    const GrB_Matrix M,     // optional mask, may be NULL; not complemented
    const GrB_Matrix A,     // standard, hypersparse, slice, or hyperslice
    const GrB_Matrix B,     // standard or hypersparse; never a slice
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (p_Cnvec != NULL) ;
    ASSERT (Ch_handle != NULL) ;
    ASSERT (C_to_A_handle != NULL) ;
    ASSERT (C_to_B_handle != NULL) ;
    ASSERT_OK (GB_check (A, "A for add phase0", GB0)) ;
    ASSERT_OK (GB_check (B, "B for add phase0", GB0)) ;
    ASSERT_OK_OR_NULL (GB_check (M, "M for add phase0", GB0)) ;
    ASSERT (A->vdim == B->vdim) ;
    ASSERT (GB_IMPLIES (M != NULL, A->vdim == M->vdim)) ;

    //--------------------------------------------------------------------------
    // initializations
    //--------------------------------------------------------------------------

    // clear all outputs first, so that any early (error) return is clean
    int64_t *restrict Ch     = NULL ;
    int64_t *restrict C_to_M = NULL ;
    int64_t *restrict C_to_A = NULL ;
    int64_t *restrict C_to_B = NULL ;
    (*Ch_handle) = NULL ;
    (*C_to_A_handle) = NULL ;
    (*C_to_B_handle) = NULL ;
    if (C_to_M_handle != NULL)
    {
        (*C_to_M_handle) = NULL ;
    }

    // task-slicing workspace; allocated only in the hyper-x-hyper merge case
    int64_t *restrict kA_start = NULL ;
    int64_t *restrict kB_start = NULL ;
    int64_t *restrict kC_start = NULL ;
    int ntasks = 0 ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = 1 ;      // nthreads depends on Cnvec, computed below

    //--------------------------------------------------------------------------
    // get content of M, A, and B
    //--------------------------------------------------------------------------

    int64_t Cnvec ;

    int64_t n = A->vdim ;
    int64_t Anvec = A->nvec ;
    bool A_is_hyper = A->is_hyper ;
    bool A_is_slice = A->is_slice ;
    const int64_t *restrict Ap = A->p ;
    const int64_t *restrict Ah = (A_is_hyper) ? A->h : NULL ;
    const int64_t A_hfirst = A->hfirst ;
    // GB_Ah(k): vector index of the kth vector of A; for a slice the list
    // is implicit (A_hfirst + k), otherwise it is the explicit list Ah.
    #define GB_Ah(k) (A_is_hyper ? Ah [k] : (A_hfirst + (k)))

    int64_t Bnvec = B->nvec ;
    const int64_t *restrict Bp = B->p ;
    const int64_t *restrict Bh = B->h ;
    bool B_is_hyper = B->is_hyper ;
    ASSERT (!B->is_slice) ;

    int64_t Mnvec = 0 ;
    const int64_t *restrict Mp = NULL ;
    const int64_t *restrict Mh = NULL ;
    bool M_is_hyper = false ;
    if (M != NULL)
    {
        Mnvec = M->nvec ;
        Mp = M->p ;
        Mh = M->h ;
        M_is_hyper = M->is_hyper ;
        ASSERT (!M->is_slice) ;
    }

    // For GB_add, if M is present, hypersparse, and not complemented, then C
    // will be hypersparse, and it will have set of vectors as M (Ch == Mh).
    // For GB_masker, Ch is never equal to Mh.
    // (p_Ch_is_Mh != NULL only when called from GB_add.)
    bool Ch_is_Mh = (p_Ch_is_Mh != NULL) && (M != NULL && M_is_hyper) ;

    //--------------------------------------------------------------------------
    // find the set union of the non-empty vectors of A and B
    //--------------------------------------------------------------------------

    if (Ch_is_Mh)
    {

        //----------------------------------------------------------------------
        // C is hypersparse, with the same vectors as the hypersparse M
        //----------------------------------------------------------------------

        // This step is done for GB_add only, not GB_masker.

        // GB_wait is the only place where A may be a slice, and it does not
        // use a mask.  So this phase can ignore the case where A is a slice.

        Cnvec = Mnvec ;
        nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;
        ASSERT (!A_is_slice) ;

        if (!GB_allocate_result (Cnvec, &Ch, NULL,
            (A_is_hyper) ? (&C_to_A) : NULL, (B_is_hyper) ? (&C_to_B) : NULL))
        {
            // out of memory
            GB_FREE_WORK ;
            return (GB_OUT_OF_MEMORY) ;
        }

        // copy Mh into Ch.  Ch is Mh so C_to_M is not needed.
        GB_memcpy (Ch, Mh, Mnvec * sizeof (int64_t), nthreads) ;

        // construct the mapping from C to A and B, if they are hypersparse
        if (A_is_hyper || B_is_hyper)
        {
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (int64_t k = 0 ; k < Cnvec ; k++)
            {
                int64_t j = Ch [k] ;
                if (A_is_hyper)
                {
                    // C_to_A [k] = kA if Ah [kA] == j and A(:,j) is non-empty
                    int64_t kA = 0, pA, pA_end ;
                    GB_lookup (true, Ah, Ap, &kA, Anvec-1, j, &pA, &pA_end) ;
                    C_to_A [k] = (pA < pA_end) ? kA : -1 ;
                }
                if (B_is_hyper)
                {
                    // C_to_B [k] = kB if Bh [kB] == j and B(:,j) is non-empty
                    int64_t kB = 0, pB, pB_end ;
                    GB_lookup (true, Bh, Bp, &kB, Bnvec-1, j, &pB, &pB_end) ;
                    C_to_B [k] = (pB < pB_end) ? kB : -1 ;
                }
            }
        }

    }
    else if ((A_is_hyper || A_is_slice) && B_is_hyper)
    {

        //----------------------------------------------------------------------
        // A is hypersparse or a hyperslice, and B is hypersparse
        //----------------------------------------------------------------------

        // Ch is the set union of Ah and Bh.  This is handled with a parallel
        // merge, since Ah and Bh are both sorted lists.

        //----------------------------------------------------------------------
        // phase 0: create the tasks
        //----------------------------------------------------------------------

        double work = GB_IMIN (Anvec + Bnvec, n) ;
        nthreads = GB_nthreads (work, chunk, nthreads_max) ;
        ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;
        ntasks = GB_IMIN (ntasks, work) ;

        // allocate workspace
        GB_MALLOC_MEMORY (kA_start, ntasks+1, sizeof (int64_t)) ;
        GB_MALLOC_MEMORY (kB_start, ntasks+1, sizeof (int64_t)) ;
        GB_MALLOC_MEMORY (kC_start, ntasks+1, sizeof (int64_t)) ;
        if (kA_start == NULL || kB_start == NULL || kC_start == NULL)
        {
            // out of memory
            GB_FREE_WORK ;
            return (GB_OUT_OF_MEMORY) ;
        }

        // -1 marks an empty list; task boundaries are found by GB_slice_vector
        kA_start [0] = (Anvec == 0) ? -1 : 0 ;
        kB_start [0] = (Bnvec == 0) ? -1 : 0 ;
        kA_start [ntasks] = (Anvec == 0) ? -1 : Anvec ;
        kB_start [ntasks] = (Bnvec == 0) ? -1 : Bnvec ;

        for (int taskid = 1 ; taskid < ntasks ; taskid++)
        {
            // create tasks: A and B are both hyper
            double target_work = ((ntasks-taskid) * work) / ntasks ;
            GB_slice_vector (NULL, NULL,
                &(kA_start [taskid]), &(kB_start [taskid]),
                0, 0, NULL,             // Mi not present
                0, Anvec, Ah, A_hfirst, // Ah, explicit or implicit list
                0, Bnvec, Bh,           // Bh, explicit list
                n,                      // Ah and Bh have dimension n
                target_work) ;
        }

        //----------------------------------------------------------------------
        // phase 1: count the entries in the result of each task
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule (dynamic,1)
        for (int taskid = 0 ; taskid < ntasks ; taskid++)
        {
            // merge Ah and Bh into Ch (counting only)
            int64_t kA = kA_start [taskid] ;
            int64_t kB = kB_start [taskid] ;
            int64_t kA_end = kA_start [taskid+1] ;
            int64_t kB_end = kB_start [taskid+1] ;
            int64_t kC = 0 ;
            for ( ; kA < kA_end && kB < kB_end ; kC++)
            {
                int64_t jA = GB_Ah (kA) ;
                int64_t jB = Bh [kB] ;
                if (jA < jB)
                {
                    // jA appears in A but not B
                    kA++ ;
                }
                else if (jB < jA)
                {
                    // jB appears in B but not A
                    kB++ ;
                }
                else
                {
                    // j = jA = jB appears in both A and B
                    kA++ ;
                    kB++ ;
                }
            }
            // leftover entries of A or B each contribute one vector of C
            kC_start [taskid] = kC + (kA_end - kA) + (kB_end - kB) ;
        }

        //----------------------------------------------------------------------
        // phase 1b: cumulative sum of entries for each task
        //----------------------------------------------------------------------

        GB_cumsum (kC_start, ntasks, NULL, 1) ;
        Cnvec = kC_start [ntasks] ;

        //----------------------------------------------------------------------
        // allocate the result
        //----------------------------------------------------------------------

        // C will be hypersparse, so Ch is allocated.  The mask M is ignored
        // for computing Ch.  Ch is the set union of Ah and Bh.

        if (!GB_allocate_result (Cnvec, &Ch,
            (M_is_hyper) ? (&C_to_M) : NULL, &C_to_A, &C_to_B))
        {
            // out of memory
            GB_FREE_WORK ;
            return (GB_OUT_OF_MEMORY) ;
        }

        //----------------------------------------------------------------------
        // phase 2: compute the result
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule (dynamic,1)
        for (int taskid = 0 ; taskid < ntasks ; taskid++)
        {
            // merge Ah and Bh into Ch; each task writes its own disjoint
            // slice of Ch/C_to_A/C_to_B, starting at kC_start [taskid]
            int64_t kA = kA_start [taskid] ;
            int64_t kB = kB_start [taskid] ;
            int64_t kC = kC_start [taskid] ;
            int64_t kA_end = kA_start [taskid+1] ;
            int64_t kB_end = kB_start [taskid+1] ;

            // merge Ah and Bh into Ch
            for ( ; kA < kA_end && kB < kB_end ; kC++)
            {
                int64_t jA = GB_Ah (kA) ;
                int64_t jB = Bh [kB] ;
                if (jA < jB)
                {
                    // append jA to Ch
                    Ch     [kC] = jA ;
                    C_to_A [kC] = kA++ ;
                    C_to_B [kC] = -1 ;      // jA does not appear in B
                }
                else if (jB < jA)
                {
                    // append jB to Ch
                    Ch     [kC] = jB ;
                    C_to_A [kC] = -1 ;      // jB does not appear in A
                    C_to_B [kC] = kB++ ;
                }
                else
                {
                    // j appears in both A and B; append it to Ch
                    Ch     [kC] = jA ;
                    C_to_A [kC] = kA++ ;
                    C_to_B [kC] = kB++ ;
                }
            }
            if (kA < kA_end)
            {
                // B is exhausted but A is not
                for ( ; kA < kA_end ; kA++, kC++)
                {
                    // append jA to Ch
                    int64_t jA = GB_Ah (kA) ;
                    Ch     [kC] = jA ;
                    C_to_A [kC] = kA ;
                    C_to_B [kC] = -1 ;
                }
            }
            else if (kB < kB_end)
            {
                // A is exhausted but B is not
                for ( ; kB < kB_end ; kB++, kC++)
                {
                    // append jB to Ch
                    int64_t jB = Bh [kB] ;
                    Ch     [kC] = jB ;
                    C_to_A [kC] = -1 ;
                    C_to_B [kC] = kB ;
                }
            }
            ASSERT (kC == kC_start [taskid+1]) ;
        }

        //----------------------------------------------------------------------
        // check result via a sequential merge
        //----------------------------------------------------------------------

        #ifdef GB_DEBUG
        // merge Ah and Bh into Ch (debug only: redo the merge sequentially
        // and verify that the parallel result matches)
        int64_t kA = 0 ;
        int64_t kB = 0 ;
        int64_t kC = 0 ;
        for ( ; kA < Anvec && kB < Bnvec ; kC++)
        {
            int64_t jA = GB_Ah (kA) ;
            int64_t jB = Bh [kB] ;
            if (jA < jB)
            {
                // append jA to Ch
                ASSERT (Ch [kC] == jA) ;
                ASSERT (C_to_A [kC] == kA) ; kA++ ;
                ASSERT (C_to_B [kC] == -1) ;    // jA does not appear in B
            }
            else if (jB < jA)
            {
                // append jB to Ch
                ASSERT (Ch [kC] == jB) ;
                ASSERT (C_to_A [kC] == -1) ;    // jB does not appear in A
                ASSERT (C_to_B [kC] == kB) ; kB++ ;
            }
            else
            {
                // j appears in both A and B; append it to Ch
                ASSERT (Ch [kC] == jA) ;
                ASSERT (C_to_A [kC] == kA) ; kA++ ;
                ASSERT (C_to_B [kC] == kB) ; kB++ ;
            }
        }
        if (kA < Anvec)
        {
            // B is exhausted but A is not
            for ( ; kA < Anvec ; kA++, kC++)
            {
                // append jA to Ch
                int64_t jA = GB_Ah (kA) ;
                ASSERT (Ch [kC] == jA) ;
                ASSERT (C_to_A [kC] == kA) ;
                ASSERT (C_to_B [kC] == -1) ;
            }
        }
        else if (kB < Bnvec)
        {
            // A is exhausted but B is not
            for ( ; kB < Bnvec ; kB++, kC++)
            {
                // append jB to Ch
                int64_t jB = Bh [kB] ;
                ASSERT (Ch [kC] == jB) ;
                ASSERT (C_to_A [kC] == -1) ;
                ASSERT (C_to_B [kC] == kB) ;
            }
        }
        ASSERT (kC == Cnvec) ;
        #endif

    }
    else if ((A_is_hyper || A_is_slice) && !B_is_hyper)
    {

        //----------------------------------------------------------------------
        // A is hypersparse, B is standard
        //----------------------------------------------------------------------

        // C will be standard.  Construct the C_to_A mapping.

        Cnvec = n ;
        nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;

        if (!GB_allocate_result (Cnvec, NULL,
            (M_is_hyper) ? (&C_to_M) : NULL, &C_to_A, NULL))
        {
            // out of memory
            GB_FREE_WORK ;
            return (GB_OUT_OF_MEMORY) ;
        }

        // initialize all vectors as "not present in A"
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int64_t j = 0 ; j < n ; j++)
        {
            C_to_A [j] = -1 ;
        }

        // scatter Ah into C_to_A
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int64_t kA = 0 ; kA < Anvec ; kA++)
        {
            int64_t jA = GB_Ah (kA) ;
            C_to_A [jA] = kA ;
        }

    }
    else if (!(A_is_hyper || A_is_slice) && B_is_hyper)
    {

        //----------------------------------------------------------------------
        // A is standard, B is hypersparse
        //----------------------------------------------------------------------

        // C will be standard.  Construct the C_to_B mapping.

        Cnvec = n ;
        nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;

        if (!GB_allocate_result (Cnvec, NULL,
            (M_is_hyper) ? (&C_to_M) : NULL, NULL, &C_to_B))
        {
            // out of memory
            GB_FREE_WORK ;
            return (GB_OUT_OF_MEMORY) ;
        }

        // initialize all vectors as "not present in B"
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int64_t j = 0 ; j < n ; j++)
        {
            C_to_B [j] = -1 ;
        }

        // scatter Bh into C_to_B
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (int64_t kB = 0 ; kB < Bnvec ; kB++)
        {
            int64_t jB = Bh [kB] ;
            C_to_B [jB] = kB ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // A and B are both standard
        //----------------------------------------------------------------------

        // C will be standard; no Ch, C_to_A, or C_to_B mappings are needed

        Cnvec = n ;
        nthreads = GB_nthreads (Cnvec, chunk, nthreads_max) ;

        if (!GB_allocate_result (Cnvec, NULL,
            (M_is_hyper) ? (&C_to_M) : NULL, NULL, NULL))
        {
            // out of memory
            GB_FREE_WORK ;
            return (GB_OUT_OF_MEMORY) ;
        }

    }

    //--------------------------------------------------------------------------
    // construct C_to_M if needed
    //--------------------------------------------------------------------------

    if (C_to_M != NULL)
    {
        if (Ch != NULL)
        {
            // C is hypersparse
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (int64_t k = 0 ; k < Cnvec ; k++)
            {
                int64_t j = Ch [k] ;
                // C_to_M [k] = kM if Mh [kM] == j and M(:,j) is non-empty
                int64_t kM = 0, pM, pM_end ;
                GB_lookup (true, Mh, Mp, &kM, Mnvec-1, j, &pM, &pM_end) ;
                C_to_M [k] = (pM < pM_end) ? kM : -1 ;
            }
        }
        else
        {
            // C is standard
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (int64_t j = 0 ; j < n ; j++)
            {
                C_to_M [j] = -1 ;
            }
            // scatter Mh into C_to_M
            #pragma omp parallel for num_threads(nthreads) schedule(static)
            for (int64_t kM = 0 ; kM < Mnvec ; kM++)
            {
                int64_t jM = Mh [kM] ;
                C_to_M [jM] = kM ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    (*p_Cnvec ) = Cnvec ;
    if (p_Ch_is_Mh != NULL)
    {
        // return Ch_is_Mh to GB_add.  For GB_masker, Ch is never Mh.
        (*p_Ch_is_Mh) = Ch_is_Mh ;
    }
    (*Ch_handle    ) = Ch ;
    (*C_to_A_handle) = C_to_A ;
    (*C_to_B_handle) = C_to_B ;
    if (C_to_M_handle != NULL)
    {
        (*C_to_M_handle) = C_to_M ;
    }

    //--------------------------------------------------------------------------
    // The code below describes what the output contains:
    //--------------------------------------------------------------------------

    #ifdef GB_DEBUG
    ASSERT (A != NULL) ;        // A and B are always present
    ASSERT (B != NULL) ;
    int64_t jlast = -1 ;
    for (int64_t k = 0 ; k < Cnvec ; k++)
    {

        // C(:,j) is in the list, as the kth vector
        int64_t j ;
        if (Ch == NULL)
        {
            // C will be constructed as standard sparse
            j = k ;
        }
        else
        {
            // C will be constructed as hypersparse
            j = Ch [k] ;
        }

        // vectors j in Ch are sorted, and in the range 0:n-1
        ASSERT (j >= 0 && j < n) ;
        ASSERT (j > jlast) ;
        jlast = j ;

        // see if A (:,j) exists
        if (C_to_A != NULL)
        {
            // A is hypersparse, or a slice
            ASSERT (A->is_hyper || A->is_slice) ;
            int64_t kA = C_to_A [k] ;
            ASSERT (kA >= -1 && kA < A->nvec) ;
            if (kA >= 0)
            {
                int64_t jA = GB_Ah (kA) ;
                ASSERT (j == jA) ;
            }
        }
        else
        {
            // A is in standard sparse form
            // C_to_A exists only if A is hypersparse
            ASSERT (!(A->is_hyper || A->is_slice)) ;
        }

        // see if B (:,j) exists
        if (C_to_B != NULL)
        {
            // B is hypersparse
            ASSERT (B->is_hyper) ;
            int64_t kB = C_to_B [k] ;
            ASSERT (kB >= -1 && kB < B->nvec) ;
            if (kB >= 0)
            {
                int64_t jB = B->h [kB] ;
                ASSERT (j == jB) ;
            }
        }
        else
        {
            // B is in standard sparse form
            // C_to_B exists only if B is hypersparse
            ASSERT (!B->is_hyper) ;
        }

        // see if M (:,j) exists
        if (Ch_is_Mh)
        {
            // Ch is the same as Mh
            ASSERT (M != NULL) ;
            ASSERT (M->is_hyper) ;
            ASSERT (Ch != NULL && M->h != NULL && Ch [k] == M->h [k]) ;
            ASSERT (C_to_M == NULL) ;
        }
        else if (C_to_M != NULL)
        {
            // M is present and hypersparse
            ASSERT (M != NULL) ;
            ASSERT (M->is_hyper) ;
            int64_t kM = C_to_M [k] ;
            ASSERT (kM >= -1 && kM < M->nvec) ;
            if (kM >= 0)
            {
                int64_t jM = M->h [kM] ;
                ASSERT (j == jM) ;
            }
        }
        else
        {
            // M is not present, or in standard form
            ASSERT (M == NULL || !(M->is_hyper)) ;
        }
    }
    #endif

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
}
|
smul_glv4.c | //#define DEBUG_MODE
#ifdef DEBUG_MODE
#include <stdio.h>
#include "kernel.h"
#else
#include "_core.h"
#endif
#include "multiprecision.h"
#include "multiprecision_stack.h"
#include "finite128.h"
// phi: apply the curve endomorphism to a Jacobian divisor in projective
// form.  Each Mumford coordinate of D1 is scaled by the corresponding
// precomputed constant from cn (u1z/u0z for P/R, v1z/v0z for S/T); the
// projective coordinate Z is carried over unchanged.  D3 may alias D1,
// since every statement touches a distinct member.
static inline void phi(DIV_hec_fp_2e128mc D3, const DIV_hec_fp_2e128mc D1, const CNS_hec_fp_2e128mc_glv4 cn){
    fp_cpy_2e128mc_x8664(D3->Z, D1->Z);
    fp_mul_2e128mc_x8664(D3->T, cn->prm, NULL, D1->T, cn->v0z);
    fp_mul_2e128mc_x8664(D3->S, cn->prm, NULL, D1->S, cn->v1z);
    fp_mul_2e128mc_x8664(D3->R, cn->prm, NULL, D1->R, cn->u0z);
    fp_mul_2e128mc_x8664(D3->P, cn->prm, NULL, D1->P, cn->u1z);
}
// Scalar multiplication D1 = [kn] D2 on a genus-2 Jacobian over GF(2^128-c),
// using a 4-dimensional GLV decomposition: the scalar k is split into four
// mini-scalars k0..k3 (via Babai rounding against lattice constants ah*/A*/N
// from cn), the endomorphism phi gives d1 = phi(d0), d2 = phi^2(d0),
// d3 = phi^3(d0), and a 16-entry joint table drives a simultaneous
// double-and-add over the four mini-scalars.
void hec_fp_smul_2e128mc_bk_glv4(DIV_hec_fp_2e128mc D1, const uni kn, DIV_hec_fp_2e128mc D2, CNS_hec_fp_2e128mc_glv4 cn)
{
    // stack-backed storage for the multi-precision temporaries below
    uni_t y1s[4*FP_LEN], y2s[4*FP_LEN], y3s[4*FP_LEN], y4s[4*FP_LEN], rs[4*FP_LEN], ts[4*FP_LEN], k0s[4*FP_LEN], k1s[4*FP_LEN], k2s[4*FP_LEN], k3s[4*FP_LEN], Ks[4*FP_LEN], Ts[4*FP_LEN];
    MI_t y1, y2, y3, y4, ah1, ah2, ah3, ah4, N, r, k, t, A1, A2, A3, A4, k0, k1, k2, k3, K, T, Nt;
    DIV_hec_fp_2e128mc_t d0, d1, d2, d3, tbl[16];
    int j, ei, b, bt;
    /*uni_t i; fp_cnt_bits(&i, kn, FP_LEN*2); printf("%d ", i);*/
    // wrap the precomputed lattice constants from cn as signed MI values
    ah1->v->n = (uni)cn->ah1; ah1->v->l = 3*FP_LEN/2; ah1->s = POSITIVE;
    ah2->v->n = (uni)cn->ah2; ah2->v->l = 3*FP_LEN/2; ah2->s = POSITIVE;
    ah3->v->n = (uni)cn->ah3; ah3->v->l = 3*FP_LEN/2; ah3->s = POSITIVE;
    ah4->v->n = (uni)cn->ah4; ah4->v->l = 3*FP_LEN/2; ah4->s = NEGATIVE;
    A1->v->n = (uni)cn->A1; A1->v->l = FP_LEN/2; A1->s = POSITIVE;
    A2->v->n = (uni)cn->A2; A2->v->l = FP_LEN/2; A2->s = NEGATIVE;
    A3->v->n = (uni)cn->A3; A3->v->l = FP_LEN/2; A3->s = NEGATIVE;
    A4->v->n = (uni)cn->A4; A4->v->l = FP_LEN/2; A4->s = POSITIVE;
    N->v->n = (uni)cn->N; N->v->l = 2*FP_LEN; N->s = POSITIVE;
    Nt->v->n = (uni)cn->Nhalf; Nt->v->l = 2*FP_LEN; Nt->s = POSITIVE;
    k->v->n = kn; k->v->l = 2*FP_LEN; k->s = POSITIVE;
    y1->v->n = y1s; y2->v->n = y2s; y3->v->n = y3s; y4->v->n = y4s;
    k0->v->n = k0s; k1->v->n = k1s; k2->v->n = k2s; k3->v->n = k3s;
    t->v->n = ts; r->v->n = rs; T->v->n = Ts; K->v->n = Ks;
    // y1 = round(ah1*k / N): truncated quotient, then adjust by 1 toward
    // the nearest integer when the remainder exceeds N/2 (Nt = N/2)
    mi_mul_stack(t, ah1, k); mi_div_q_r_stack(y1, r, t, N);
    if(mi_compare_abs_stack(r, Nt) == GREATER){
        if(t->s == POSITIVE){
            mi_add_1_stack(y1, y1, 1);
        }
        else{
            mi_sub_1_stack(y1, y1, 1);
        }
    }
    // y2 = round(ah2*k / N)
    mi_mul_stack(t, ah2, k); mi_div_q_r_stack(y2, r, t, N);
    if(mi_compare_abs_stack(r, Nt) == GREATER){
        if(t->s == POSITIVE){
            mi_add_1_stack(y2, y2, 1);
        }
        else{
            mi_sub_1_stack(y2, y2, 1);
        }
    }
    // y3 = round(ah3*k / N)
    mi_mul_stack(t, ah3, k); mi_div_q_r_stack(y3, r, t, N);
    if(mi_compare_abs_stack(r, Nt) == GREATER){
        if(t->s == POSITIVE){
            mi_add_1_stack(y3, y3, 1);
        }
        else{
            mi_sub_1_stack(y3, y3, 1);
        }
    }
    // y4 = round(ah4*k / N)
    mi_mul_stack(t, ah4, k); mi_div_q_r_stack(y4, r, t, N);
    if(mi_compare_abs_stack(r, Nt) == GREATER){
        if(t->s == POSITIVE){
            mi_add_1_stack(y4, y4, 1);
        }
        else{
            mi_sub_1_stack(y4, y4, 1);
        }
    }
    // K = A2*y4 + A3*y3 + A4*y2 (shared term of the mini-scalar formulas)
    mi_mul_stack(K, A2, y4);
    mi_mul_stack(T, A3, y3);
    mi_add_stack(K, K, T);
    mi_mul_stack(T, A4, y2);
    mi_add_stack(K, K, T);
    // k0 = k - (A1*y1 + A4*y3 + A3*y4 - K)
    mi_mul_stack(k0, A1, y1);
    mi_mul_stack(T, A4, y3);
    mi_add_stack(k0, k0, T);
    mi_mul_stack(T, A3, y4);
    mi_add_stack(k0, k0, T);
    mi_sub_stack(k0, k0, K);
    mi_sub_stack(k0, k, k0);
    // k1 = K - (A1*y2 + A2*y1 + A4*y4)
    mi_mul_stack(k1, A1, y2);
    mi_mul_stack(T, A2, y1);
    mi_add_stack(k1, k1, T);
    mi_mul_stack(T, A4, y4);
    mi_add_stack(k1, k1, T);
    mi_sub_stack(k1, K, k1);
    // k2 = K - (A1*y3 + A2*y2 + A3*y1)
    mi_mul_stack(k2, A1, y3);
    mi_mul_stack(T, A2, y2);
    mi_add_stack(k2, k2, T);
    mi_mul_stack(T, A3, y1);
    mi_add_stack(k2, k2, T);
    mi_sub_stack(k2, K, k2);
    // k3 = K - (A2*y3 + A1*y4 + A4*y1 + A3*y2)
    mi_mul_stack(k3, A2, y3);
    mi_mul_stack(T, A1, y4);
    mi_add_stack(k3, k3, T);
    mi_mul_stack(T, A4, y1);
    mi_add_stack(k3, k3, T);
    mi_mul_stack(T, A3, y2);
    mi_add_stack(k3, k3, T);
    mi_sub_stack(k3, K, k3);
    // base points: d0 = D2, d_{i} = phi^i(D2)
    hec_fp_cpy_2e128mc_g2i(d0, D2);
    phi(d1, d0, cn);
    phi(d2, d1, cn);
    phi(d3, d2, cn);
    // fold the mini-scalar signs into the base divisors, so the main loop
    // can treat all four mini-scalars as non-negative
    if(k0->s == NEGATIVE){
        hec_fp_neg_2e128mc_g2i(d0, d0, (CNS_hec_fp_2e128mc)cn);
    }
    if(k1->s == NEGATIVE){
        hec_fp_neg_2e128mc_g2i(d1, d1, (CNS_hec_fp_2e128mc)cn);
    }
    if(k2->s == NEGATIVE){
        hec_fp_neg_2e128mc_g2i(d2, d2, (CNS_hec_fp_2e128mc)cn);
    }
    if(k3->s == NEGATIVE){
        hec_fp_neg_2e128mc_g2i(d3, d3, (CNS_hec_fp_2e128mc)cn);
    }
    // joint table: tbl[b] = sum of d_i over the set bits i of b
    // (bit 0 -> d0, ..., bit 3 -> d3); tbl[0] is the identity
    fp_set_1_2e128mc_x8664(tbl[0]->Z, 0); //Marker for the identity element.
    hec_fp_cpy_2e128mc_g2i(tbl[1], d0);
    hec_fp_cpy_2e128mc_g2i(tbl[2], d1);
    hec_fp_aadd_2e128mc_g2i(tbl[3], tbl[2], tbl[1], (CNS_hec_fp_2e128mc)cn);
    hec_fp_cpy_2e128mc_g2i(tbl[4], d2);
    hec_fp_aadd_2e128mc_g2i(tbl[5], tbl[4], tbl[1], (CNS_hec_fp_2e128mc)cn);
    hec_fp_aadd_2e128mc_g2i(tbl[6], tbl[4], tbl[2], (CNS_hec_fp_2e128mc)cn);
    hec_fp_add_2e128mc_g2i(tbl[7], tbl[4], tbl[3], (CNS_hec_fp_2e128mc)cn);
    hec_fp_cpy_2e128mc_g2i(tbl[8], d3);
    hec_fp_aadd_2e128mc_g2i(tbl[9], tbl[8], tbl[1], (CNS_hec_fp_2e128mc)cn);
    hec_fp_aadd_2e128mc_g2i(tbl[10], tbl[8], tbl[2], (CNS_hec_fp_2e128mc)cn);
    hec_fp_add_2e128mc_g2i(tbl[11], tbl[8], tbl[3], (CNS_hec_fp_2e128mc)cn);
    hec_fp_aadd_2e128mc_g2i(tbl[12], tbl[8], tbl[4], (CNS_hec_fp_2e128mc)cn);
    hec_fp_add_2e128mc_g2i(tbl[13], tbl[8], tbl[5], (CNS_hec_fp_2e128mc)cn);
    hec_fp_add_2e128mc_g2i(tbl[14], tbl[8], tbl[6], (CNS_hec_fp_2e128mc)cn);
    hec_fp_add_2e128mc_g2i(tbl[15], tbl[8], tbl[7], (CNS_hec_fp_2e128mc)cn);
    fp_set_1_2e128mc_x8664(D1->Z, 0); //Marker for the identity element.
    /*TODO: The following lines solves a minor problem caused by 1-bit-longer-than-expected mini-scalars at low-level. This can be made prettier. */
    // bt = max bit length over the four mini-scalars
    fp_cnt_bits(&bt, k0->v->n, k0->v->l);
    fp_cnt_bits(&b, k1->v->n, k1->v->l);
    if(b > bt){ bt = b; }
    fp_cnt_bits(&b, k2->v->n, k2->v->l);
    if(b > bt){ bt = b; }
    fp_cnt_bits(&b, k3->v->n, k3->v->l);
    if(b > bt){ bt = b; }
    // zero a guard word one past each mini-scalar's length, so the bit read
    // at index bt below is well defined.  NOTE(review): assumes l < 4*FP_LEN
    // so the write stays inside the k*s backing arrays — verify.
    k0->v->n[k0->v->l] = 0;
    k1->v->n[k1->v->l] = 0;
    k2->v->n[k2->v->l] = 0;
    k3->v->n[k3->v->l] = 0;
    // first column of bits selects the initial accumulator from the table
    j = bt;
    mam_ith_bit(ei, k3->v->n, j); b = ei;
    mam_ith_bit(ei, k2->v->n, j); b = (b << 1) + ei;
    mam_ith_bit(ei, k1->v->n, j); b = (b << 1) + ei;
    mam_ith_bit(ei, k0->v->n, j); b = (b << 1) + ei;
    hec_fp_cpy_2e128mc_g2i(D1, tbl[b]);
    //#pragma omp parallel for num_threads(2)
    // simultaneous double-and-add: one doubling plus one joint table add per
    // bit column.  NOTE(review): the loop stops at j == 1; whether bit 0 is
    // covered depends on mam_ith_bit's indexing convention — confirm.
    for(j = bt-1; j > 0; j--){
        hec_fp_dbl_2e128mc_g2i_a2is0_a3is0(D1, D1, (CNS_hec_fp_2e128mc)cn);
        mam_ith_bit(ei, k3->v->n, j); b = ei;
        mam_ith_bit(ei, k2->v->n, j); b = (b << 1) + ei;
        mam_ith_bit(ei, k1->v->n, j); b = (b << 1) + ei;
        mam_ith_bit(ei, k0->v->n, j); b = (b << 1) + ei;
        hec_fp_add_2e128mc_g2i(D1, D1, tbl[b], (CNS_hec_fp_2e128mc)cn);
    }
}
|
resize_bilinear.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_KERNELS_RESIZE_BILINEAR_H_
#define MACE_KERNELS_RESIZE_BILINEAR_H_
#include <algorithm>
#include <memory>
#include <vector>
#include "mace/core/future.h"
#include "mace/core/tensor.h"
#ifdef MACE_ENABLE_OPENCL
#include "mace/core/runtime/opencl/cl2_header.h"
#endif // MACE_ENABLE_OPENCL
namespace mace {
namespace kernels {
// Precomputed per-output-coordinate data for 1-D linear interpolation:
// the two source indices bracketing the sample point, and the fractional
// weight of the upper one.
struct CachedInterpolation {
  index_t lower;  // Lower source index used in the interpolation
  index_t upper;  // Upper source index used in the interpolation
  // 1-D linear interpolation scale (see:
  // https://en.wikipedia.org/wiki/Bilinear_interpolation)
  float lerp;
};
// Ratio used to map an output coordinate back into the input.  With
// align_corners set (and more than one output sample) the corner samples of
// input and output coincide, so the scale is taken over (size - 1)
// intervals; otherwise it is the plain size ratio.
inline float CalculateResizeScale(index_t in_size,
                                  index_t out_size,
                                  bool align_corners) {
  if (align_corners && out_size > 1) {
    return (in_size - 1) / static_cast<float>(out_size - 1);
  }
  return in_size / static_cast<float>(out_size);
}
inline void ComputeInterpolationWeights(
const index_t out_size,
const index_t in_size,
const float scale,
CachedInterpolation *interpolation) {
interpolation[out_size].lower = 0;
interpolation[out_size].upper = 0;
for (index_t i = out_size - 1; i >= 0; --i) {
const float in = i * scale;
interpolation[i].lower = static_cast<index_t>(in);
interpolation[i].upper = std::min(interpolation[i].lower + 1, in_size - 1);
interpolation[i].lerp = in - interpolation[i].lower;
}
}
// Bilinear interpolation of the four corner values: first interpolate the
// top and bottom edges horizontally by x_lerp, then blend the two results
// vertically by y_lerp.
inline float ComputeLerp(const float top_left,
                         const float top_right,
                         const float bottom_left,
                         const float bottom_right,
                         const float x_lerp,
                         const float y_lerp) {
  const float upper = top_left + (top_right - top_left) * x_lerp;
  const float lower = bottom_left + (bottom_right - bottom_left) * x_lerp;
  return upper + (lower - upper) * y_lerp;
}
// Bilinearly resize a batch of NCHW float images using the precomputed
// per-axis interpolation tables xs_vec (width) and ys (height).  Each
// (batch, channel) plane is processed independently; the collapse(2)
// pragma parallelizes over the batch x channel grid.
inline void ResizeImage(const float *images,
                        const index_t batch_size,
                        const index_t in_height,
                        const index_t in_width,
                        const index_t out_height,
                        const index_t out_width,
                        const index_t channels,
                        const std::vector<CachedInterpolation> &xs_vec,
                        const std::vector<CachedInterpolation> &ys,
                        float *output) {
  // raw pointer so the inner loop avoids vector::operator[] overhead
  const CachedInterpolation *xs = xs_vec.data();
#pragma omp parallel for collapse(2)
  for (index_t b = 0; b < batch_size; ++b) {
    for (index_t c = 0; c < channels; ++c) {
      // base of this (batch, channel) plane in input and output
      const float
          *channel_input_ptr =
          images + (b * channels + c) * in_height * in_width;
      float *channel_output_ptr =
          output + (b * channels + c) * out_height * out_width;
      for (index_t y = 0; y < out_height; ++y) {
        // the two input rows bracketing output row y
        const float *y_lower_input_ptr =
            channel_input_ptr + ys[y].lower * in_width;
        const float *y_upper_input_ptr =
            channel_input_ptr + ys[y].upper * in_width;
        const float ys_lerp = ys[y].lerp;
        for (index_t x = 0; x < out_width; ++x) {
          const float xs_lerp = xs[x].lerp;
          const float top_left = y_lower_input_ptr[xs[x].lower];
          const float top_right = y_lower_input_ptr[xs[x].upper];
          const float bottom_left = y_upper_input_ptr[xs[x].lower];
          const float bottom_right = y_upper_input_ptr[xs[x].upper];
          channel_output_ptr[y * out_width + x] =
              ComputeLerp(top_left, top_right, bottom_left,
                          bottom_right, xs_lerp, ys_lerp);
        }
      }
    }
  }
}
// Common state for the CPU and GPU resize-bilinear functors: the requested
// output size (given as {height, width}) and the align_corners flag.
struct ResizeBilinearFunctorBase {
  ResizeBilinearFunctorBase(const std::vector<index_t> &size,
                            bool align_corners)
      : align_corners_(align_corners) {
    // size must be exactly {out_height, out_width}
    MACE_CHECK(size.size() == 2);
    out_height_ = size[0];
    out_width_ = size[1];
  }

 protected:
  bool align_corners_;   // map input/output corner samples onto each other
  index_t out_height_;   // requested output height
  index_t out_width_;    // requested output width
};
template<DeviceType D, typename T>
struct ResizeBilinearFunctor;
// CPU float specialization: resizes an NCHW float tensor to
// (out_height_, out_width_) with bilinear interpolation.
template<>
struct ResizeBilinearFunctor<DeviceType::CPU, float>
    : ResizeBilinearFunctorBase {
  ResizeBilinearFunctor(const std::vector<index_t> &size, bool align_corners)
      : ResizeBilinearFunctorBase(size, align_corners) {}

  // Resizes `input` (NCHW) into `output`; `future` is unused on CPU.
  // Returns MACE_SUCCESS, or the error from output->Resize on failure.
  MaceStatus operator()(const Tensor *input,
                        Tensor *output,
                        StatsFuture *future) {
    MACE_UNUSED(future);
    const index_t batch = input->dim(0);
    const index_t channels = input->dim(1);
    const index_t in_height = input->dim(2);
    const index_t in_width = input->dim(3);

    index_t out_height = out_height_;
    index_t out_width = out_width_;
    MACE_CHECK(out_height > 0 && out_width > 0);
    std::vector<index_t> out_shape{batch, channels, out_height, out_width};
    MACE_RETURN_IF_ERROR(output->Resize(out_shape));

    Tensor::MappingGuard input_mapper(input);
    Tensor::MappingGuard output_mapper(output);
    const float *input_data = input->data<float>();
    float *output_data = output->mutable_data<float>();

    // fast path: same size, just copy
    if (out_height == in_height && out_width == in_width) {
      std::copy(input_data,
                input_data + batch * channels * in_height * in_width,
                output_data);
      return MACE_SUCCESS;
    }

    float height_scale =
        CalculateResizeScale(in_height, out_height, align_corners_);
    float width_scale =
        CalculateResizeScale(in_width, out_width, align_corners_);

    // +1 for the sentinel entry written by ComputeInterpolationWeights
    std::vector<CachedInterpolation> ys(out_height + 1);
    std::vector<CachedInterpolation> xs(out_width + 1);

    // Compute the cached interpolation weights on the x and y dimensions.
    ComputeInterpolationWeights(out_height, in_height, height_scale, ys.data());
    ComputeInterpolationWeights(out_width, in_width, width_scale, xs.data());

    ResizeImage(input_data, batch, in_height, in_width, out_height, out_width,
                channels, xs, ys, output_data);

    return MACE_SUCCESS;
  }
};
#ifdef MACE_ENABLE_OPENCL
// OpenCL specialization: operator() is implemented in the corresponding
// .cc file; this declaration only carries the cached kernel state.
template<typename T>
struct ResizeBilinearFunctor<DeviceType::GPU, T>
    : ResizeBilinearFunctorBase {
  ResizeBilinearFunctor(const std::vector<index_t> &size, bool align_corners)
      : ResizeBilinearFunctorBase(size, align_corners) {}

  MaceStatus operator()(const Tensor *input,
                        Tensor *output,
                        StatsFuture *future);

  cl::Kernel kernel_;                         // compiled OpenCL kernel, cached
  uint32_t kwg_size_;                         // max workgroup size for kernel_
  std::unique_ptr<BufferBase> kernel_error_;  // device-side error flag buffer
  std::vector<index_t> input_shape_;          // shape kernel_ was built for
};
#endif // MACE_ENABLE_OPENCL
} // namespace kernels
} // namespace mace
#endif // MACE_KERNELS_RESIZE_BILINEAR_H_
|
Kernel_3d_GDZ.h | #ifndef KRIPKE_KERNEL_3D_GDZ_H__
#define KRIPKE_KERNEL_3D_GDZ_H__
#include<Kripke/Kernel.h>
#include<Grid.h>
// Kernel with Group-Direction-Zone (GDZ) data nesting for the Kripke sweep.
// The call operator performs a diamond-difference transport sweep over one
// (group set, direction set) pair and returns the three updated boundary
// planes.
class Kernel_3d_GDZ : public Kernel {
  public:
    // three plane arrays: {i_plane, j_plane, k_plane}
    typedef std::vector<std::vector<double>> result_type;

    // Grid is needed to access metadata (e.g. gd_sets) stored on it.
    Grid_Data* grid_data;
    int group_set;       // index of the energy-group set to sweep
    int direction_set;   // index of the direction set to sweep

    Kernel_3d_GDZ(Grid_Data*);
    virtual ~Kernel_3d_GDZ();

    virtual Nesting_Order nestingPsi(void) const;
    virtual Nesting_Order nestingPhi(void) const;

    virtual void LTimes(Grid_Data *grid_data);
    virtual void LPlusTimes(Grid_Data *grid_data);

    // Sweep one subdomain: grid_view holds the zonal unknowns, the three
    // planes carry upwind boundary fluxes.  Defined below.
    template<typename GridView, typename IPlane, typename JPlane, typename KPlane>
    result_type
    operator()(GridView& grid_view, IPlane const& i_plane, JPlane const& j_plane,
               KPlane const& k_plane);

    // STAPL serialization of the kernel's members.
    void define_type(stapl::typer& t)
    {
      t.member(grid_data);
      t.member(group_set);
      t.member(direction_set);
    }
};
/* Sweep routine for Diamond-Difference */
/* Macros for offsets with fluxes on cell faces */
#define I_PLANE_INDEX(j, k) (k)*(local_jmax) + (j)
#define J_PLANE_INDEX(i, k) (k)*(local_imax) + (i)
#define K_PLANE_INDEX(i, j) (j)*(local_imax) + (i)
#define Zonal_INDEX(i, j, k) (i) + (local_imax)*(j) \
+ (local_imax)*(local_jmax)*(k)
// Diamond-difference transport sweep over one (group set, direction set)
// pair.  Walks the zones in the octant's sweep order, computing the new
// zonal flux psi from the right-hand side, the three upwind face fluxes,
// and the total cross section, then returns the three boundary planes.
template<typename GridView, typename IPlane, typename JPlane, typename KPlane>
std::vector<std::vector<double>>
Kernel_3d_GDZ::operator()(GridView& grid_view, IPlane const& i_plane_in,
                          JPlane const& j_plane_in, KPlane const& k_plane_in)
{
  // 2-D index into the per-vertex (group, direction) storage
  typedef std::array<typename GridView::value_type::property_type::
                       storage_type::index, 2> index_type;

  result_type result(3);

  // local, mutable copies of the incoming boundary planes
  std::vector<double> i_plane = i_plane_in[0];
  std::vector<double> j_plane = j_plane_in[0];
  std::vector<double> k_plane = k_plane_in[0];

  // grid_data, group_set, and direction_set are data members of the Kernel.
  Group_Dir_Set& gd_set = grid_data->gd_sets()[group_set][direction_set];

  int num_directions = gd_set.num_directions;
  int num_groups = gd_set.num_groups;
  Directions *direction = gd_set.directions;

  // local zone counts and cell widths along each axis
  int local_imax = grid_data->nzones()[0];
  int local_jmax = grid_data->nzones()[1];
  int local_kmax = grid_data->nzones()[2];
  auto dx = grid_data->deltas(0);
  auto dy = grid_data->deltas(1);
  auto dz = grid_data->deltas(2);

  // All directions have same id,jd,kd, since these are all one Direction Set
  // So pull that information out now
  int octant = direction[0].octant;
  Grid_Sweep_Block const &extent = grid_data->octant_extent()[octant];

#ifdef KRIPKE_USE_OPENMP
#pragma omp parallel for
#endif
  for (int group = 0; group < num_groups; ++group) {
    // per-axis coefficients 2*cos/delta, reused for every zone
    std::vector<double> xcos_dxi_all(local_imax);
    std::vector<double> ycos_dyj_all(local_jmax);
    std::vector<double> zcos_dzk_all(local_kmax);

    index_type sigt_idx{{gd_set.group0+group, 0}};

    for (int d = 0; d < num_directions; ++d) {
      index_type psi_idx{{group, d}};
      index_type rhs_idx{{group, d}};

      // NOTE(review): plane_idx is an offset-looking value
      // (num_directions*num_groups + d*num_groups + group) but is used
      // below as a MULTIPLIER of the plane index — confirm against the
      // boundary-plane memory layout.
      int plane_idx = num_directions * num_groups + d * num_groups + group;

      double xcos = direction[d].xcos;
      double ycos = direction[d].ycos;
      double zcos = direction[d].zcos;

      for (int i = 0; i < local_imax; ++i) {
        double dxi = dx[i + 1];
        xcos_dxi_all[i] = 2.0 * xcos / dxi;
      }
      for (int j = 0; j < local_jmax; ++j) {
        double dyj = dy[j + 1];
        ycos_dyj_all[j] = 2.0 * ycos / dyj;
      }
      for (int k = 0; k < local_kmax; ++k) {
        double dzk = dz[k + 1];
        zcos_dzk_all[k] = 2.0 * zcos / dzk;
      }

      /* Perform transport sweep of the grid 1 cell at a time. */
      for (int k = extent.start_k; k != extent.end_k; k += extent.inc_k) {
        double zcos_dzk = zcos_dzk_all[k];
        for (int j = extent.start_j; j != extent.end_j; j += extent.inc_j) {
          double ycos_dyj = ycos_dyj_all[j];
          for (int i = extent.start_i; i != extent.end_i; i += extent.inc_i) {
            double xcos_dxi = xcos_dxi_all[i];

            /* Calculate new zonal flux */
            // get a reference to the vertex being processed.
            int z = Zonal_INDEX(i, j, k);
            auto v = (*grid_view.find_vertex(z)).property();

            // upwind face fluxes (left, front, bottom)
            double psi_lf_g_d_z = i_plane[I_PLANE_INDEX(j, k) * plane_idx];
            double psi_fr_g_d_z = j_plane[J_PLANE_INDEX(i, k) * plane_idx];
            double psi_bo_g_d_z = k_plane[K_PLANE_INDEX(i, j) * plane_idx];

            auto psi_z = v.psi()[group_set][direction_set];
            auto rhs_z = v.rhs()[group_set][direction_set];

            double psi_g_d_z = (rhs_z(rhs_idx)
                                + psi_lf_g_d_z * xcos_dxi
                                + psi_fr_g_d_z * ycos_dyj
                                + psi_bo_g_d_z * zcos_dzk)
                               / (xcos_dxi + ycos_dyj + zcos_dzk
                                  + v.sigt()(sigt_idx));
            psi_z(psi_idx) = psi_g_d_z;

            /* Apply diamond-difference relationships */
            // NOTE(review): these downwind face fluxes are computed but
            // never written back into i_plane/j_plane/k_plane, so the
            // returned planes equal the inputs — confirm this is intended.
            psi_g_d_z *= 2.0;
            psi_lf_g_d_z = psi_g_d_z - psi_lf_g_d_z;
            psi_fr_g_d_z = psi_g_d_z - psi_fr_g_d_z;
            psi_bo_g_d_z = psi_g_d_z - psi_bo_g_d_z;
          }
        }
      }
    }
  }

  result[0] = std::move(i_plane);
  result[1] = std::move(j_plane);
  result[2] = std::move(k_plane);
  return result;
}
#endif
|
mandelbrot.c |
#include <stdio.h>
#include <math.h>
#include <complex.h>
#include <time.h>
#include <omp.h>
#define COL 1202
#define xmin_coordinates -0.13856524454488
#define xmax_coordinates -0.13956524454488
#define ymin_coordinates -0.64935990748190
#define ymax_coordinates -0.65035990748190
#define GNUPLOT "gnuplot -persist"
/* Viewport and iteration parameters for one Mandelbrot rendering. */
typedef struct
{
    int height;        /* number of rows in the image grid */
    int width;         /* number of columns in the image grid */
    int n_iteration;   /* maximum escape-time iterations per point */
    float xmin;        /* real-axis lower bound of the viewport */
    float xmax;        /* real-axis upper bound of the viewport */
    float ymin;        /* imaginary-axis lower bound of the viewport */
    float ymax;        /* imaginary-axis upper bound of the viewport */
    float resolution;  /* bug fix: was misspelled "resolutoin"; never read by name elsewhere */
}mandelbrot;
int complex_cal(float x,float y, int iteration){
double complex z=x+y*I;
double complex z_1=0+0*I;
for(int i=0;i<iteration;i++){
if (cabs(z_1)>2){
return i ;
}
z_1=z_1*z_1+z;
}
return iteration+1;
}
/*
 * Fill u[0..n-1] with n evenly spaced samples from a to b inclusive.
 * The last entry is pinned to exactly b to avoid floating-point drift.
 * Bug fix: the original divided by (n - 1), which is a division by zero
 * when n == 1 and nonsense for n <= 0; both are now guarded.
 */
void linspace(float a, float b, int n, float u[])
{
    if (n <= 0)
        return;                 /* nothing to fill */
    if (n == 1) {
        u[0] = b;               /* single sample: use the endpoint */
        return;
    }
    float step = (b - a) / (n - 1);
    #pragma omp parallel for
    for (int i = 0; i < n - 1; ++i)
        u[i] = a + i * step;
    u[n - 1] = b;               /* exact endpoint */
}
/*
 * Compute the escape-time count for every grid cell.
 * Row r takes its coordinate from x_span[r] and column c from y_span[c],
 * matching the layout the caller established.
 */
void init_mandelbrot(int matrix[][COL],int width, int height,float x_span[],float y_span[], int iteration){
    #pragma omp parallel for
    for (int row = 0; row < height; row++){
        for (int col = 0; col < width; col++){
            matrix[row][col] = complex_cal(x_span[row], y_span[col], iteration);
        }
    }
}
int main(){
    /* NOTE(review): clock() measures CPU time summed over all threads, so
     * with OpenMP the reported figure exceeds wall-clock time; use
     * omp_get_wtime() if wall time is wanted -- confirm intent. */
    clock_t begin = clock();
    mandelbrot mandelbrot1={COL,COL,1024,xmin_coordinates,xmax_coordinates,ymin_coordinates,ymax_coordinates,0.65+1.1875/(COL)};
    float arr_height[mandelbrot1.height];
    float arr_width[mandelbrot1.width];
    /* arr_height carries the x (real-axis) samples and arr_width the y
     * samples; the crossed counts only line up because height == width. */
    linspace(mandelbrot1.xmin,mandelbrot1.xmax,mandelbrot1.width,arr_height);
    linspace(mandelbrot1.ymin,mandelbrot1.ymax,mandelbrot1.height,arr_width);
    // static: a COL x COL int matrix would be too large for the stack
    static int matrix[COL][COL];
    init_mandelbrot(matrix,mandelbrot1.width,mandelbrot1.height,arr_height,arr_width,mandelbrot1.n_iteration);
    FILE *gp = popen(GNUPLOT, "w");
    if (gp == NULL) {   /* bug fix: the popen result was never checked */
        fprintf(stderr, "Failed to start gnuplot\n");
        return 1;
    }
    /* bug fix: the second "set format x" was a duplicate; the y axis was
     * clearly intended. */
    fprintf(gp," set pm3d map \n \
            set format x \"\" \n \
            set format y \"\" \n \
            unset colorbox \n \
            unset border \n \
            unset xtics \n \
            unset ytics \n \
            unset xlabel \n \
            set terminal png enhanced size 15500,15500 \n \
            set output 'mandelbrot.png' \n \
            splot '-' matrix \n ");
    /* Stream the matrix inline as gnuplot "splot '-' matrix" data. */
    for (int i = 0; i < mandelbrot1.height; i++) {
        for (int j = 0; j < mandelbrot1.width; j++)
            fprintf(gp, "%d ", matrix[i][j]);
        fprintf(gp, "\n");
    }
    pclose(gp);
    clock_t end = clock();
    double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("The running time is %f s\n", time_spent);  /* bug fix: was "%f \n s" */
    return 0;
}
|
utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file utils.h
* \brief Basic utilility functions.
*/
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_
#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <nnvm/node.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>
#include <algorithm>
#include <cstring>
#include <functional>
#include <limits>
#include <memory>
#include <random>
#include <string>
#include <thread>
#include <type_traits>
#include <utility>
#include <vector>
#include "../operator/mxnet_op.h"
#if MXNET_USE_MKLDNN == 1
#include "../operator/nn/mkldnn/mkldnn_base-inl.h"
#endif
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
#include <windows.h>
#else
#include <unistd.h>
#endif
namespace mxnet {
namespace common {
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
// Returns the OS process id of the current process (Windows).
inline size_t current_process_id() { return ::GetCurrentProcessId(); }
#else
// Returns the OS process id of the current process (POSIX).
inline size_t current_process_id() { return getpid(); }
#endif
/*!
 * \brief IndPtr should be non-negative, in non-decreasing order, start with 0
 * and end with value equal with size of indices.
 */
struct csr_indptr_check {
  /*!
   * \brief Kernel body: validates indptr interval i and writes
   * kCSRIndPtrErr to *out on any violation (out is untouched otherwise).
   * \param i        interval index in [0, end)
   * \param out      single-element error flag, written only on failure
   * \param indptr   CSR row-pointer array of length end + 1
   * \param end      number of intervals (rows)
   * \param idx_size expected final value indptr[end] (number of indices)
   */
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr,
                                  const nnvm::dim_t end, const nnvm::dim_t idx_size) {
    if (indptr[i+1] < 0 || indptr[i+1] < indptr[i] ||
        (i == 0 && indptr[i] != 0) ||
        (i == end - 1 && indptr[end] != idx_size))
      *out = kCSRIndPtrErr;
  }
};
/*!
 * \brief Indices should be non-negative, less than the number of columns
 * and in ascending order per row.
 */
struct csr_idx_check {
  /*!
   * \brief Kernel body: validates the column indices of row i and writes
   * kCSRIdxErr to *out on the first violation found.
   * \param i      row index
   * \param out    single-element error flag, written only on failure
   * \param idx    CSR column-index array
   * \param indptr CSR row-pointer array delimiting row i's entries
   * \param ncols  number of columns (exclusive upper bound for idx values)
   */
  template<typename DType, typename IType, typename RType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const RType* indptr, const nnvm::dim_t ncols) {
    for (RType j = indptr[i]; j < indptr[i+1]; j++) {
      // Strictly ascending within the row; bounds-checked against ncols.
      if (idx[j] >= ncols || idx[j] < 0 ||
          (j < indptr[i+1] - 1 && idx[j] >= idx[j+1])) {
        *out = kCSRIdxErr;
        break;
      }
    }
  }
};
/*!
 * \brief Indices of RSPNDArray should be non-negative,
 * less than the size of first dimension and in ascending order
 */
struct rsp_idx_check {
  /*!
   * \brief Kernel body: validates row-sparse index i and writes kRSPIdxErr
   * to *out on violation. The (i < end) guard keeps idx[i+1] in bounds;
   * callers pass end = length - 1.
   */
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const nnvm::dim_t end, const nnvm::dim_t nrows) {
    if ((i < end && idx[i+1] <= idx[i])
        || idx[i] < 0 || idx[i] >= nrows)
      *out = kRSPIdxErr;
  }
};
template<typename xpu>
void CheckFormatWrapper(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check);
/*!
 * \brief Check the validity of CSRNDArray.
 * \param rctx Execution context.
 * \param input Input NDArray of CSRStorage.
 * \param err_cpu Error number on cpu.
 * \param full_check If true, rigorous check, O(N) operations,
 *        otherwise basic check, O(1) operations.
 */
template<typename xpu>
void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kCSRStorage)
    << "CheckFormatCSRImpl is for CSRNDArray";
  const mxnet::TShape shape = input.shape();
  const mxnet::TShape idx_shape = input.aux_shape(csr::kIdx);
  const mxnet::TShape indptr_shape = input.aux_shape(csr::kIndPtr);
  const mxnet::TShape storage_shape = input.storage_shape();
  // O(1) structural check: 2-D array, 1-D aux/value arrays, indptr of
  // length rows + 1, and one column index per stored value.
  if ((shape.ndim() != 2) ||
      (idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
      (indptr_shape[0] != shape[0] + 1) ||
      (idx_shape[0] != storage_shape[0])) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kCSRShapeErr;
    });
    return;
  }
  if (full_check) {
    // O(N) content check: run kernels on the target device, then copy the
    // single-element error flag back to err_cpu.
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
        MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
          mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
          NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                    rctx.get_ctx(), false, err_cpu.type_flag_);
          TBlob val_xpu = ret_xpu.data();
          // Initialize the flag to "no error" before the checks run.
          Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
          Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
            input.aux_data(csr::kIndPtr).dptr<RType>(),
            indptr_shape[0] - 1, idx_shape[0]);
          // no need to check indices if indices are empty
          if (idx_shape[0] != 0) {
            Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
              input.aux_data(csr::kIdx).dptr<IType>(),
              input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]);
          }
          mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
                        val_xpu.get<xpu, 1, DType>(s), s);
        });
      });
    });
  }
}
/*!
 * \brief Check the validity of RowSparseNDArray.
 * \param rctx Execution context.
 * \param input Input NDArray of RowSparseStorage.
 * \param err_cpu Error number on cpu.
 * \param full_check If true, rigorous check, O(N) operations,
 *        otherwise basic check, O(1) operations.
 */
template<typename xpu>
void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kRowSparseStorage)
    << "CheckFormatRSPImpl is for RSPNDArray";
  const mxnet::TShape idx_shape = input.aux_shape(rowsparse::kIdx);
  // O(1) check: one stored row per index entry.
  if (idx_shape[0] != input.storage_shape()[0]) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kRSPShapeErr;
    });
    return;
  }
  // An empty index array is trivially valid.
  if (idx_shape[0] == 0) {
    return;
  }
  if (full_check) {
    // O(N) content check on the target device; flag copied back to cpu.
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
        mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
        NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                  rctx.get_ctx(), false, err_cpu.type_flag_);
        TBlob val_xpu = ret_xpu.data();
        Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
        Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0],
          val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(),
          idx_shape[0] - 1, input.shape()[0]);
        mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
                      val_xpu.get<xpu, 1, DType>(s), s);
      });
    });
  }
}
/*!
 * \brief Dispatch a format-validity check based on the input's storage
 * type (CSR, row-sparse, or dense). Dense inputs need no check; any other
 * storage type is a fatal error.
 */
template<typename xpu>
void CheckFormatImpl(const RunContext &rctx, const NDArray &input,
                     const TBlob &err_cpu, const bool full_check) {
  int stype = input.storage_type();
  if (stype == kCSRStorage) {
    CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
  } else if (stype == kRowSparseStorage) {
    CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
  } else if (stype == kDefaultStorage) {
    // no-op for default storage
  } else {
    LOG(FATAL) << "Unknown storage type " << stype;
  }
}
/*! \brief Pick rows specified by user input index array from a row sparse ndarray
* and save them in the output sparse ndarray.
*/
template<typename xpu>
void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s,
const NDArray& input_nd,
const TBlob& idx_data,
const OpReqType req,
NDArray* output_nd);
/* \brief Casts tensor storage type to the new type.
*/
template<typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
* false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
const NDArrayStorageType stype) {
if (!vstorage.empty()) {
for (const auto& i : vstorage) {
if (i != stype) return false;
}
return true;
}
return false;
}
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype1`
 * or `stype2'. Sets boolean if both found.
 * false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
                                const NDArrayStorageType stype1,
                                const NDArrayStorageType stype2,
                                bool *has_both) {
  if (has_both) {
    *has_both = false;
  }
  if (vstorage.empty()) return false;
  uint8_t mask = 0;  // bit 0: saw stype1, bit 1: saw stype2
  for (const auto s : vstorage) {
    if (s == stype1) {
      mask |= 1;
    } else if (s == stype2) {
      mask |= 2;
    } else {
      return false;  // some other storage type present
    }
  }
  if (has_both) {
    *has_both = (mask == 3);
  }
  return true;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as target `stype`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype) {
if (!ndarrays.empty()) {
for (const auto& nd : ndarrays) {
if (nd.storage_type() != stype) {
return false;
}
}
return true;
}
return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as targets `stype1` or `stype2`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool *has_both) {
if (has_both) {
*has_both = false;
}
if (!ndarrays.empty()) {
uint8_t has = 0;
for (const auto& nd : ndarrays) {
const NDArrayStorageType stype = nd.storage_type();
if (stype == stype1) {
has |= 1;
} else if (stype == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief returns true if storage type of any array in `ndarrays`
* is the same as the target `stype`. false is returned for empty inputs.
*/
inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype) {
if (!ndarrays.empty()) {
for (const auto& nd : ndarrays) {
if (nd.storage_type() == stype) {
return true;
}
}
}
return false;
}
/*! \brief returns true if any storage type `ndstype` in `ndstypes`
 * is the same as the target `stype`. false is returned for empty inputs.
 */
inline bool ContainsStorageType(const std::vector<int>& ndstypes,
                                const NDArrayStorageType stype) {
  // std::any_of returns false on an empty range, preserving the contract.
  return std::any_of(ndstypes.begin(), ndstypes.end(),
                     [stype](const int ndstype) { return ndstype == stype; });
}
/*! \brief get string representation of dispatch_mode */
inline std::string dispatch_mode_string(const DispatchMode x) {
  switch (x) {
    case DispatchMode::kFCompute:
      return "fcompute";
    case DispatchMode::kFComputeEx:
      return "fcompute_ex";
    case DispatchMode::kFComputeFallback:
      return "fcompute_fallback";
    case DispatchMode::kVariable:
      return "variable";
    case DispatchMode::kUndefined:
      return "undefined";
  }
  // Unreached for valid enum values; guards against out-of-range casts.
  return "unknown";
}
/*! \brief get string representation of storage_type */
inline std::string stype_string(const int x) {
  switch (x) {
    case kDefaultStorage:
      return "default";
    case kCSRStorage:
      return "csr";
    case kRowSparseStorage:
      return "row_sparse";
  }
  // Any other integer is not a known storage type.
  return "unknown";
}
/*! \brief get string representation of device type */
inline std::string dev_type_string(const int dev_type) {
  switch (dev_type) {
    case Context::kCPU:
      return "cpu";
    case Context::kGPU:
      return "gpu";
    case Context::kCPUPinned:
      return "cpu_pinned";
    case Context::kCPUShared:
      return "cpu_shared";
  }
  // Any other integer is not a known device type.
  return "unknown";
}
/*! \brief get string representation of the operator stypes
 * \param attrs     node attributes carrying op name and parameter dict
 * \param dev_mask  device mask for the execution context
 * \param in_attrs  input storage types
 * \param out_attrs output storage types
 * \return a multi-line human-readable summary for diagnostics
 */
inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
                                         const int dev_mask,
                                         const std::vector<int>& in_attrs,
                                         const std::vector<int>& out_attrs) {
  std::ostringstream os;
  os << "operator = " << attrs.op->name
     << "\ninput storage types = [";
  for (const int attr : in_attrs) {
    os << stype_string(attr) << ", ";
  }
  os << "]\n"
     << "output storage types = [";
  for (const int attr : out_attrs) {
    os << stype_string(attr) << ", ";
  }
  os << "]\n"
     << "params = {";
  // Fix: iterate by const reference; `auto kv` copied each key/value pair.
  for (const auto& kv : attrs.dict) {
    os << "\"" << kv.first << "\" : " << kv.second << ", ";
  }
  os << "}\n"
     << "context.dev_mask = " << dev_type_string(dev_mask);
  return os.str();
}
/*! \brief get string representation of the operator
 * \return a diagnostic summary built from the operator's attributes, the
 * run context's device, and the storage types of inputs and outputs.
 */
inline std::string operator_string(const nnvm::NodeAttrs& attrs,
                                   const OpContext& ctx,
                                   const std::vector<NDArray>& inputs,
                                   const std::vector<OpReqType>& req,
                                   const std::vector<NDArray>& outputs) {
  std::string result = "";
  std::vector<int> in_stypes;
  std::vector<int> out_stypes;
  in_stypes.reserve(inputs.size());
  out_stypes.reserve(outputs.size());
  // Fix: take the NDArray by const reference; the original lambda copied
  // each array by value just to read its storage type.
  auto xform = [](const NDArray& arr) -> int { return arr.storage_type(); };
  std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform);
  std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform);
  result += operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
  return result;
}
/*! \brief log message once. Intended for storage fallback warning messages. */
inline void LogOnce(const std::string& message) {
  // Per-thread set of already-printed messages; a repeat on the same thread
  // is suppressed. NOTE(review): std::unordered_set is used but
  // <unordered_set> is not included by this header -- it must arrive
  // transitively; confirm and add the include.
  typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore;
  auto log_store = LogStore::Get();
  if (log_store->find(message) == log_store->end()) {
    LOG(INFO) << message;
    log_store->insert(message);
  }
}
/*! \brief log storage fallback event
 */
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
                               const int dev_mask,
                               const std::vector<int>* in_attrs,
                               const std::vector<int>* out_attrs) {
  // Opt-out switch: setting MXNET_STORAGE_FALLBACK_LOG_VERBOSE=0 silences
  // the warning for the whole process (read once, cached in a static).
  static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
  if (!log) return;
  const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
  std::ostringstream os;
  const char* warning = "\nThe operator with default storage type will be dispatched "
    "for execution. You're seeing this warning message because the operator above is unable "
    "to process the given ndarrays with specified storage types, context and parameter. "
    "Temporary dense ndarrays are generated in order to execute the operator. "
    "This does not affect the correctness of the programme. "
    "You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to "
    "0 to suppress this warning.";
  os << "\nStorage type fallback detected:\n" << op_str << warning;
  LogOnce(os.str());  // deduplicated per thread
#if MXNET_USE_MKLDNN == 1
  if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. "
                                       "You can re-enable by setting MXNET_MKLDNN_ENABLED=1");
  if (GetMKLDNNCacheSize() != -1) common::LogOnce("MXNET_MKLDNN_CACHE_NUM is set."
                                                  "Should only be set if "
                                                  "your model has variable input shapes, "
                                                  "as cache size may grow unbounded");
#endif
}
// heuristic to determine number of threads per GPU
inline int GetNumThreadsPerGPU() {
  // This is resource efficient option.
  return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}
// heuristic to get number of matching colors.
// this decides how much parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
  // This is resource efficient option.
  // Capped by GetNumThreadsPerGPU() so colors never exceed worker threads.
  int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
  return std::min(num_match_color, GetNumThreadsPerGPU());
}
/*! \brief Sum the first n elements of `a` into an accumulator of type V,
 * seeded with `start`. Uses an OpenMP reduction when compiled with OpenMP;
 * otherwise runs as a plain sequential loop. */
template<typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
  V total = start;
  #pragma omp parallel for reduction(+:total)
  for (int idx = 0; idx < n; ++idx) {
    total += a[idx];
  }
  return total;
}
/*!
 * \brief
 * Helper function for ParallelSort.
 * DO NOT call this function directly.
 * Use the interface ParallelSort instead.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len,
                        size_t grainsize, const Compare& comp) {
  if (len < grainsize) {
    // Base case: range is small enough for a sequential sort.
    std::sort(first, first+len, comp);
  } else {
    // Sort the left half on a new thread, the right half on this one,
    // then merge the two sorted halves in place.
    std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len/2, grainsize, comp);
    ParallelSortHelper(first+len/2, len - len/2, grainsize, comp);
    thr.join();
    std::inplace_merge(first, first+len/2, first+len, comp);
  }
}
/*!
 * \brief
 * Sort the elements in the range [first, last) into the ascending order defined by
 * the comparator comp.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
  const auto num = std::distance(first, last);
  // Grain size bounds the per-thread chunk; the 16K floor avoids spawning
  // threads for ranges too small to benefit.
  size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024*16));
  ParallelSortHelper(first, num, grainsize, comp);
}
/*!
 * \brief
 * Sort the elements in the range [first, last) into ascending order.
 * The elements are compared using the default < operator.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range will be recursively divided into two and assign two threads
 * to sort each half range.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
  // Delegate to the comparator overload with std::less.
  ParallelSort(first, last, num_threads,
               std::less<typename std::iterator_traits<RandomIt>::value_type>());
}
/*!
* \brief Random Engine
*/
typedef std::mt19937 RANDOM_ENGINE;
/*!
* \brief Helper functions.
*/
namespace helper {
/*!
* \brief Helper for non-array type `T`.
*/
template <class T>
struct UniqueIf {
/*!
* \brief Type of `T`.
*/
using SingleObject = std::unique_ptr<T>;
};
/*!
* \brief Helper for an array of unknown bound `T`.
*/
template <class T>
struct UniqueIf<T[]> {
/*!
* \brief Type of `T`.
*/
using UnknownBound = std::unique_ptr<T[]>;
};
/*!
* \brief Helper for an array of known bound `T`.
*/
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
/*!
* \brief Type of `T`.
*/
using KnownBound = void;
};
} // namespace helper
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param args List of arguments with which an instance of `T` will be
* constructed.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Constructs a non-array type `T`. The arguments `args` are passed to the
* constructor of `T`. The function does not participate in the overload
* resolution if `T` is an array type.
*/
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param n The size of the array to construct.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Constructs an array of unknown bound `T`. The function does not participate
* in the overload resolution unless `T` is an array of unknown bound.
*/
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
using U = typename std::remove_extent<T>::type;
return std::unique_ptr<T>(new U[n]{});
}
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param args List of arguments with which an instance of `T` will be
* constructed.
*
* Constructs an arrays of known bound is disallowed.
*/
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;
/*!
 * \brief Look up the registered compute function of kind `name` (e.g.
 * "FCompute") for operator `op` on the device of `ctx`. Returns nullptr
 * when the operator has no registration for that device.
 */
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
                      const Context& ctx) {
  // Attribute maps are cached in function-local statics (looked up once).
  static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
  static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
  if (ctx.dev_mask() == cpu::kDevMask) {
    return fcompute_cpu.get(op, nullptr);
  } else if (ctx.dev_mask() == gpu::kDevMask) {
    return fcompute_gpu.get(op, nullptr);
  } else {
    LOG(FATAL) << "Unknown device mask " << ctx.dev_mask();
    return nullptr;
  }
}
/*!
 * \brief Return the max integer value representable in the type `T` without loss of precision.
 */
template <typename T>
constexpr size_t MaxIntegerValue() {
  // For integral T: the type's own max. For floating T: 2^digits, the
  // largest integer whose neighbors are still exactly representable.
  return std::is_integral<T>::value ?
    std::numeric_limits<T>::max():
    size_t(2) << (std::numeric_limits<T>::digits - 1);
}
template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
  // half_t has an 11-bit significand, so integers up to 2^11 are exact.
  return size_t(2) << 10;
}
// Returns the number of significant bits of a, i.e. floor(log2(a)) + 1 for
// a > 0 (and 1 for a == 0). NOTE(review): despite the name this is not a
// plain integer log2 -- callers appear to want the bit count.
MSHADOW_XINLINE int ilog2ul(size_t a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}
// Same as ilog2ul but for unsigned int arguments.
MSHADOW_XINLINE int ilog2ui(unsigned int a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}
/*!
 * \brief Return an NDArray of all zeros.
 */
inline NDArray InitZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
                         const Context &ctx, const int dtype) {
  // NDArray with default storage
  if (stype == kDefaultStorage) {
    NDArray ret(shape, ctx, false, dtype);
    ret = 0;
    return ret;
  }
  // NDArray with non-default storage. Storage allocation is always delayed.
  return NDArray(stype, shape, ctx, true, dtype);
}
/*!
 * \brief Helper to add a NDArray of zeros to a std::vector.
 */
inline void EmplaceBackZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
                             const Context &ctx, const int dtype,
                             std::vector<NDArray> *vec) {
  // NDArray with default storage
  if (stype == kDefaultStorage) {
    vec->emplace_back(shape, ctx, false, dtype);
    vec->back() = 0;  // dense arrays must be explicitly zero-filled
  } else {
    // NDArray with non-default storage. Storage allocation is always delayed.
    vec->emplace_back(stype, shape, ctx, true, dtype);
  }
}
/*!
 * \brief parallelize copy by OpenMP.
 * Copies `size` elements from src to dst. Large copies (threshold set by
 * MXNET_CPU_PARALLEL_COPY_SIZE, default 200000) are split across OpenMP
 * threads; small copies use memcpy. Regions must not overlap.
 * NOTE(review): std::memcpy requires <cstring>, which this header does not
 * include directly -- see the include list fix.
 */
template<typename DType>
inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
  static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_COPY_SIZE", 200000);
  if (size >= copy_block_size) {
    #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (index_t i = 0; i < size; ++i) {
      dst[i] = src[i];
    }
  } else {
    std::memcpy(dst, src, sizeof(DType) * size);
  }
}
/*!
 * \brief If numpy compatibility is turned off (default), the shapes passed in
 * by users follow the legacy shape definition:
 * 1. 0 ndim means the shape is completely unknown.
 * 2. 0 dim size means the dim size is unknown.
 * We need to convert those shapes to use the numpy shape definition:
 * 1. 0 ndim means it's a scalar tensor.
 * 2. -1 ndim means the shape is unknown.
 * 3. 0 dim size means no elements in that dimension.
 * 4. -1 dim size means the dimension's size is unknown.
 * so that operator's infer shape function can work in backend.
 * \param shape to be converted.
 * Note: It is possible that the shape to be converted is already
 * numpy compatible. For example, when a subgraph operator's infer
 * shape function is called from the infer shape pass of the whole
 * graph, its input/output shapes have been converted to numpy
 * compatible shapes.
 */
inline void ConvertToNumpyShape(mxnet::TShape* shape) {
  if (shape->ndim() == 0) {  // legacy shape ndim = 0 means unknown
    *shape = mxnet::TShape();  // unknown shape ndim = -1
  } else {
    for (int j = 0; j < shape->ndim(); ++j) {
      if ((*shape)[j] == 0) {  // legacy shape dim_size = 0 means unknown
        (*shape)[j] = -1;  // unknown dim size = -1
      }
    }
  }
}
/*! \brief Apply the legacy->numpy shape conversion to every shape in the vector. */
inline void ConvertToNumpyShape(mxnet::ShapeVector* shapes) {
  for (size_t i = 0; i < shapes->size(); ++i) {
    ConvertToNumpyShape(&(shapes->at(i)));
  }
}
/*!
 * \brief This is function is used to convert shapes returned by
 * the infer shape functions/pass to the legacy shape definition.
 * Inverse of ConvertToNumpyShape: -1 markers become the legacy 0 markers.
 */
inline void ConvertToLegacyShape(mxnet::TShape* shape) {
  if (!mxnet::ndim_is_known(*shape)) {
    *shape = mxnet::TShape(0, -1);  // unknown shape -> legacy ndim = 0
  } else {
    for (int j = 0; j < shape->ndim(); ++j) {
      if (!mxnet::dim_size_is_known(*shape, j)) {
        (*shape)[j] = 0;  // unknown dim size -> legacy 0
      }
    }
  }
}
/*! \brief Apply the numpy->legacy shape conversion to every shape in the vector. */
inline void ConvertToLegacyShape(mxnet::ShapeVector* shapes) {
  for (size_t i = 0; i < shapes->size(); ++i) {
    ConvertToLegacyShape(&(shapes->at(i)));
  }
}
/*!
 * \brief This is function can return the output names of a NodeEntry.
 * Wraps the entry in a temporary single-output Symbol and asks it for
 * its (only) output name.
 */
static inline std::string GetOutputName(const nnvm::NodeEntry& e) {
  nnvm::Symbol sym;
  sym.outputs.push_back(e);
  return sym.ListOutputNames()[0];
}
} // namespace common
} // namespace mxnet
#endif // MXNET_COMMON_UTILS_H_
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values.
 * y is normalized in place (borrow/carry between seconds and
 * microseconds) so that result->tv_usec ends up in [0, 1e6).
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y's microseconds when x's are smaller. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry excess microseconds from the difference into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization the microsecond difference is non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 32;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
orphaned-directives.c | #include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif /* _OPENMP */
/* Shared output array filled by init(). */
static double a[1000];
/* Fill a[i] = i / 2.0. The orphaned "#pragma omp for" binds to whatever
 * parallel region encloses the call (see main); outside a region it runs
 * sequentially. */
static void init(void)
{
    int i;
    /* Bug fix: removed "i = i + 5", which read i uninitialized (undefined
     * behavior); the value was immediately overwritten by the loop anyway. */
#pragma omp for
    for (i=0;i<1000;i++)
    {
        a[i]=(double)i/2.0;
    }
}
/* Open a parallel region; every thread calls init(), whose orphaned
 * "#pragma omp for" worksharing construct binds to this region so the
 * loop iterations are divided among the threads. */
int main(void){
#pragma omp parallel
    {
        init();
    }
    return 0;
}
|
omp_parallel_for_if.c | <ompts:test>
<ompts:testdescription>Test which checks the omp parallel for if directive. Needs at least two threads.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp parallel for if</ompts:directive>
<ompts:dependences></ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
int <ompts:testcode:functionname>omp_parallel_for_if</ompts:testcode:functionname>(FILE * logFile){
int known_sum;
<ompts:orphan:vars>
int num_threads;
int sum, sum2;
int i;
int control;
</ompts:orphan:vars>
control = 0;
num_threads=0;
sum = 0;
sum2 = 0;
#pragma omp parallel for private(i) <ompts:check>if (control==1)</ompts:check>
<ompts:orphan>
for (i=0; i <= LOOPCOUNT; i++)
{
num_threads = omp_get_num_threads();
sum = sum + i;
} /*end of for*/
</ompts:orphan>
known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2;
fprintf (logFile, "Number of threads determined by omp_get_num_threads: %d\n", num_threads);
return (known_sum == sum && num_threads == 1);
} /* end of omp_parallel_for_if */
</ompts:testcode>
</ompts:test>
|
diagmm_x_coo_u_col.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
/*
 * Dense update y := alpha * x + beta * y, column by column.
 * For a unit-diagonal sparse matrix (the "_u_" variant, presumably) the
 * diagonal multiply reduces to this axpby-style update; `mat` contributes
 * only its row count. Columns are processed in parallel under OpenMP.
 */
alphasparse_status_t
ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy)
{
    const ALPHA_INT nrows = mat->rows;
    const ALPHA_INT ncols = columns;
    ALPHA_INT nthreads = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(nthreads)
#endif
    for (ALPHA_INT col = 0; col < ncols; ++col)
    {
        for (ALPHA_INT row = 0; row < nrows; ++row)
        {
            /* tmp = alpha * x[col,row]; then y[col,row] = beta*y + tmp */
            ALPHA_Number tmp;
            alpha_setzero(tmp);
            alpha_mul(tmp, alpha, x[index2(col, row, ldx)]);
            alpha_mul(y[index2(col, row, ldy)], beta, y[index2(col, row, ldy)]);
            alpha_add(y[index2(col, row, ldy)], y[index2(col, row, ldy)], tmp);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
ellipticSEMFEMSetup.c | /*
The MIT License (MIT)
Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include "elliptic.h"
// FEM vertex record used while building the low-order FEM mesh:
// physical (VX,VY) coordinates plus local and global ids.
typedef struct{
dfloat VX;      // vertex x coordinate
dfloat VY;      // vertex y coordinate
dlong localId;  // id on this rank
hlong globalId; // globally unique id
}FEMverts_t;
// Node record for parallel gather/scatter numbering: local id, global id,
// and the MPI rank that owns the gathered degree of freedom.
typedef struct {
dlong localId;  // id on this rank
hlong globalId; // globally unique id
int ownerRank;  // MPI rank owning this node
}parallelNode_t;
// compare on global owners
int parallelCompareOwnersAndGlobalId(const void *a, const void *b);
// compare on global indices
int parallelCompareGlobalId(const void *a, const void *b);
// compare xy coordinates
int parallelCompareFEMvertsLocation(const void *a, const void *b){
dfloat NODETOL = 1e-6;
FEMverts_t *fa = (FEMverts_t*) a;
FEMverts_t *fb = (FEMverts_t*) b;
if(fa->VX < fb->VX - NODETOL) return -1;
if(fa->VX > fb->VX + NODETOL) return +1;
if(fa->VY < fb->VY - NODETOL) return -1;
if(fa->VY > fb->VY + NODETOL) return +1;
return 0;
}
// compare local id
int parallelCompareFEMvertsLocalId(const void *a, const void *b){
FEMverts_t *fa = (FEMverts_t*) a;
FEMverts_t *fb = (FEMverts_t*) b;
if(fa->localId < fb->localId) return -1;
if(fa->localId > fb->localId) return +1;
return 0;
}
int parallelCompareRowColumn(const void *a, const void *b);
void BuildFEMMatrixTri2D (mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,int *globalOwners,dlong *cnt, nonZero_t *A);
void BuildFEMMatrixQuad2D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,int *globalOwners,dlong *cnt, nonZero_t *A);
void BuildFEMMatrixTet3D (mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,int *globalOwners,dlong *cnt, nonZero_t *A);
void BuildFEMMatrixHex3D (mesh_t *femMesh, mesh_t *pmesh, dfloat lambda, dlong *localIds, hlong* globalNumbering,int *globalOwners,dlong *cnt, nonZero_t *A);
// Build the SEMFEM preconditioner: project the SEM discretization onto a
// degree-1 FEM mesh of sub-elements, assemble the global FEM stiffness
// matrix in parallel, and hand it to parAlmond's AMG setup.
// Only CONTINUOUS discretizations are supported.
void ellipticSEMFEMSetup(elliptic_t *elliptic, precon_t* precon, dfloat lambda) {
setupAide options = elliptic->options;
if (!(options.compareArgs("DISCRETIZATION", "CONTINUOUS"))) {
printf("SEMFEM is supported for CONTINUOUS only\n");
MPI_Barrier(elliptic->mesh->comm);
MPI_Finalize();
exit(0);
}
mesh_t* mesh = elliptic->mesh; //original mesh
mesh_t* pmesh = (mesh_t*) calloc (1,sizeof(mesh_t)); //partially assembled fem mesh (result of projecting sem element to larger space)
precon->femMesh = (mesh_t*) calloc (1,sizeof(mesh_t)); //full fem mesh
mesh_t *femMesh = precon->femMesh;
// start both auxiliary meshes as shallow copies of the SEM mesh
memcpy(pmesh ,mesh,sizeof(mesh_t));
memcpy(femMesh,mesh,sizeof(mesh_t));
// ---- phase 1: rebuild pmesh node/face data on the SEMFEM node set ----
if (elliptic->elementType==TRIANGLES) {
//set semfem nodes as the grid points
pmesh->Np = mesh->NpFEM;
pmesh->r = mesh->rFEM;
pmesh->s = mesh->sFEM;
//count number of face nodes in the semfem element
dfloat NODETOL = 1e-6;
pmesh->Nfp=0;
for (int n=0;n<pmesh->Np;n++)
if (fabs(pmesh->s[n]+1)<NODETOL) pmesh->Nfp++;
//remake the faceNodes array
pmesh->faceNodes = (int *) calloc(pmesh->Nfaces*pmesh->Nfp,sizeof(int));
int f0=0, f1=0, f2=0;
for (int n=0;n<pmesh->Np;n++) {
if (fabs(pmesh->s[n]+1)<NODETOL) pmesh->faceNodes[0*pmesh->Nfp+f0++] = n;
if (fabs(pmesh->r[n]+pmesh->s[n])<NODETOL) pmesh->faceNodes[1*pmesh->Nfp+f1++] = n;
if (fabs(pmesh->r[n]+1)<NODETOL) pmesh->faceNodes[2*pmesh->Nfp+f2++] = n;
}
//remake vertexNodes array
// NOTE(review): squared distance is compared against NODETOL (not
// NODETOL^2); this matches upstream convention — confirm if tightening.
pmesh->vertexNodes = (int*) calloc(pmesh->Nverts, sizeof(int));
for(int n=0;n<pmesh->Np;++n){
if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)<NODETOL)
pmesh->vertexNodes[0] = n;
if( (pmesh->r[n]-1)*(pmesh->r[n]-1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)<NODETOL)
pmesh->vertexNodes[1] = n;
if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]-1)*(pmesh->s[n]-1)<NODETOL)
pmesh->vertexNodes[2] = n;
}
// connect elements using parallel sort
meshParallelConnect(pmesh);
// compute physical (x,y) locations of the element nodes
meshPhysicalNodesTri2D(pmesh);
// free(sendBuffer);
meshHaloSetup(pmesh);
// connect face nodes (find trace indices)
meshConnectFaceNodes2D(pmesh);
// global nodes
meshParallelConnectNodes(pmesh);
//pmesh->globalIds is now populated
} else if (elliptic->elementType==TETRAHEDRA) {
//set semfem nodes as the grid points
pmesh->Np = mesh->NpFEM;
pmesh->r = mesh->rFEM;
pmesh->s = mesh->sFEM;
pmesh->t = mesh->tFEM;
//count number of face nodes in the semfem element
dfloat NODETOL = 1e-6;
pmesh->Nfp=0;
for (int n=0;n<pmesh->Np;n++)
if (fabs(pmesh->t[n]+1)<NODETOL) pmesh->Nfp++;
//remake the faceNodes array
pmesh->faceNodes = (int *) calloc(pmesh->Nfaces*pmesh->Nfp,sizeof(int));
int f0=0, f1=0, f2=0, f3=0;
for (int n=0;n<pmesh->Np;n++) {
if (fabs(pmesh->t[n]+1)<NODETOL) pmesh->faceNodes[0*pmesh->Nfp+f0++] = n;
if (fabs(pmesh->s[n]+1)<NODETOL) pmesh->faceNodes[1*pmesh->Nfp+f1++] = n;
if (fabs(pmesh->r[n]+pmesh->s[n]+
pmesh->t[n]+1.0)<NODETOL) pmesh->faceNodes[2*pmesh->Nfp+f2++] = n;
if (fabs(pmesh->r[n]+1)<NODETOL) pmesh->faceNodes[3*pmesh->Nfp+f3++] = n;
}
//remake vertexNodes array
pmesh->vertexNodes = (int*) calloc(pmesh->Nverts, sizeof(int));
for(int n=0;n<pmesh->Np;++n){
if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)+(pmesh->t[n]+1)*(pmesh->t[n]+1)<NODETOL)
pmesh->vertexNodes[0] = n;
if( (pmesh->r[n]-1)*(pmesh->r[n]-1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)+(pmesh->t[n]+1)*(pmesh->t[n]+1)<NODETOL)
pmesh->vertexNodes[1] = n;
if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]-1)*(pmesh->s[n]-1)+(pmesh->t[n]+1)*(pmesh->t[n]+1)<NODETOL)
pmesh->vertexNodes[2] = n;
if( (pmesh->r[n]+1)*(pmesh->r[n]+1)+(pmesh->s[n]+1)*(pmesh->s[n]+1)+(pmesh->t[n]-1)*(pmesh->t[n]-1)<NODETOL)
pmesh->vertexNodes[3] = n;
}
// connect elements using parallel sort
meshParallelConnect(pmesh);
// compute physical (x,y) locations of the element nodes
meshPhysicalNodesTet3D(pmesh);
// free(sendBuffer);
meshHaloSetup(pmesh);
// connect face nodes (find trace indices)
meshConnectFaceNodes3D(pmesh);
// global nodes
meshParallelConnectNodes(pmesh);
//pmesh->globalIds is now populated
}
// ---- phase 2: build the full degree-1 FEM mesh of sub-elements ----
//now build the full degree 1 fem mesh
int femN = 1; //degree of fem approximation
/* allocate space for node coordinates */
femMesh->Nelements = mesh->NelFEM*mesh->Nelements;
femMesh->EToV = (hlong*) calloc(femMesh->Nelements*femMesh->Nverts, sizeof(hlong));
femMesh->EX = (dfloat*) calloc(femMesh->Nverts*femMesh->Nelements, sizeof(dfloat));
femMesh->EY = (dfloat*) calloc(femMesh->Nverts*femMesh->Nelements, sizeof(dfloat));
if (elliptic->dim==3)
femMesh->EZ = (dfloat*) calloc(femMesh->Nverts*femMesh->Nelements, sizeof(dfloat));
dlong *localIds = (dlong *) calloc(femMesh->Nverts*femMesh->Nelements,sizeof(dlong));
// dlong NFEMverts = mesh->Nelements*mesh->NpFEM;
for(dlong e=0;e<mesh->Nelements;++e){
for (int n=0;n<mesh->NelFEM;n++) {
dlong id[femMesh->Nverts];
dlong femId = e*mesh->NelFEM*mesh->Nverts+n*mesh->Nverts;
for (int i=0;i<femMesh->Nverts;i++) {
//local ids in the subelement fem grid
id[i] = e*mesh->NpFEM + mesh->FEMEToV[n*mesh->Nverts+i];
/* read vertex triplet for triangle */
femMesh->EToV[femId+i] = pmesh->globalIds[id[i]];
femMesh->EX[femId+i] = pmesh->x[id[i]];
femMesh->EY[femId+i] = pmesh->y[id[i]];
if (elliptic->dim==3)
femMesh->EZ[femId+i] = pmesh->z[id[i]];
}
switch(elliptic->elementType){
case TRIANGLES:
localIds[femId+0] = id[0];
localIds[femId+1] = id[1];
localIds[femId+2] = id[2];
break;
case QUADRILATERALS:
localIds[femId+0] = id[0];
localIds[femId+1] = id[1];
localIds[femId+2] = id[3]; //need to swap this as the Np nodes are ordered [0,1,3,2] in a degree 1 element
localIds[femId+3] = id[2];
break;
case TETRAHEDRA:
localIds[femId+0] = id[0];
localIds[femId+1] = id[1];
localIds[femId+2] = id[2];
localIds[femId+3] = id[3];
break;
case HEXAHEDRA:
localIds[femId+0] = id[0];
localIds[femId+1] = id[1];
localIds[femId+2] = id[3]; //need to swap this as the Np nodes are ordered [0,1,3,2,4,5,7,6] in a degree 1 element
localIds[femId+3] = id[2];
localIds[femId+4] = id[4];
localIds[femId+5] = id[5];
localIds[femId+6] = id[7];
localIds[femId+7] = id[6];
break;
}
}
}
// connect elements using parallel sort
meshParallelConnect(femMesh);
// load degree-1 reference nodes for the chosen element type
switch(elliptic->elementType){
case TRIANGLES:
meshLoadReferenceNodesTri2D(femMesh, femN);
break;
case QUADRILATERALS:
meshLoadReferenceNodesQuad2D(femMesh, femN);
break;
case TETRAHEDRA:
meshLoadReferenceNodesTet3D(femMesh, femN);
break;
case HEXAHEDRA:
meshLoadReferenceNodesHex3D(femMesh, femN);
break;
}
// ---- phase 3: transfer macro-element boundary flags to sub-elements ----
int *faceFlag = (int*) calloc(pmesh->Np*pmesh->Nfaces,sizeof(int));
for (int f=0;f<pmesh->Nfaces;f++) {
for (int n=0;n<pmesh->Nfp;n++) {
int id = pmesh->faceNodes[f*pmesh->Nfp+n];
faceFlag[f*pmesh->Np + id] = 1; //flag the nodes on this face
}
}
//map from faces of fem sub-elements to the macro element face number
int *femFaceMap = (int*) calloc(mesh->NelFEM*femMesh->Nfaces,sizeof(int));
for (int n=0;n<mesh->NelFEM*femMesh->Nfaces;n++) femFaceMap[n] = -1;
for (int n=0;n<mesh->NelFEM;n++) {
for (int f=0;f<femMesh->Nfaces;f++) {
for (int face=0; face<pmesh->Nfaces;face++) {
//count the nodes on this face which are on a macro face
int NvertsOnFace = 0;
for (int i=0;i<femMesh->Nfp;i++){
int id = femMesh->faceNodes[f*femMesh->Nfp+i];
int v = mesh->FEMEToV[n*pmesh->Nverts+id];
NvertsOnFace += faceFlag[face*pmesh->Np + v];
}
if (NvertsOnFace == femMesh->Nfp)
femFaceMap[n*femMesh->Nfaces+f] = face; //on macro face
}
}
}
//fill the boundary flag array
femMesh->EToB = (int*) calloc(femMesh->Nelements*femMesh->Nfaces, sizeof(int));
for (dlong e=0;e<mesh->Nelements;e++) {
for (int n=0;n<mesh->NelFEM;n++) {
for (int f=0;f<femMesh->Nfaces;f++) {
int face = femFaceMap[n*femMesh->Nfaces+f];
if (face>-1) {
femMesh->EToB[(e*mesh->NelFEM +n)*femMesh->Nfaces +f] = mesh->EToB[e*mesh->Nfaces + face];
}
}
}
}
free(faceFlag);
free(femFaceMap);
// ---- phase 4: geometry/connectivity for the full FEM mesh ----
switch(elliptic->elementType){
case TRIANGLES:
meshPhysicalNodesTri2D(femMesh);
meshGeometricFactorsTri2D(femMesh);
meshHaloSetup(femMesh);
meshConnectFaceNodes2D(femMesh);
meshSurfaceGeometricFactorsTri2D(femMesh);
break;
case QUADRILATERALS:
meshPhysicalNodesQuad2D(femMesh);
meshGeometricFactorsQuad2D(femMesh);
meshHaloSetup(femMesh);
meshConnectFaceNodes2D(femMesh);
meshSurfaceGeometricFactorsQuad2D(femMesh);
break;
case TETRAHEDRA:
meshPhysicalNodesTet3D(femMesh);
meshGeometricFactorsTet3D(femMesh);
meshHaloSetup(femMesh);
meshConnectFaceNodes3D(femMesh);
meshSurfaceGeometricFactorsTet3D(femMesh);
break;
case HEXAHEDRA:
meshPhysicalNodesHex3D(femMesh);
meshGeometricFactorsHex3D(femMesh);
meshHaloSetup(femMesh);
meshConnectFaceNodes3D(femMesh);
meshSurfaceGeometricFactorsHex3D(femMesh);
break;
}
// global nodes
meshParallelConnectNodes(femMesh);
// ---- phase 5: masked gather/scatter setup (Dirichlet nodes get id 0) ----
dlong Ntotal = pmesh->Np*pmesh->Nelements;
int verbose = options.compareArgs("VERBOSE","TRUE") ? 1:0;
pmesh->maskedGlobalIds = (hlong *) calloc(Ntotal,sizeof(hlong));
memcpy(pmesh->maskedGlobalIds, pmesh->globalIds, Ntotal*sizeof(hlong));
if (elliptic->elementType==TRIANGLES||elliptic->elementType==TETRAHEDRA) {
//build a new mask for NpFEM>Np node sets
// gather-scatter
pmesh->ogs = ogsSetup(Ntotal, pmesh->globalIds, mesh->comm, verbose, mesh->device);
//make a node-wise bc flag using the gsop (prioritize Dirichlet boundaries over Neumann)
int *mapB = (int *) calloc(Ntotal,sizeof(int));
for (dlong e=0;e<pmesh->Nelements;e++) {
for (int n=0;n<pmesh->Np;n++) mapB[n+e*pmesh->Np] = 1E9;
for (int f=0;f<pmesh->Nfaces;f++) {
int bc = pmesh->EToB[f+e*pmesh->Nfaces];
if (bc>0) {
for (int n=0;n<pmesh->Nfp;n++) {
int BCFlag = elliptic->BCType[bc];
int fid = pmesh->faceNodes[n+f*pmesh->Nfp];
mapB[fid+e*pmesh->Np] = mymin(BCFlag,mapB[fid+e*pmesh->Np]);
}
}
}
}
ogsGatherScatter(mapB, ogsInt, ogsMin, pmesh->ogs);
//use the bc flags to find masked ids
for (dlong n=0;n<pmesh->Nelements*pmesh->Np;n++) {
if (mapB[n] == 1) { //Dirichlet boundary
pmesh->maskedGlobalIds[n] = 0;
}
}
free(mapB);
} else {
//mask using the original mask
for (dlong n=0;n<elliptic->Nmasked;n++)
pmesh->maskedGlobalIds[elliptic->maskIds[n]] = 0;
}
//build masked gs handle
precon->FEMogs = ogsSetup(Ntotal, pmesh->maskedGlobalIds, mesh->comm, verbose, mesh->device);
// ---- phase 6: consecutive global numbering of gathered DOFs ----
// number of degrees of freedom on this rank (after gathering)
hlong Ngather = precon->FEMogs->Ngather;
// create a global numbering system
hlong *globalIds = (hlong *) calloc(Ngather,sizeof(hlong));
int *owner = (int *) calloc(Ngather,sizeof(int));
// every gathered degree of freedom has its own global id
hlong *globalStarts = (hlong *) calloc(mesh->size+1,sizeof(hlong));
MPI_Allgather(&Ngather, 1, MPI_HLONG, globalStarts+1, 1, MPI_HLONG, mesh->comm);
for(int r=0;r<mesh->size;++r)
globalStarts[r+1] = globalStarts[r]+globalStarts[r+1];
//use the offsets to set a consecutive global numbering
for (dlong n =0;n<precon->FEMogs->Ngather;n++) {
globalIds[n] = n + globalStarts[mesh->rank];
owner[n] = mesh->rank;
}
//scatter this numbering to the original nodes
hlong *globalNumbering = (hlong *) calloc(Ntotal,sizeof(hlong));
int *globalOwners = (int *) calloc(Ntotal,sizeof(int));
for (dlong n=0;n<Ntotal;n++) globalNumbering[n] = -1;
ogsScatter(globalNumbering, globalIds, ogsHlong, ogsAdd, precon->FEMogs);
ogsScatter(globalOwners, owner, ogsInt, ogsAdd, precon->FEMogs);
free(globalIds); free(owner);
if (elliptic->elementType==TRIANGLES||elliptic->elementType==TETRAHEDRA) {
//dont need these anymore
free(pmesh->vmapM);
free(pmesh->vmapP);
free(pmesh->mapP);
//maybe more cleanup can go here
}
// ---- phase 7: reference stiffness matrices S** = D^T M D (simplices) ----
if (elliptic->elementType==TRIANGLES) {
//build stiffness matrices
femMesh->Srr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Srs = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Ssr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Sss = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
for (int n=0;n<femMesh->Np;n++) {
for (int m=0;m<femMesh->Np;m++) {
for (int k=0;k<femMesh->Np;k++) {
for (int l=0;l<femMesh->Np;l++) {
femMesh->Srr[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
femMesh->Srs[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
femMesh->Ssr[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
femMesh->Sss[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
}
}
}
}
} else if (elliptic->elementType==TETRAHEDRA) {
//build stiffness matrices
femMesh->Srr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Srs = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Srt = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Ssr = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Sss = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Sst = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Str = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Sts = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
femMesh->Stt = (dfloat *) calloc(femMesh->Np*femMesh->Np,sizeof(dfloat));
for (int n=0;n<femMesh->Np;n++) {
for (int m=0;m<femMesh->Np;m++) {
for (int k=0;k<femMesh->Np;k++) {
for (int l=0;l<femMesh->Np;l++) {
femMesh->Srr[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
femMesh->Srs[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
femMesh->Srt[m+n*femMesh->Np] += femMesh->Dr[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dt[m+k*femMesh->Np];
femMesh->Ssr[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
femMesh->Sss[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
femMesh->Sst[m+n*femMesh->Np] += femMesh->Ds[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dt[m+k*femMesh->Np];
femMesh->Str[m+n*femMesh->Np] += femMesh->Dt[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dr[m+k*femMesh->Np];
femMesh->Sts[m+n*femMesh->Np] += femMesh->Dt[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Ds[m+k*femMesh->Np];
femMesh->Stt[m+n*femMesh->Np] += femMesh->Dt[n+l*femMesh->Np]*femMesh->MM[k+l*femMesh->Np]*femMesh->Dt[m+k*femMesh->Np];
}
}
}
}
}
// ---- phase 8: assemble and exchange matrix non-zeros, then AMG setup ----
if (mesh->rank==0) printf("Building full SEMFEM matrix..."); fflush(stdout);
// Build non-zeros of stiffness matrix (unassembled)
dlong nnzLocal = femMesh->Np*femMesh->Np*femMesh->Nelements;
dlong cnt =0;
nonZero_t *sendNonZeros = (nonZero_t*) calloc(nnzLocal, sizeof(nonZero_t));
int *AsendCounts = (int*) calloc(mesh->size, sizeof(int));
int *ArecvCounts = (int*) calloc(mesh->size, sizeof(int));
int *AsendOffsets = (int*) calloc(mesh->size+1, sizeof(int));
int *ArecvOffsets = (int*) calloc(mesh->size+1, sizeof(int));
//Build unassembled non-zeros
switch(elliptic->elementType){
case TRIANGLES:
BuildFEMMatrixTri2D(femMesh,pmesh,lambda, localIds, globalNumbering, globalOwners,&cnt,sendNonZeros); break;
case QUADRILATERALS:
BuildFEMMatrixQuad2D(femMesh,pmesh,lambda, localIds, globalNumbering, globalOwners,&cnt,sendNonZeros); break;
case TETRAHEDRA:
BuildFEMMatrixTet3D(femMesh,pmesh,lambda, localIds, globalNumbering, globalOwners,&cnt,sendNonZeros); break;
case HEXAHEDRA:
BuildFEMMatrixHex3D(femMesh,pmesh,lambda, localIds, globalNumbering, globalOwners,&cnt,sendNonZeros); break;
}
// Make the MPI_NONZERO_T data type
MPI_Datatype MPI_NONZERO_T;
MPI_Datatype dtype[4] = {MPI_HLONG, MPI_HLONG, MPI_INT, MPI_DFLOAT};
int blength[4] = {1, 1, 1, 1};
MPI_Aint addr[4], displ[4];
MPI_Get_address ( &(sendNonZeros[0] ), addr+0);
MPI_Get_address ( &(sendNonZeros[0].col ), addr+1);
MPI_Get_address ( &(sendNonZeros[0].ownerRank), addr+2);
MPI_Get_address ( &(sendNonZeros[0].val ), addr+3);
displ[0] = 0;
displ[1] = addr[1] - addr[0];
displ[2] = addr[2] - addr[0];
displ[3] = addr[3] - addr[0];
MPI_Type_create_struct (4, blength, displ, dtype, &MPI_NONZERO_T);
MPI_Type_commit (&MPI_NONZERO_T);
// count how many non-zeros to send to each process
for(dlong n=0;n<cnt;++n)
AsendCounts[sendNonZeros[n].ownerRank]++;
// sort by row ordering
qsort(sendNonZeros, cnt, sizeof(nonZero_t), parallelCompareRowColumn);
// find how many nodes to expect (should use sparse version)
MPI_Alltoall(AsendCounts, 1, MPI_INT, ArecvCounts, 1, MPI_INT, mesh->comm);
// find send and recv offsets for gather
dlong nnz = 0;
for(int r=0;r<mesh->size;++r){
AsendOffsets[r+1] = AsendOffsets[r] + AsendCounts[r];
ArecvOffsets[r+1] = ArecvOffsets[r] + ArecvCounts[r];
nnz += ArecvCounts[r];
}
nonZero_t *A = (nonZero_t*) calloc(nnz, sizeof(nonZero_t));
// determine number to receive
MPI_Alltoallv(sendNonZeros, AsendCounts, AsendOffsets, MPI_NONZERO_T,
A, ArecvCounts, ArecvOffsets, MPI_NONZERO_T,
mesh->comm);
// sort received non-zero entries by row block (may need to switch compareRowColumn tests)
qsort(A, nnz, sizeof(nonZero_t), parallelCompareRowColumn);
// compress duplicates
cnt = 0;
for(dlong n=1;n<nnz;++n){
if(A[n].row == A[cnt].row && A[n].col == A[cnt].col){
A[cnt].val += A[n].val;
} else{
++cnt;
A[cnt] = A[n];
}
}
if (nnz) cnt++;
nnz = cnt;
if(mesh->rank==0) printf("done.\n");
MPI_Barrier(mesh->comm);
MPI_Type_free(&MPI_NONZERO_T);
// unpack the compressed COO triples for parAlmond
hlong *Rows = (hlong *) calloc(nnz, sizeof(hlong));
hlong *Cols = (hlong *) calloc(nnz, sizeof(hlong));
dfloat *Vals = (dfloat*) calloc(nnz,sizeof(dfloat));
for (dlong n=0;n<nnz;n++) {
Rows[n] = A[n].row;
Cols[n] = A[n].col;
Vals[n] = A[n].val;
}
free(A);
precon->parAlmond = parAlmond::Init(mesh->device, mesh->comm, options);
parAlmond::AMGSetup(precon->parAlmond,
globalStarts,
nnz,
Rows,
Cols,
Vals,
elliptic->allNeumann,
elliptic->allNeumannPenalty);
free(Rows); free(Cols); free(Vals);
if (options.compareArgs("VERBOSE", "TRUE"))
parAlmond::Report(precon->parAlmond);
// ---- phase 9: device buffers for applying the preconditioner ----
if (elliptic->elementType==TRIANGLES||elliptic->elementType==TETRAHEDRA) {
// //tell parAlmond not to gather this level (its done manually)
// agmgLevel *baseLevel = precon->parAlmond->levels[0];
// baseLevel->gatherLevel = false;
// baseLevel->weightedInnerProds = false;
// build interp and anterp
dfloat *SEMFEMAnterp = (dfloat*) calloc(mesh->NpFEM*mesh->Np, sizeof(dfloat));
for(int n=0;n<mesh->NpFEM;++n){
for(int m=0;m<mesh->Np;++m){
SEMFEMAnterp[n+m*mesh->NpFEM] = mesh->SEMFEMInterp[n*mesh->Np+m];
}
}
mesh->o_SEMFEMInterp = mesh->device.malloc(mesh->NpFEM*mesh->Np*sizeof(dfloat),mesh->SEMFEMInterp);
mesh->o_SEMFEMAnterp = mesh->device.malloc(mesh->NpFEM*mesh->Np*sizeof(dfloat),SEMFEMAnterp);
free(SEMFEMAnterp);
precon->o_rFEM = mesh->device.malloc(mesh->Nelements*mesh->NpFEM*sizeof(dfloat));
precon->o_zFEM = mesh->device.malloc(mesh->Nelements*mesh->NpFEM*sizeof(dfloat));
precon->o_GrFEM = mesh->device.malloc(precon->FEMogs->Ngather*sizeof(dfloat));
precon->o_GzFEM = mesh->device.malloc(precon->FEMogs->Ngather*sizeof(dfloat));
} else {
// //tell parAlmond to gather this level
// agmgLevel *baseLevel = precon->parAlmond->levels[0];
// baseLevel->gatherLevel = true;
parAlmond::multigridLevel *baseLevel = precon->parAlmond->levels[0];
precon->rhsG = (dfloat*) calloc(baseLevel->Ncols,sizeof(dfloat));
precon->xG = (dfloat*) calloc(baseLevel->Ncols,sizeof(dfloat));
precon->o_rhsG = mesh->device.malloc(baseLevel->Ncols*sizeof(dfloat));
precon->o_xG = mesh->device.malloc(baseLevel->Ncols*sizeof(dfloat));
// baseLevel->Srhs = (dfloat*) calloc(mesh->Np*mesh->Nelements,sizeof(dfloat));
// baseLevel->Sx = (dfloat*) calloc(mesh->Np*mesh->Nelements,sizeof(dfloat));
// baseLevel->o_Srhs = mesh->device.malloc(mesh->Np*mesh->Nelements*sizeof(dfloat));
// baseLevel->o_Sx = mesh->device.malloc(mesh->Np*mesh->Nelements*sizeof(dfloat));
// baseLevel->weightedInnerProds = false;
// baseLevel->gatherArgs = (void **) calloc(3,sizeof(void*));
// baseLevel->gatherArgs[0] = (void *) elliptic;
// baseLevel->gatherArgs[1] = (void *) precon->FEMogs; //use the gs made from the partial gathered femgrid
// baseLevel->gatherArgs[2] = (void *) &(baseLevel->o_Sx);
// baseLevel->scatterArgs = baseLevel->gatherArgs;
// baseLevel->device_gather = ellipticGather;
// baseLevel->device_scatter = ellipticScatter;
}
}
// Assemble the unassembled non-zeros of the degree-1 FEM operator
// (stiffness + lambda * mass) over all triangular sub-elements.
// Masked nodes (globalNumbering < 0) are skipped; entries above a small
// drop tolerance are appended to A under a critical section, with *cnt
// tracking the number packed.
// Improvement: the four geometric factors are constant per (affine)
// triangle, so they are read once per element instead of once per (n,m)
// pair — consistent with BuildFEMMatrixTet3D.
void BuildFEMMatrixTri2D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda,
                         dlong *localIds, hlong* globalNumbering, int *globalOwners,
                         dlong *cnt, nonZero_t *A) {
  #pragma omp parallel for
  for (dlong e=0;e<femMesh->Nelements;e++) {
    // per-element geometric factors (loop-invariant over n and m)
    dfloat Grr = femMesh->ggeo[e*femMesh->Nggeo + G00ID];
    dfloat Grs = femMesh->ggeo[e*femMesh->Nggeo + G01ID];
    dfloat Gss = femMesh->ggeo[e*femMesh->Nggeo + G11ID];
    dfloat J   = femMesh->ggeo[e*femMesh->Nggeo + GWJID];
    for (int n=0;n<femMesh->Np;n++) {
      dlong idn = localIds[e*femMesh->Np + n];
      if (globalNumbering[idn]<0) continue; //skip masked nodes
      for (int m=0;m<femMesh->Np;m++) {
        dlong idm = localIds[e*femMesh->Np + m];
        if (globalNumbering[idm]<0) continue; //skip masked nodes
        dfloat val = 0.;
        val += Grr*femMesh->Srr[m+n*femMesh->Np];
        val += Grs*femMesh->Srs[m+n*femMesh->Np];
        val += Grs*femMesh->Ssr[m+n*femMesh->Np];
        val += Gss*femMesh->Sss[m+n*femMesh->Np];
        val += J*lambda*femMesh->MM[m+n*femMesh->Np];
        dfloat nonZeroThreshold = 1e-7;
        if (fabs(val)>nonZeroThreshold) {
          #pragma omp critical
          {
            // pack non-zero
            A[*cnt].val = val;
            A[*cnt].row = globalNumbering[idn];
            A[*cnt].col = globalNumbering[idm];
            A[*cnt].ownerRank = globalOwners[idn];
            (*cnt)++;
          }
        }
      }
    }
  }
}
// Assemble the unassembled non-zeros of the degree-1 FEM operator on
// quadrilaterals, using a sum-factorized form: node (nx,ny) tests against
// node (mx,my), with the Kronecker structure of the tensor-product basis
// collapsing most terms (e.g. Grr contributes only when ny==my).
// Quads store geometric factors per node (ggeo stride Np*Nggeo), unlike
// the per-element factors of the affine simplex routines.
void BuildFEMMatrixQuad2D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda,
dlong *localIds, hlong* globalNumbering, int *globalOwners,
dlong *cnt, nonZero_t *A) {
#pragma omp parallel for
for (dlong e=0;e<femMesh->Nelements;e++) {
for (int ny=0;ny<femMesh->Nq;ny++) {
for (int nx=0;nx<femMesh->Nq;nx++) {
dlong idn = localIds[e*femMesh->Np + nx+ny*femMesh->Nq];
if (globalNumbering[idn]<0) continue; //skip masked nodes
for (int my=0;my<femMesh->Nq;my++) {
for (int mx=0;mx<femMesh->Nq;mx++) {
dlong idm = localIds[e*femMesh->Np + mx+my*femMesh->Nq];
if (globalNumbering[idm]<0) continue; //skip masked nodes
int id;
dfloat val = 0.;
// rr term: nonzero only when the y-indices coincide
if (ny==my) {
for (int k=0;k<femMesh->Nq;k++) {
id = k+ny*femMesh->Nq;
dfloat Grr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G00ID*femMesh->Np];
val += Grr*femMesh->D[nx+k*femMesh->Nq]*femMesh->D[mx+k*femMesh->Nq];
}
}
// mixed rs and sr cross terms (G01 used for both)
id = mx+ny*femMesh->Nq;
dfloat Grs = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
val += Grs*femMesh->D[nx+mx*femMesh->Nq]*femMesh->D[my+ny*femMesh->Nq];
id = nx+my*femMesh->Nq;
dfloat Gsr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
val += Gsr*femMesh->D[mx+nx*femMesh->Nq]*femMesh->D[ny+my*femMesh->Nq];
// ss term: nonzero only when the x-indices coincide
if (nx==mx) {
for (int k=0;k<femMesh->Nq;k++) {
id = nx+k*femMesh->Nq;
dfloat Gss = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G11ID*femMesh->Np];
val += Gss*femMesh->D[ny+k*femMesh->Nq]*femMesh->D[my+k*femMesh->Nq];
}
}
// lumped mass contribution on the diagonal (collocation quadrature)
if ((nx==mx)&&(ny==my)) {
id = nx + ny*femMesh->Nq;
dfloat JW = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + GWJID*femMesh->Np];
val += JW*lambda;
}
// drop tiny entries; serialize appends to the shared output array
dfloat nonZeroThreshold = 1e-7;
if (fabs(val)>nonZeroThreshold) {
#pragma omp critical
{
// pack non-zero
A[*cnt].val = val;
A[*cnt].row = globalNumbering[idn];
A[*cnt].col = globalNumbering[idm];
A[*cnt].ownerRank = globalOwners[idn];
(*cnt)++;
}
}
}
}
}
}
}
}
// Assemble the unassembled non-zeros of the degree-1 FEM operator
// (stiffness + lambda * mass) on tetrahedra. Affine tets have constant
// geometric factors per element, read once before the n/m loops.
// Masked nodes (globalNumbering < 0) are skipped; entries above the drop
// tolerance are appended to A under a critical section via *cnt.
void BuildFEMMatrixTet3D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda,
dlong *localIds, hlong* globalNumbering, int *globalOwners,
dlong *cnt, nonZero_t *A) {
#pragma omp parallel for
for (dlong e=0;e<femMesh->Nelements;e++) {
// per-element geometric factors (constant on an affine tet)
dfloat Grr = femMesh->ggeo[e*femMesh->Nggeo + G00ID];
dfloat Grs = femMesh->ggeo[e*femMesh->Nggeo + G01ID];
dfloat Grt = femMesh->ggeo[e*femMesh->Nggeo + G02ID];
dfloat Gss = femMesh->ggeo[e*femMesh->Nggeo + G11ID];
dfloat Gst = femMesh->ggeo[e*femMesh->Nggeo + G12ID];
dfloat Gtt = femMesh->ggeo[e*femMesh->Nggeo + G22ID];
dfloat J = femMesh->ggeo[e*femMesh->Nggeo + GWJID];
for (int n=0;n<femMesh->Np;n++) {
dlong idn = localIds[e*femMesh->Np + n];
if (globalNumbering[idn]<0) continue; //skip masked nodes
for (int m=0;m<femMesh->Np;m++) {
dlong idm = localIds[e*femMesh->Np + m];
if (globalNumbering[idm]<0) continue; //skip masked nodes
// contract the 9 reference stiffness blocks with the metric terms
dfloat val = 0.;
val += Grr*femMesh->Srr[m+n*femMesh->Np];
val += Grs*femMesh->Srs[m+n*femMesh->Np];
val += Grt*femMesh->Srt[m+n*femMesh->Np];
val += Grs*femMesh->Ssr[m+n*femMesh->Np];
val += Gss*femMesh->Sss[m+n*femMesh->Np];
val += Gst*femMesh->Sst[m+n*femMesh->Np];
val += Grt*femMesh->Str[m+n*femMesh->Np];
val += Gst*femMesh->Sts[m+n*femMesh->Np];
val += Gtt*femMesh->Stt[m+n*femMesh->Np];
val += J*lambda*femMesh->MM[m+n*femMesh->Np];
// drop tiny entries; serialize appends to the shared output array
dfloat nonZeroThreshold = 1e-7;
if (fabs(val)>nonZeroThreshold) {
#pragma omp critical
{
// pack non-zero
A[*cnt].val = val;
A[*cnt].row = globalNumbering[idn];
A[*cnt].col = globalNumbering[idm];
A[*cnt].ownerRank = globalOwners[idn];
(*cnt)++;
}
}
}
}
}
}
// Assemble the unassembled non-zeros of the degree-1 FEM operator on
// hexahedra, sum-factorized over the tensor-product basis: node
// (nx,ny,nz) tests against (mx,my,mz); each metric term contributes only
// when the complementary index pairs coincide (Kronecker deltas).
// Geometric factors are stored per node (ggeo stride Np*Nggeo).
void BuildFEMMatrixHex3D(mesh_t *femMesh, mesh_t *pmesh, dfloat lambda,
dlong *localIds, hlong* globalNumbering, int *globalOwners,
dlong *cnt, nonZero_t *A) {
#pragma omp parallel for
for (dlong e=0;e<femMesh->Nelements;e++) {
for (int nz=0;nz<femMesh->Nq;nz++) {
for (int ny=0;ny<femMesh->Nq;ny++) {
for (int nx=0;nx<femMesh->Nq;nx++) {
dlong nn = nx+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
dlong idn = localIds[e*femMesh->Np + nn];
if (globalNumbering[idn]<0) continue; //skip masked nodes
for (int mz=0;mz<femMesh->Nq;mz++) {
for (int my=0;my<femMesh->Nq;my++) {
for (int mx=0;mx<femMesh->Nq;mx++) {
dlong mm = mx+my*femMesh->Nq+mz*femMesh->Nq*femMesh->Nq;
dlong idm = localIds[e*femMesh->Np + mm];
if (globalNumbering[idm]<0) continue; //skip masked nodes
int id;
dfloat val = 0.;
// rr term: y and z indices must match
if ((ny==my)&&(nz==mz)) {
for (int k=0;k<femMesh->Nq;k++) {
id = k+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
dfloat Grr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G00ID*femMesh->Np];
val += Grr*femMesh->D[nx+k*femMesh->Nq]*femMesh->D[mx+k*femMesh->Nq];
}
}
// rs/sr cross terms: z indices must match (G01 used for both)
if (nz==mz) {
id = mx+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
dfloat Grs = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
val += Grs*femMesh->D[nx+mx*femMesh->Nq]*femMesh->D[my+ny*femMesh->Nq];
id = nx+my*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
dfloat Gsr = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G01ID*femMesh->Np];
val += Gsr*femMesh->D[mx+nx*femMesh->Nq]*femMesh->D[ny+my*femMesh->Nq];
}
// rt/tr cross terms: y indices must match (G02 used for both)
if (ny==my) {
id = mx+ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
dfloat Grt = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G02ID*femMesh->Np];
val += Grt*femMesh->D[nx+mx*femMesh->Nq]*femMesh->D[mz+nz*femMesh->Nq];
id = nx+ny*femMesh->Nq+mz*femMesh->Nq*femMesh->Nq;
dfloat Gst = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G02ID*femMesh->Np];
val += Gst*femMesh->D[mx+nx*femMesh->Nq]*femMesh->D[nz+mz*femMesh->Nq];
}
// ss term: x and z indices must match
if ((nx==mx)&&(nz==mz)) {
for (int k=0;k<femMesh->Nq;k++) {
id = nx+k*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
dfloat Gss = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G11ID*femMesh->Np];
val += Gss*femMesh->D[ny+k*femMesh->Nq]*femMesh->D[my+k*femMesh->Nq];
}
}
// st/ts cross terms: x indices must match (G12 used for both)
if (nx==mx) {
id = nx+my*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
dfloat Gst = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G12ID*femMesh->Np];
val += Gst*femMesh->D[ny+my*femMesh->Nq]*femMesh->D[mz+nz*femMesh->Nq];
id = nx+ny*femMesh->Nq+mz*femMesh->Nq*femMesh->Nq;
dfloat Gts = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G12ID*femMesh->Np];
val += Gts*femMesh->D[my+ny*femMesh->Nq]*femMesh->D[nz+mz*femMesh->Nq];
}
// tt term: x and y indices must match
if ((nx==mx)&&(ny==my)) {
for (int k=0;k<femMesh->Nq;k++) {
id = nx+ny*femMesh->Nq+k*femMesh->Nq*femMesh->Nq;
dfloat Gtt = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + G22ID*femMesh->Np];
val += Gtt*femMesh->D[nz+k*femMesh->Nq]*femMesh->D[mz+k*femMesh->Nq];
}
}
// lumped mass contribution on the diagonal (collocation quadrature)
if ((nx==mx)&&(ny==my)&&(nz==mz)) {
id = nx + ny*femMesh->Nq+nz*femMesh->Nq*femMesh->Nq;
dfloat JW = femMesh->ggeo[e*femMesh->Np*femMesh->Nggeo + id + GWJID*femMesh->Np];
val += JW*lambda;
}
// pack non-zero
// NOTE(review): this routine uses >= while the 2D/Tet variants
// use > for the drop tolerance — confirm if intentional
dfloat nonZeroThreshold = 1e-7;
if (fabs(val) >= nonZeroThreshold) {
#pragma omp critical
{
A[*cnt].val = val;
A[*cnt].row = globalNumbering[idn];
A[*cnt].col = globalNumbering[idm];
A[*cnt].ownerRank = globalOwners[idn];
(*cnt)++;
}
}
}
}
}
}
}
}
}
}
|
task.c | #include <stdio.h>
#include <omp.h>
#include <stdlib.h>
int main(int argc, char **argv)
{
    /* Sum the first 10^9 terms of the harmonic series in parallel.
     * Usage: ./task <num_threads>
     * Each thread accumulates into its own slot of `array`; the per-thread
     * partial sums are combined serially afterwards.
     * Fixes vs. original: argv[1] was dereferenced without checking argc,
     * calloc was unchecked, and both loops carried a pointless `ordered`
     * clause (no ordered construct inside). */
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <num_threads>\n", argv[0]);
        return 1;
    }
    int num_of_threads = atoi(argv[1]);
    if (num_of_threads < 1) {
        fprintf(stderr, "Number of threads must be a positive integer\n");
        return 1;
    }
    omp_set_num_threads(num_of_threads);
    long double* array = (long double*) calloc (num_of_threads, sizeof(long double));
    if (array == NULL) {
        fprintf(stderr, "calloc failed\n");
        return 1;
    }
    /* NOTE: adjacent slots may share a cache line (false sharing); this only
     * affects speed, not correctness, since each thread writes its own slot. */
    #pragma omp parallel for shared(array)
    for(unsigned long int i = 1; i <= 1000000000; ++i)
        array[omp_get_thread_num()] += 1.0 / i;
    /* Tiny reduction over at most num_of_threads elements: serial is fine. */
    double sum = 0;
    for (int i = num_of_threads - 1; i > -1; i--)
        sum += array[i];
    printf("Harm row sum is %.20lf\n", sum);
    free(array);
    return 0;
}
|
aux_align.h | #ifndef AUX_ALIGN_H
#define AUX_ALIGN_H
#include <cvpp/containers/matrix.h>
#include "class_map.h"
#include "class_group.h"
using namespace cvpp;
/// Align objects given two groups (modify second one)
void alignObjects( Map& main , Map& next ,
                   const double& thr = 2.0 )
{
    // Gather the data matrix of every object in the reference map ...
    SeqMatd M1( main.size() );
    forLOOPi( M1.size() ) M1[i] = main[i].getM();
    // ... and of every object in the map to be aligned.
    SeqMatd M2( next.size() );
    forLOOPi( M2.size() ) M2[i] = next[i].getM();
    // D(i): best (smallest) match distance found for next-object i;
    // I(i): index of the matching main-object (-1 = no match yet).
    Matd D = PINF * MatONESd( M2.size() );
    Mati I = -1 * MatONESi( M2.size() );
    // Each iteration writes only D(i)/I(i) for its own i, so the loop is race-free.
    #pragma omp parallel for
    forLOOPi( M2.size() )
    {
        KDtreed tree( M2[i] );
        forLOOPj( M1.size() )
        {
            // 1-NN search of all points of M1[j] against object i's points;
            // keep the smallest point-to-point distance as the object distance.
            SSeqi idxs; SSeqd dsts;
            tree.knnSearch( M1[j] , 1 , idxs , dsts );
            double min = PINF;
            forLOOPk( dsts.size() ) if( dsts[k][0] < min ) min = dsts[k][0];
            if( min < D(i) ) { D(i) = min ; I(i) = j; }
        }
    }
    // Matched objects replace their counterpart (slot I(i)); unmatched ones
    // are appended as new objects.
    // NOTE(review): when D(i) < thr, I(i) is assumed valid (>= 0), i.e. M1
    // non-empty; also, two next-objects matching the same main-object will
    // overwrite each other — confirm this is intended.
    SeqGroup objs( M1.size() );
    forLOOPi( M2.size() )
    {
        if( D(i) < thr ) objs[ I(i) ] = next[i];
        else objs.push_back( next[i] );
    }
    next.objs = objs;
}
#endif
|
if-clause.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
int main(int argc, char **argv)
{
  /* Demo of the OpenMP if() clause: the region runs in parallel only when
   * n > 4, otherwise on a single thread.
   * argv[1] = number of iterations (capped at 20), argv[2] = thread count.
   * Fix vs. original: both argv[1] and argv[2] are read, so the guard must
   * be `argc < 3` (the original `argc < 2` still allowed an out-of-bounds
   * read of argv[2]). */
  int i, n=20, tid;
  int a[n],suma=0,sumalocal,x;
  if(argc < 3) {
    fprintf(stderr,"[ERROR]-Falta iteraciones y numero de hebras\n");
    exit(-1);
  }
  n = atoi(argv[1]);
  x = atoi(argv[2]);
  if (n>20) n=20;        /* a[] was declared with n == 20; never exceed it */
  for (i=0; i<n; i++) {
    a[i] = i;
  }
  /* Parallel only when n > 4; each thread accumulates a private partial
   * sum and folds it into `suma` atomically. */
  #pragma omp parallel num_threads(x) if(n>4) default(none) \
          private(sumalocal,tid) shared(a,suma,n)
  {
    sumalocal=0;
    tid=omp_get_thread_num();
    #pragma omp for private(i) schedule(static) nowait
    for (i=0; i<n; i++)
    {
      sumalocal += a[i];
      printf(" thread %d suma de a[%d]=%d sumalocal=%d \n", tid,i,a[i],sumalocal);
    }
    #pragma omp atomic
    suma += sumalocal;
    /* Barrier so the master prints the fully accumulated total. */
    #pragma omp barrier
    #pragma omp master
    printf("thread master=%d imprime suma=%d\n",tid,suma);
  }
  return 0;
}
|
fill_r_3c.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <stdio.h>
#include <complex.h>
#include "config.h"
#include "cint.h"
#include "gto/gto.h"
/*
* out[naoi,naoj,naok,comp] in F-order
*/
/*
 * Fill the (ish, jsh) block of out[naoi,naoj,naok,comp] (F-order, no
 * symmetry): loop over every k-shell of the slice and let the integral
 * engine write each (i,j,k) tile directly into `out` via `dims`.
 * ish/jsh are indices relative to the shell slice; buf is scratch for intor.
 */
void GTOr3c_fill_s1(int (*intor)(), double complex *out, double complex *buf,
                    int comp, int ish, int jsh,
                    int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                    int *atm, int natm, int *bas, int nbas, double *env)
{
    const int ish0 = shls_slice[0];
    const int ish1 = shls_slice[1];
    const int jsh0 = shls_slice[2];
    const int jsh1 = shls_slice[3];
    const int ksh0 = shls_slice[4];
    const int ksh1 = shls_slice[5];
    /* AO counts of each slice dimension */
    const size_t naoi = ao_loc[ish1] - ao_loc[ish0];
    const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0];
    const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
    const size_t nij = naoi * naoj;
    const int dims[] = {naoi, naoj, naok};
    /* Translate slice-relative shell indices to absolute ones. */
    ish += ish0;
    jsh += jsh0;
    /* AO offset of this shell pair inside the output slab */
    const int ip = ao_loc[ish] - ao_loc[ish0];
    const int jp = ao_loc[jsh] - ao_loc[jsh0];
    out += jp * naoi + ip;
    int ksh, k0;
    int shls[3];
    shls[0] = ish;
    shls[1] = jsh;
    for (ksh = ksh0; ksh < ksh1; ksh++) {
        shls[2] = ksh;
        k0 = ao_loc[ksh ] - ao_loc[ksh0];
        /* intor writes the tile in place using the strides in dims */
        (*intor)(out+k0*nij, dims, shls, atm, natm, bas, nbas, env, cintopt, buf);
    }
}
static void zcopy_s2_igtj(double complex *out, double complex *in, int comp,
int ip, int nij, int nijk, int di, int dj, int dk)
{
const size_t dij = di * dj;
const size_t ip1 = ip + 1;
int i, j, k, ic;
double complex *pout, *pin;
for (ic = 0; ic < comp; ic++) {
for (k = 0; k < dk; k++) {
pout = out + k * nij;
pin = in + k * dij;
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
pout[j] = pin[j*di+i];
}
pout += ip1 + i;
}
}
out += nijk;
in += dij * dk;
}
}
static void zcopy_s2_ieqj(double complex *out, double complex *in, int comp,
int ip, int nij, int nijk, int di, int dj, int dk)
{
const size_t dij = di * dj;
const size_t ip1 = ip + 1;
int i, j, k, ic;
double complex *pout, *pin;
for (ic = 0; ic < comp; ic++) {
for (k = 0; k < dk; k++) {
pout = out + k * nij;
pin = in + k * dij;
for (i = 0; i < di; i++) {
for (j = 0; j <= i; j++) {
pout[j] = pin[j*di+i];
}
pout += ip1 + i;
}
}
out += nijk;
in += dij * dk;
}
}
/*
* out[comp,naok,nij] in C-order
* nij = i1*(i1+1)/2 - i0*(i0+1)/2
* [ \ ]
* [**** ]
* [***** ]
* [*****. ] <= . may not be filled, if jsh-upper-bound < ish-upper-bound
* [ \]
*/
/*
 * Fill one (ish, jsh) block of the s2ij-packed 3-center integrals
 * out[comp,naok,nij] (C-order), where nij indexes the lower triangle
 * i >= j (see diagram above).  Blocks strictly above the diagonal are
 * skipped; diagonal blocks store only their lower triangle.
 */
void GTOr3c_fill_s2ij(int (*intor)(), double complex *out, double complex *buf,
                      int comp, int ish, int jsh,
                      int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
    const int ish0 = shls_slice[0];
    const int ish1 = shls_slice[1];
    const int jsh0 = shls_slice[2];
    ish += ish0;
    jsh += jsh0;
    /* NOTE(review): ip is an absolute AO index while jp is relative to the
     * j-slice; the comparison and the triangle offsets below implicitly
     * assume the j-slice starts at AO 0 (or compatible) — confirm callers. */
    const int ip = ao_loc[ish];
    const int jp = ao_loc[jsh] - ao_loc[jsh0];
    if (ip < jp) {
        return;   /* block lies strictly above the diagonal: not stored */
    }
    const int ksh0 = shls_slice[4];
    const int ksh1 = shls_slice[5];
    const int i0 = ao_loc[ish0];
    const int i1 = ao_loc[ish1];
    const size_t naok = ao_loc[ksh1] - ao_loc[ksh0];
    /* Packed-triangle extent: rows i0..i1-1, row r holds r+1 entries. */
    const size_t off = i0 * (i0 + 1) / 2;
    const size_t nij = i1 * (i1 + 1) / 2 - off;
    const size_t nijk = nij * naok;
    const int di = ao_loc[ish+1] - ao_loc[ish];
    const int dj = ao_loc[jsh+1] - ao_loc[jsh];
    /* Start of this block: row ip of the packed triangle, column jp. */
    out += ip * (ip + 1) / 2 - off + jp;
    int ksh, dk, k0;
    int shls[3];
    /* buf holds the largest possible tile; the remainder is intor's cache. */
    dk = GTOmax_shell_dim(ao_loc, shls_slice, 3);
    double *cache = (double *)(buf + di * dj * dk * comp);
    shls[0] = ish;
    shls[1] = jsh;
    for (ksh = ksh0; ksh < ksh1; ksh++) {
        shls[2] = ksh;
        dk = ao_loc[ksh+1] - ao_loc[ksh];
        k0 = ao_loc[ksh ] - ao_loc[ksh0];
        (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache);
        /* Scatter the tile into the packed triangle (diagonal blocks keep
         * only j <= i). */
        if (ip != jp) {
            zcopy_s2_igtj(out+k0*nij, buf, comp, ip, nij, nijk, di, dj, dk);
        } else {
            zcopy_s2_ieqj(out+k0*nij, buf, comp, ip, nij, nijk, di, dj, dk);
        }
    }
}
/* Placeholder: jk-symmetric packing is not supported.  Abort loudly rather
 * than silently producing wrong integrals. */
void GTOr3c_fill_s2jk(int (*intor)(), double complex *out, double complex *buf,
                      int comp, int ish, int jsh,
                      int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                      int *atm, int natm, int *bas, int nbas, double *env)
{
    fputs("GTOr3c_fill_s2jk not implemented\n", stderr);
    exit(1);
}
/*
 * Driver: OpenMP-parallel loop over all (ish, jsh) shell pairs of the
 * slice, delegating each block to `fill` (one of the GTOr3c_fill_*
 * routines above).  Each thread owns one scratch buffer sized for the
 * largest shell tile plus the integral engine's cache.
 * NOTE(review): cache_size appears to be counted in doubles, hence the /2
 * when carving it out of a double-complex buffer — confirm against
 * GTOmax_cache_size's contract.
 * NOTE(review): the malloc result is not checked; on OOM `fill` would
 * dereference NULL.
 */
void GTOr3c_drv(int (*intor)(), void (*fill)(), double complex *eri, int comp,
                int *shls_slice, int *ao_loc, CINTOpt *cintopt,
                int *atm, int natm, int *bas, int nbas, double *env)
{
    const int ish0 = shls_slice[0];
    const int ish1 = shls_slice[1];
    const int jsh0 = shls_slice[2];
    const int jsh1 = shls_slice[3];
    const int nish = ish1 - ish0;
    const int njsh = jsh1 - jsh0;
    const int di = GTOmax_shell_dim(ao_loc, shls_slice, 3);
    const int cache_size = GTOmax_cache_size(intor, shls_slice, 3,
                                             atm, natm, bas, nbas, env);
#pragma omp parallel
    {
        int ish, jsh, ij;
        /* per-thread scratch: largest (di,dj,dk,comp) tile + intor cache */
        double complex *buf = malloc(sizeof(double complex) *
                                     (di*di*di*comp + cache_size/2));
#pragma omp for schedule(dynamic)
        for (ij = 0; ij < nish*njsh; ij++) {
            /* flattened (ish, jsh) pair index */
            ish = ij / njsh;
            jsh = ij % njsh;
            (*fill)(intor, eri, buf, comp, ish, jsh, shls_slice, ao_loc,
                    cintopt, atm, natm, bas, nbas, env);
        }
        free(buf);
    }
}
|
sidh_vow_base.c | #include <assert.h>
#include "sidh_vow_base.h"
#include "types/triples.h"
#include "types/state.h"
#include "curve_math.h"
#include "bintree.h"
#include "triples.h"
// Functions for swig interface
#include "swig_helpers.c"
#include "state.c"
/* Initializations */
/* Allocate a zero-initialized state of nwords_state digits.
 * NOTE(review): the calloc result is not checked; callers will
 * dereference NULL on allocation failure. */
static st_t init_st(uint64_t nwords_state)
{
    st_t s;
    s.words = calloc(nwords_state, sizeof(digit_t));
    return s;
}
/* Release the word buffer of a state created by init_st. */
static void free_st(st_t *s)
{
    free(s->words);
}
/* Write A followed by B into cat starting at index 1; slot 0 is reserved
 * for the strategy head that the caller (OptStrat) stores there. */
static void ConcatArray(unsigned long *cat, unsigned long *A, unsigned long lenA, unsigned long *B, unsigned long lenB)
{
    unsigned int i;
    for (i = 1; i < lenA + 1; i++)
        cat[i] = A[i - 1];
    for (i = 0; i < lenB; i++)
        cat[lenA + i + 1] = B[i];
}
/*
 * Compute an optimal isogeny-walk strategy (De Feo-Jao-Plut style dynamic
 * program) for a walk of size n, where a doubling step costs p and an
 * isogeny evaluation costs q.  strategy(i) = [b] ++ strategy(i-b) ++
 * strategy(b) for the cheapest split b.
 * Writes the strategy for size n into strat[] and returns its length
 * (n - 1 entries for n >= 2, 0 otherwise).
 * Fixes vs. original: asserts n < 250 (the scratch tables are statically
 * sized, so larger n silently overflowed them) and drops the redundant
 * `ctr` counter, which always equalled the loop index i.
 */
static unsigned long OptStrat(unsigned long *strat, const unsigned long n, const double p, const double q)
{
    unsigned int i, j, b;
    /* Maximum size of strategy = 250 */
    double C[250], newCpqs[250], newCpq, tmpCpq;
    unsigned long S[250][250];
    unsigned long lens[250];
    unsigned long t;

    assert(n < 250); /* fixed-size scratch tables above */
    if (n < 2)
        return 0;    /* trivial walk: empty strategy (matches original) */
    lens[1] = 0;
    S[1][0] = 0;
    C[1] = 0;
    for (i = 2; i < n + 1; i++) {
        /* cost of splitting a size-i walk after b doubling steps */
        for (b = 1; b < i; b++)
            newCpqs[b] = C[i - b] + C[b] + b * p + (i - b) * q;
        /* pick the cheapest split (first minimum wins, as before) */
        newCpq = newCpqs[1];
        b = 1;
        for (j = 2; j < i; j++) {
            tmpCpq = newCpqs[j];
            if (newCpq > tmpCpq) {
                newCpq = tmpCpq;
                b = j;
            }
        }
        /* strategy(i) = [b] ++ strategy(i-b) ++ strategy(b) */
        S[i][0] = b;
        for (t = 0; t < lens[i - b]; t++)
            S[i][1 + t] = S[i - b][t];
        for (t = 0; t < lens[b]; t++)
            S[i][1 + lens[i - b] + t] = S[b][t];
        C[i] = newCpqs[b];
        lens[i] = 1 + lens[i - b] + lens[b];
    }
    for (t = 0; t < lens[n]; t++)
        strat[t] = S[n][t];
    return lens[n];
}
/* Montgomery x-only point doubling Q = [2]P using the affine curve
 * constant a24.
 * NOTE(review): the per-line comments below mention C24/A24plus and were
 * apparently copied from the projective-constant xDBL; here a24 is affine
 * (C24 = 1) — the formulas follow that specialization. */
static void xDBL_affine(const point_proj_t P, point_proj_t Q, const f2elm_t a24)
{
    f2elm_t t0, t1;
    fp2sub(P->X, P->Z, t0);     // t0 = X1-Z1
    fp2add(P->X, P->Z, t1);     // t1 = X1+Z1
    fp2sqr_mont(t0, t0);        // t0 = (X1-Z1)^2
    fp2sqr_mont(t1, t1);        // t1 = (X1+Z1)^2
    fp2mul_mont(t0, t1, Q->X);  // X2 = C24*(X1-Z1)^2*(X1+Z1)^2
    fp2sub(t1, t0, t1);         // t1 = (X1+Z1)^2-(X1-Z1)^2
    fp2mul_mont(a24, t1, Q->Z); // t0 = A24plus*[(X1+Z1)^2-(X1-Z1)^2]
    fp2add(Q->Z, t0, Q->Z);     // Z2 = A24plus*[(X1+Z1)^2-(X1-Z1)^2] + C24*(X1-Z1)^2
    fp2mul_mont(Q->Z, t1, Q->Z); // Z2 = [A24plus*[(X1+Z1)^2-(X1-Z1)^2] + C24*(X1-Z1)^2]*[(X1+Z1)^2-(X1-Z1)^2]
}
static void xDBLe_affine(const point_proj_t P, point_proj_t Q, const f2elm_t a24, const int e)
{ // Computes [2^e](X:Z) on Montgomery curve with projective constant via e repeated doublings.
    // Input: projective Montgomery x-coordinates P = (XP:ZP), such that xP=XP/ZP and Montgomery curve constants A+2C and 4C.
    // Output: projective Montgomery x-coordinates Q <- (2^e)*P.
    // Copy P into Q (both X and Z, i.e. 2 fp2 elements of 2 fp words each).
    copy_words((digit_t *)P, (digit_t *)Q, 2 * 2 * NWORDS_FIELD);
    for (int i = 0; i < e; i++)
        xDBL_affine(Q, Q, a24);
}
/* Curve constant of the 4-isogenous curve when the kernel point has X == Z:
 * sets A24 = (a24 : a24 - 1) in projective form. */
static void GetFourIsogenyWithKernelXeqZ(point_proj_t A24, const f2elm_t a24)
{
    fp2copy(a24, A24->X);
    fp2copy(a24, A24->Z);
    /* subtract 1 (Montgomery representation of one) from the Z component */
    fpsub((digit_t *)A24->Z, (digit_t *)&Montgomery_one, (digit_t *)A24->Z);
}
/* Push the point S through the 4-isogeny whose kernel point has X == Z;
 * S is updated in place. */
static void EvalFourIsogenyWithKernelXeqZ(point_proj_t S, const f2elm_t a24)
{
    f2elm_t T0, T1, T2;
    fpzero(T1[1]); // Set RZ = 1 (T1 = 1 in F_{p^2})
    fpcopy((digit_t *)&Montgomery_one, T1[0]);
    fp2add(S->X, S->Z, T0);    // T0 = (X+Z)^2 after squaring below
    fp2sub(S->X, S->Z, T2);    // T2 = (X-Z)^2 after squaring below
    fp2sqr_mont(T0, T0);
    fp2sqr_mont(T2, T2);
    fp2sub(T1, a24, T1);       // T1 = 4*(1 - a24)*X*Z after the next steps
    fp2add(T1, T1, T1);
    fp2add(T1, T1, T1);
    fp2mul_mont(T1, S->X, T1);
    fp2mul_mont(T1, S->Z, T1);
    fp2mul_mont(T1, T2, S->Z); // new Z
    fp2sub(T0, T1, T1);
    fp2mul_mont(T0, T1, S->X); // new X
}
/* Curve constant of the 4-isogenous curve when the kernel point has X == -Z:
 * sets A24 = (a24 - 1 : a24) in projective form. */
static void GetFourIsogenyWithKernelXeqMinZ(point_proj_t A24, const f2elm_t a24)
{
    fp2copy(a24, A24->X);
    fp2copy(a24, A24->Z);
    /* subtract 1 (Montgomery representation of one) from the X component */
    fpsub((digit_t *)A24->X, (digit_t *)&Montgomery_one, (digit_t *)A24->X);
}
/* Push the point S through the 4-isogeny whose kernel point has X == -Z;
 * S is updated in place (mirror image of the X == Z case, with a sign
 * flip on the new Z). */
static void EvalFourIsogenyWithKernelXeqMinZ(point_proj_t S, const f2elm_t a24)
{
    f2elm_t T0, T1, T2;
    fp2add(S->X, S->Z, T2);    // T2 = (X+Z)^2 after squaring below
    fp2sub(S->X, S->Z, T0);    // T0 = (X-Z)^2 after squaring below
    fp2sqr_mont(T2, T2);
    fp2sqr_mont(T0, T0);
    fp2add(a24, a24, T1);      // T1 = 4*a24*X*Z after the next steps
    fp2add(T1, T1, T1);
    fp2mul_mont(T1, S->X, T1);
    fp2mul_mont(T1, S->Z, T1);
    fp2mul_mont(T1, T2, S->Z); // new Z (negated below)
    fp2neg(S->Z);
    fp2add(T0, T1, T1);
    fp2mul_mont(T0, T1, S->X); // new X
}
/* Replace X, Y, Z, T by their inverses using a single field inversion
 * (Montgomery's simultaneous-inversion trick). */
static void FourwayInv(f2elm_t X, f2elm_t Y, f2elm_t Z, f2elm_t T)
{
    f2elm_t Xt, Zt, XY, ZT, XYZT;
    fp2copy(X, Xt);            /* keep originals of X and Z; they are */
    fp2copy(Z, Zt);            /* overwritten before their last use   */
    fp2mul_mont(X, Y, XY);
    fp2mul_mont(Z, T, ZT);
    fp2mul_mont(XY, ZT, XYZT);
    fp2inv_mont(XYZT);         /* 1 / XYZT */
    fp2mul_mont(ZT, XYZT, ZT); /* 1 / XY */
    fp2mul_mont(XY, XYZT, XY); /* 1 / ZT */
    fp2mul_mont(ZT, Y, X);     /* X <- Y/(XY) = 1/X */
    fp2mul_mont(ZT, Xt, Y);    /* Y <- X/(XY) = 1/Y */
    fp2mul_mont(XY, T, Z);     /* Z <- T/(ZT) = 1/Z */
    fp2mul_mont(XY, Zt, T);    /* T <- Z/(ZT) = 1/T */
}
/* Simple functions on states */
/* Return bit 0 of the state's first byte (the "c" field of the encoding).
 * NOTE(review): states are accessed via ->bytes here and ->words elsewhere;
 * presumably st_t is a union over the same buffer — confirm in types/state.h. */
static unsigned char GetC_SIDH(const st_t *s)
{
    return (s->bytes[0] & 0x01);
}
/* Return bits 1-2 of the state's first byte (the "b" field, range 0..3). */
static unsigned char GetB_SIDH(const st_t *s)
{
    return ((s->bytes[0] & 0x06) >> 1);
}
/* Store t into the "b" field (bits 1-2) by AND-masking. */
static void SetB_SIDH(st_t *s, const unsigned char t)
{
    /* Assumes that b is set to 0x03: the mask can only clear bits, so the
     * field must currently hold all-ones for the result to equal t. */
    s->bytes[0] &= ((t << 1) | 0xF9);
}
/* Copy the first nwords_state words of state s into state r. */
static void copy_st(st_t *r, const st_t *s, const uint64_t nwords_state)
{
    for (unsigned int i = 0; i < nwords_state; i++)
        r->words[i] = s->words[i];
}
/* Export the first nwords_state words of state s into a raw uint64_t array. */
static void copy_st2uint64(uint64_t *r, const st_t *s, const uint64_t nwords_state)
{
    for (unsigned int i = 0; i < nwords_state; i++)
        r[i] = s->words[i];
}
/* Import nwords_state words from a raw uint64_t array into state r. */
static void copy_uint642st(st_t *r, const uint64_t *s, const uint64_t nwords_state)
{
    for (unsigned int i = 0; i < nwords_state; i++)
        r->words[i] = s[i];
}
/* Exchange the contents of two states.
 * The original allocated a temporary state (and would dereference NULL on
 * calloc failure) just to exchange word contents; swapping the st_t
 * structs by value exchanges the underlying buffers in O(1) with no
 * allocation.  No caller in view retains an alias to r->words or s->words,
 * so swapping the buffer pointers is observationally equivalent to
 * swapping the first nwords_state words. */
static void SwapStSIDH(st_t *r, st_t *s, uint64_t nwords_state)
{
    (void)nwords_state;  /* kept for interface compatibility */
    st_t t = *r;
    *r = *s;
    *s = t;
}
/* Word-wise equality of two states over their first nwords_state words. */
bool is_equal_st(const st_t *s, const st_t *t, const uint64_t nwords_state)
{
    for (uint64_t w = 0; w < nwords_state; w++) {
        if (s->words[w] != t->words[w])
            return false;
    }
    return true;
}
/* Compare a state against a raw uint64_t word array for equality. */
static bool is_equal_st_words(const st_t *s, const uint64_t *r, const uint64_t nwords_state)
{
    uint64_t w = 0;
    while (w < nwords_state) {
        if (s->words[w] != r[w])
            return false;
        w++;
    }
    return true;
}
/* Byte-wise equality of two encoded j-invariants. */
bool IsEqualJinvSIDH(unsigned char j0[FP2_ENCODED_BYTES], unsigned char j1[FP2_ENCODED_BYTES])
{
    unsigned int b = 0;
    while (b < FP2_ENCODED_BYTES) {
        if (j0[b] != j1[b])
            return false;
        b++;
    }
    return true;
}
/* Deep-copy a trail triple: current state, step count and initial state. */
void copy_trip(trip_t *s, const trip_t *t, const uint64_t nwords_state)
{
    copy_st(&s->current_state, &t->current_state, nwords_state);
    s->current_steps = t->current_steps;
    copy_st(&s->initial_state, &t->initial_state, nwords_state);
}
/* Functions for vOW */
/* Decide whether the current walk state is a distinguished point.
 * The tested bits are those above the memory-index bits, and the window is
 * rotated by the function version so the distinguished set changes with
 * each random function. */
bool DistinguishedSIDH(private_state_t *private_state)
{
    /* Divide distinguishedness over interval to avoid bad cases */
    assert(private_state->MEMORY_LOG_SIZE > EXTRA_MEM_LOG_SIZE);
    uint64_t val;
    val = private_state->current.current_state.words[0] >> (private_state->MEMORY_LOG_SIZE + EXTRA_MEM_LOG_SIZE);
    val += (uint64_t)private_state->function_version * (uint64_t)private_state->DIST_BOUND;
    val &= (((uint64_t)1 << (private_state->NBITS_STATE - EXTRA_MEM_LOG_SIZE - private_state->MEMORY_LOG_SIZE)) - 1);
    return (val <= (uint64_t)private_state->DIST_BOUND);
}
/* Map the current state to a slot index in the distinguished-point memory,
 * offset by the number of random functions used so far. */
uint64_t MemIndexSIDH(private_state_t *private_state)
{
    assert(private_state->MEMORY_SIZE <= (pow(2, RADIX - 3) - 1)); /* Assumes that MEMORY_SIZE <= 2^RADIX */
    return (uint64_t)(((private_state->current.current_state.words[0] >> EXTRA_MEM_LOG_SIZE) + private_state->random_functions) & private_state->MEMORY_SIZE_MASK);
}
/* Return the 1-based position of the most significant set bit of the
 * nbits_state-bit little-endian bit string m, or 0 if all bits are clear.
 * Bit t (0-based) lives at m[t >> 3], bit (t & 7).
 * Fix vs. original: the old loop decremented msb to 0 and then computed
 * the bit at index msb - 1 = -1, reading m[-1] (out-of-bounds) whenever
 * the input was all zero.  Testing msb > 0 before reading avoids that. */
static unsigned int GetMSBSIDH(const unsigned char *m, unsigned long nbits_state)
{
    unsigned int msb = (unsigned int)nbits_state;
    while (msb > 0 && (((m[(msb - 1) >> 3] >> ((msb - 1) & 0x07)) & 1) == 0))
    {
        msb--;
    }
    return msb;
}
/* Advance the current walk by one pseudo-random step and, when the
 * Hansel & Gretel optimization is on, record the new state as a "crumb".
 * The crumb tables keep max_crumbs states spaced at geometrically growing
 * distances along the trail so backtracking can restart from the nearest
 * crumb instead of the initial state.
 * NOTE(review): copy_st2uint64 is called with &crumbs.crumbs[pos] as the
 * destination word array, which implies each crumb occupies NWORDS_STATE
 * consecutive uint64_t slots — confirm the crumbs allocation. */
void UpdateSIDH(private_state_t *private_state)
{
    uint64_t i, temp;
    unsigned char j[FP2_ENCODED_BYTES];
    /* one step of the random function; j receives the encoded j-invariant */
    UpdateStSIDH(j, &private_state->current.current_state, &private_state->current.current_state, private_state);
    private_state->number_steps_collect += 1;
    if (private_state->HANSEL_GRETEL) {
        if (private_state->crumbs.num_crumbs < private_state->crumbs.max_crumbs) {
            /* Table not yet full: store every state. */
            copy_st2uint64(&private_state->crumbs.crumbs[private_state->crumbs.position], &private_state->current.current_state, private_state->NWORDS_STATE);
            private_state->crumbs.positions[private_state->crumbs.position] = private_state->crumbs.position;
            private_state->crumbs.index_crumbs[private_state->crumbs.position] = private_state->crumbs.position;
            private_state->crumbs.num_crumbs++;
        } else if (private_state->crumbs.position - private_state->crumbs.positions[private_state->crumbs.max_crumbs - 1] == private_state->crumbs.max_dist) {
            /* Table full and the spacing threshold reached: evict one crumb
             * and shift the bookkeeping tables down by one slot. */
            temp = private_state->crumbs.index_crumbs[private_state->crumbs.index_position];
            for (i = private_state->crumbs.index_position; i < private_state->crumbs.max_crumbs - 1; i++) {
                // Updating table with crumb indices for the crump table
                private_state->crumbs.index_crumbs[i] = private_state->crumbs.index_crumbs[i + 1];
            }
            private_state->crumbs.index_crumbs[private_state->crumbs.max_crumbs - 1] = temp;
            private_state->crumbs.index_position++;
            if (private_state->crumbs.index_position > private_state->crumbs.max_crumbs - 1)
                private_state->crumbs.index_position = 0;
            copy_st2uint64(&private_state->crumbs.crumbs[temp], &private_state->current.current_state, private_state->NWORDS_STATE); // Inserting a new crumb at the end of the crumb table
            for (i = private_state->crumbs.scratch_position; i < private_state->crumbs.max_crumbs - 1; i++) {
                // Updating table with crumb positions
                private_state->crumbs.positions[i] = private_state->crumbs.positions[i + 1];
            }
            private_state->crumbs.positions[private_state->crumbs.max_crumbs - 1] = private_state->crumbs.position;
            /* Grow the inter-crumb distances geometrically as the trail lengthens. */
            private_state->crumbs.swap_position += 2 * private_state->crumbs.real_dist;
            private_state->crumbs.scratch_position++;
            if (private_state->crumbs.swap_position > private_state->crumbs.max_crumbs - 1) {
                // Kind of cumbersome, maybe this can be simplified (but not time critical)
                private_state->crumbs.swap_position = 0;
                private_state->crumbs.real_dist <<= 1;
            }
            if (private_state->crumbs.scratch_position > private_state->crumbs.max_crumbs - 1) {
                private_state->crumbs.scratch_position = 0;
                private_state->crumbs.max_dist <<= 1;
                private_state->crumbs.swap_position = private_state->crumbs.max_dist;
            }
        }
        private_state->crumbs.position++;
    }
}
/* Switch this thread to the next version of the random step function.
 * Thread 0 additionally clears the shared "resync done" flag.
 * NOTE(review): S->resync_cores[0] is written without explicit
 * synchronization — confirm the callers establish the required ordering. */
void UpdateRandomFunctionSIDH(shared_state_t *S, private_state_t *private_state)
{
    private_state->function_version++;
    // reset "resync done" flag
    if (private_state->thread_id == 0) {
        S->resync_cores[0] = 0;
    }
}
/* Locate the actual collision point of two trails that ended at the same
 * distinguished point: align the trail lengths, then walk both trails in
 * lock-step until their j-invariants collide.
 * Returns true iff the collision is a "golden" one (the two states walk
 * different sides, i.e. their c-bits differ); Robin Hood (identical
 * starting states after alignment) and same-side collisions return false.
 * NOTE(review): the first branch swaps only the initial states, not the
 * step counts, so the joint loop bound c1->current_steps+1 is the *longer*
 * length in that branch; extra iterations are unreachable because a
 * collision must occur within the shorter length — confirm.
 * NOTE(review): `(void)private_state;` is redundant — the parameter is
 * used throughout. */
static inline bool BacktrackSIDH_core(trip_t *c0, trip_t *c1, shared_state_t *S, private_state_t *private_state)
{
    unsigned long L, i;
    st_t c0_, c1_;
    unsigned char jinv0[FP2_ENCODED_BYTES], jinv1[FP2_ENCODED_BYTES];
    f2elm_t jinv;
    (void)private_state;
    // Make c0 have the largest number of steps
    if (c0->current_steps < c1->current_steps) {
        SwapStSIDH(&c0->initial_state, &c1->initial_state, private_state->NWORDS_STATE);
        L = (unsigned long)(c1->current_steps - c0->current_steps);
    } else {
        L = (unsigned long)(c0->current_steps - c1->current_steps);
    }
    // Catch up the trails: advance the longer trail's start by the length
    // difference so both remaining trails have equal length.
    for (i = 0; i < L; i++) {
        UpdateStSIDH(jinv0, &c0->initial_state, &c0->initial_state, private_state);
        private_state->number_steps_locate += 1;
    }
    if (is_equal_st(&c0->initial_state, &c1->initial_state, private_state->NWORDS_STATE))
        return false; // Robin Hood
    // Step both trails together until the j-invariants first agree.
    c0_ = init_st(private_state->NWORDS_STATE);
    c1_ = init_st(private_state->NWORDS_STATE);
    for (i = 0; i < c1->current_steps + 1; i++) {
        UpdateStSIDH(jinv0, &c0_, &c0->initial_state, private_state);
        private_state->number_steps_locate += 1;
        UpdateStSIDH(jinv1, &c1_, &c1->initial_state, private_state);
        private_state->number_steps_locate += 1;
        if (IsEqualJinvSIDH(jinv0, jinv1)) {
            /* Record collision */
            private_state->collisions += 1;
            if (private_state->collect_vow_stats) {
#pragma omp critical
                {
                    insertTree(&S->dist_cols, c0->initial_state, c1->initial_state, private_state->NWORDS_STATE);
                }
            }
            // free tmp states
            free_st(&c0_);
            free_st(&c1_);
            if (GetC_SIDH(&c0->initial_state) == GetC_SIDH(&c1->initial_state)) {
                return false;   // same-side collision: useless
            } else {
                fp2_decode(jinv0, jinv);
                assert(fp2_is_equal(jinv, private_state->jinv)); /* Verify that we found the right one*/
                return true;    // golden collision
            }
        } else {
            // No collision yet: slide both windows forward by one step.
            copy_st(&c0->initial_state, &c0_, private_state->NWORDS_STATE);
            copy_st(&c1->initial_state, &c1_, private_state->NWORDS_STATE);
        }
    }
    /* Should never reach here */
    return false;
}
/* Hansel & Gretel backtracking: use the crumb table of the crumbs-trail to
 * find the crumb interval containing the collision, then run the core
 * backtracking only on that short interval instead of the whole trail.
 * c_mem is the trail read from memory, c_crumbs the freshly walked one.
 * NOTE(review): `crumb` is a single uint64_t, yet copy_uint642st(...,
 * &crumb, NWORDS_STATE) reads NWORDS_STATE words from its address — this
 * is only safe if NWORDS_STATE == 1; confirm the build configuration.
 * NOTE(review): the first branch mutates *c_crumbs (initial_state and
 * current_steps) as a side effect on the caller's struct — confirm intended. */
static inline bool BacktrackSIDH_Hansel_Gretel(trip_t *c_mem, trip_t *c_crumbs, shared_state_t *S, private_state_t *private_state)
{
    uint64_t L;
    trip_t c0_, cmem;
    uint64_t i, k, index;
    uint64_t crumb;
    bool resp, equal;
    unsigned char j[FP2_ENCODED_BYTES];
    /* Work on a copy of the memory trail so c_mem itself stays intact. */
    cmem = init_trip(private_state->NWORDS_STATE);
    copy_trip(&cmem, c_mem, private_state->NWORDS_STATE);
    // Make the memory trail (without crumbs) at most the length of the crumbs trail.
    if (cmem.current_steps > c_crumbs->current_steps) {
        L = cmem.current_steps - c_crumbs->current_steps;
        for (i = 0; i < L; i++) {
            UpdateStSIDH(j, &cmem.initial_state, &cmem.initial_state, private_state);
            private_state->number_steps_locate += 1;
        }
        cmem.current_steps = c_crumbs->current_steps;
    }
    // Check for Robin Hood
    if (is_equal_st(&cmem.initial_state, &c_crumbs->initial_state, private_state->NWORDS_STATE))
        return false;
    // The memory path is L steps shorter than the crumbs path.
    L = c_crumbs->current_steps - cmem.current_steps;
    k = 0;
    // Since there has been at least one step, there is at least one crumb.
    // Crumbs only store intermediate points, not the initial state and not
    // necessarily the current state.
    index = private_state->crumbs.positions[0] + 1;
    while ((L > index) && (k + 1 < private_state->crumbs.num_crumbs)) {
        // There are still crumbs to check and we haven't found the next crumb to reach.
        k++;
        index = private_state->crumbs.positions[k] + 1;
    }
    // Either have found the next crumb or ran out of crumbs to check.
    if (L > index) {
        // Ran out of crumbs to check, i.e. already in the interval beyond the last crumb.
        // Trails collide after last crumb.
        // Call original BacktrackGen on memory trail and shortened crumbs trail.
        copy_uint642st(&c_crumbs->initial_state, &private_state->crumbs.crumbs[private_state->crumbs.index_crumbs[k]], private_state->NWORDS_STATE);
        c_crumbs->current_steps -= (private_state->crumbs.positions[k] + 1);
        resp = BacktrackSIDH_core(&cmem, c_crumbs, S, private_state);
    } else {
        // Next crumb to check lies before (or is) the last crumb.
        c0_ = init_trip(private_state->NWORDS_STATE);
        copy_trip(&c0_, &cmem, private_state->NWORDS_STATE);
        // Advance crumb by crumb until the memory trail's state matches a
        // crumb (the collision lies in the preceding interval) or the
        // crumbs are exhausted.
        do
        {
            cmem.current_steps = c0_.current_steps;
            copy_st(&cmem.initial_state, &c0_.initial_state, private_state->NWORDS_STATE);
            crumb = private_state->crumbs.crumbs[private_state->crumbs.index_crumbs[k]];
            index = private_state->crumbs.positions[k] + 1;
            L = cmem.current_steps - (c_crumbs->current_steps - index);
            for (i = 0; i < L; i++) {
                UpdateStSIDH(j, &c0_.initial_state, &c0_.initial_state, private_state);
                private_state->number_steps_locate += 1;
            }
            c0_.current_steps -= L;
            k++;
            equal = is_equal_st_words(&c0_.initial_state, &crumb, private_state->NWORDS_STATE);
        } while (!equal && k < private_state->crumbs.num_crumbs);
        // Either found the colliding crumb or moved to the interval beyond the last crumb.
        if (equal) { // Have a colliding crumb
            copy_uint642st(&cmem.current_state, &crumb, private_state->NWORDS_STATE);
            cmem.current_steps -= c0_.current_steps;
            if (k == 1) {
                // Interval before the first crumb: restart from the trail's
                // true initial state.
                c0_.current_steps = private_state->crumbs.positions[0] + 1;
                copy_uint642st(&c0_.initial_state, c_crumbs->initial_state.words, private_state->NWORDS_STATE);
            } else {
                // Interval between crumbs k-2 and k-1.
                c0_.current_steps = private_state->crumbs.positions[k - 1] - private_state->crumbs.positions[k - 2];
                copy_uint642st(&c0_.initial_state, &private_state->crumbs.crumbs[private_state->crumbs.index_crumbs[k - 2]], private_state->NWORDS_STATE);
            }
            copy_uint642st(&c0_.current_state, &crumb, private_state->NWORDS_STATE);
        } else { // Collision happens after the last crumb.
            cmem.current_steps = c0_.current_steps;
            copy_uint642st(&cmem.initial_state, &crumb, private_state->NWORDS_STATE);
        }
        resp = BacktrackSIDH_core(&cmem, &c0_, S, private_state);
        free_trip(&c0_);
    }
    free_trip(&cmem);
    return resp;
}
/* Backtracking entry point: dispatch to the Hansel & Gretel variant when
 * crumb storage is enabled, otherwise to the plain core routine. */
bool BacktrackSIDH(trip_t *c0, trip_t *c1, shared_state_t *S, private_state_t *private_state)
{
    return private_state->HANSEL_GRETEL
               ? BacktrackSIDH_Hansel_Gretel(c0, c1, S, private_state)
               : BacktrackSIDH_core(c0, c1, S, private_state);
}
|
kthread_test.c | #include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <pthread.h>
#if HAVE_CILK
#include <cilk/cilk.h>
#include <cilk/cilk_api.h>
#endif
#include "kthread.h"
typedef struct {
    int max_iter, w, h;             /* iteration cap and image dimensions */
    double xmin, xmax, ymin, ymax;  /* complex-plane window */
    int *k;                         /* per-pixel escape counts, preset to -1 */
} global_t;

/* Mandelbrot escape-time kernel for pixel i of the w x h grid: store the
 * iteration count at which the orbit leaves the radius-2 disk (or
 * max_iter if it never does). */
static void compute(void *_g, int i, int tid)
{
    global_t *g = (global_t *)_g;
    double re0 = g->xmin + (g->xmax - g->xmin) * (i % g->w) / g->w;
    double im0 = g->ymin + (g->ymax - g->ymin) * (i / g->w) / g->h;
    double re = re0, im = im0;
    int iter;
    assert(g->k[i] < 0);            /* pixel must not be computed twice */
    for (iter = 0; iter < g->max_iter; ++iter) {
        double cross = re * im;     /* re*im before squaring in place */
        re *= re;
        im *= im;
        if (re + im >= 4) break;    /* |z|^2 >= 4: escaped */
        re = re - im + re0;
        im = cross + cross + im0;
    }
    g->k[i] = iter;
}
int main(int argc, char *argv[])
{
    /* Render a Mandelbrot escape-time map with one of several threading
     * back-ends and assert every pixel was computed.
     * argv[1]: "o" = OpenMP, "c" = Cilk, "n" = serial (no threads),
     *          a digit-leading number = kt_for with that many threads.
     * Fixes vs. original: type == 1 ('n') previously matched no branch, so
     * nothing was computed and the final assert fired; calloc unchecked. */
    int i, tmp, tot, type = 0, n_threads = 2;
    global_t global = { 10240*100, 800, 600, -2., -1.2, -1.2, 1.2, 0 };
    if (argc > 1) {
        type = argv[1][0] == 'o'? 2 : argv[1][0] == 'c'? 3 : argv[1][0] == 'n'? 1 : 0;
        if (argv[1][0] >= '0' && argv[1][0] <= '9')
            n_threads = atoi(argv[1]);
    } else {
        fprintf(stderr, "Usage: ./a.out [openmp | cilk | #threads]\n");
    }
    tot = global.w * global.h;
    global.k = calloc(tot, sizeof(int));
    if (global.k == NULL) {
        fprintf(stderr, "calloc failed\n");
        return 1;
    }
    for (i = 0; i < tot; ++i) global.k[i] = -1;   /* mark all pixels pending */
    if (type == 0) {
        kt_for(n_threads, tot, compute, &global);
    } else if (type == 1) {
        /* serial reference run (the 'n' option) */
        for (i = 0; i < tot; ++i)
            compute(&global, i, 0);
    } else if (type == 2) {
#pragma omp parallel for
        for (i = 0; i < tot; ++i)
            compute(&global, i, 0);
    } else if (type == 3) {
#if HAVE_CILK
        cilk_for (i = 0; i < tot; ++i)
            compute(&global, i, 0);
#endif
        /* NOTE: without HAVE_CILK the 'c' option computes nothing and the
         * assert below fires, as in the original. */
    }
    for (i = tmp = 0; i < tot; ++i) tmp += (global.k[i] < 0);
    free(global.k);
    assert(tmp == 0);   /* every pixel must have been computed */
    return 0;
}
|
func.c | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPM
#include <omp.h>
#endif _OPM
/*
 * Approximate pi with the composite midpoint rule applied to the integral
 * of 4/(1+x^2) over [0,1] using N subintervals; the evaluation loop is
 * OpenMP-parallel when compiled with OpenMP.
 *
 * N: number of subintervals (returns 0.0 for N <= 0).
 * Returns the approximation; error is O(1/N^2).
 *
 * Fixes vs. original:
 *  - midpoints are (i + 0.5)/N: the original sampled (i - 0.5)/N, which
 *    double-counts the first midpoint and skips the last, degrading the
 *    error to O(1/N);
 *  - pow(x, -1) / pow(x, 2) replaced by plain division/multiplication;
 *  - guard against non-positive N (the original divided by N unchecked).
 */
double my_phi_func(int N) {
  int i;
  double sum_tot = 0.0;

  if (N <= 0)
    return 0.0;

  double N_inv = 1.0 / N;  /* precompute 1/N once */
  double *sum_tmp = malloc(N * sizeof(double));
  if (sum_tmp == NULL) {
    printf("malloc of size %d failed!\n", N); // could also call perror here
    exit(1); // or return an error to caller
  }
#pragma omp parallel for
  for (i = 0; i < N; i++) {
    /* integrand evaluated at the midpoint of subinterval i */
    double x = (i + 0.5) * N_inv;
    sum_tmp[i] = 4.0 / (1.0 + x * x);
  }
  /* serial reduction keeps the summation order deterministic */
  for (i = 0; i < N; i++) {
    sum_tot += sum_tmp[i];
  }
  free(sum_tmp);
  return sum_tot * N_inv;
}
|
utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file utils.h
* \brief Basic utilility functions.
*/
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_
#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <nnvm/node.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>
#include <memory>
#include <vector>
#include <type_traits>
#include <utility>
#include <random>
#include <string>
#include <thread>
#include <algorithm>
#include <functional>
#include <limits>
#include "../operator/mxnet_op.h"
#if MXNET_USE_MKLDNN == 1
#include "../operator/nn/mkldnn/mkldnn_base-inl.h"
#endif
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
#include <windows.h>
#else
#include <unistd.h>
#endif
namespace mxnet {
namespace common {
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
/*! \brief Return the OS process id of the current process (Windows). */
inline size_t current_process_id() { return ::GetCurrentProcessId(); }
#else
/*! \brief Return the OS process id of the current process (POSIX). */
inline size_t current_process_id() { return getpid(); }
#endif
/*!
* \brief IndPtr should be non-negative, in non-decreasing order, start with 0
* and end with value equal with size of indices.
*/
struct csr_indptr_check {
  /*!
   * \brief Flag *out = kCSRIndPtrErr if indptr[i..i+1] violates the CSR
   * contract: negative entry, decreasing pair, first entry not 0, or last
   * entry (checked by the thread with i == end-1) not equal to idx_size.
   * Launched with one work item per row, i in [0, end).
   */
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr,
                                  const nnvm::dim_t end, const nnvm::dim_t idx_size) {
    if (indptr[i+1] < 0 || indptr[i+1] < indptr[i] ||
        (i == 0 && indptr[i] != 0) ||
        (i == end - 1 && indptr[end] != idx_size))
      *out = kCSRIndPtrErr;
  }
};
/*!
* \brief Indices should be non-negative, less than the number of columns
* and in ascending order per row.
*/
struct csr_idx_check {
  /*!
   * \brief Flag *out = kCSRIdxErr if any column index of row i is negative,
   * >= ncols, or not strictly ascending within the row.
   * Launched with one work item per row.
   */
  template<typename DType, typename IType, typename RType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const RType* indptr, const nnvm::dim_t ncols) {
    for (RType j = indptr[i]; j < indptr[i+1]; j++) {
      if (idx[j] >= ncols || idx[j] < 0 ||
          (j < indptr[i+1] - 1 && idx[j] >= idx[j+1])) {
        *out = kCSRIdxErr;
        break;
      }
    }
  }
};
/*!
* \brief Indices of RSPNDArray should be non-negative,
* less than the size of first dimension and in ascending order
*/
struct rsp_idx_check {
  /*!
   * \brief Flag *out = kRSPIdxErr if row index i is negative, >= nrows, or
   * not strictly ascending relative to its successor (the i+1 lookahead is
   * guarded by i < end, where end = number of indices - 1).
   */
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const nnvm::dim_t end, const nnvm::dim_t nrows) {
    if ((i < end && idx[i+1] <= idx[i])
        || idx[i] < 0 || idx[i] >= nrows)
      *out = kRSPIdxErr;
  }
};
template<typename xpu>
void CheckFormatWrapper(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check);
/*!
* \brief Check the validity of CSRNDArray.
* \param rctx Execution context.
* \param input Input NDArray of CSRStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template<typename xpu>
void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kCSRStorage)
    << "CheckFormatCSRImpl is for CSRNDArray";
  const mxnet::TShape shape = input.shape();
  const mxnet::TShape idx_shape = input.aux_shape(csr::kIdx);
  const mxnet::TShape indptr_shape = input.aux_shape(csr::kIndPtr);
  const mxnet::TShape storage_shape = input.storage_shape();
  // O(1) structural checks: 2-D matrix, 1-D aux arrays, indptr has one
  // entry per row plus one, and idx/data lengths agree.
  if ((shape.ndim() != 2) ||
      (idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
      (indptr_shape[0] != shape[0] + 1) ||
      (idx_shape[0] != storage_shape[0])) {
     MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
       DType* err = err_cpu.dptr<DType>();
       *err = kCSRShapeErr;
     });
     return;
  }
  if (full_check) {
    // O(N) content checks on the device: run the indptr and idx kernels
    // into a 1-element device buffer, then copy the verdict back to CPU.
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
        MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
          mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
          NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                    rctx.get_ctx(), false, err_cpu.type_flag_);
          TBlob val_xpu = ret_xpu.data();
          Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
          Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
            input.aux_data(csr::kIndPtr).dptr<RType>(),
            indptr_shape[0] - 1, idx_shape[0]);
          // no need to check indices if indices are empty
          if (idx_shape[0] != 0) {
            Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
              input.aux_data(csr::kIdx).dptr<IType>(),
              input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]);
          }
          mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
                        val_xpu.get<xpu, 1, DType>(s), s);
        });
      });
    });
  }
}
/*!
* \brief Check the validity of RowSparseNDArray.
* \param rctx Execution context.
* \param input Input NDArray of RowSparseStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template<typename xpu>
void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kRowSparseStorage)
    << "CheckFormatRSPImpl is for RSPNDArray";
  const mxnet::TShape idx_shape = input.aux_shape(rowsparse::kIdx);
  // O(1) structural check: the row-index aux array must have exactly one
  // entry per stored row.
  if (idx_shape[0] != input.storage_shape()[0]) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kRSPShapeErr;
    });
    return;
  }
  // No stored rows: nothing further to validate.
  if (idx_shape[0] == 0) {
    return;
  }
  if (full_check) {
    // O(N) device-side check of the row indices.
    // NOTE(review): the exact index constraints (bounds/ordering) are defined
    // by the rsp_idx_check kernel — confirm there before relying on them.
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
        mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
        // One-element scratch array on the target device holding the error code.
        NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                  rctx.get_ctx(), false, err_cpu.type_flag_);
        TBlob val_xpu = ret_xpu.data();
        // Initialize the error code to "no error" before running the check kernel.
        Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
        Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0],
          val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(),
          idx_shape[0] - 1, input.shape()[0]);
        // Bring the device-side error code back into the CPU-resident output blob.
        mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
                      val_xpu.get<xpu, 1, DType>(s), s);
      });
    });
  }
}
// Dispatch the sparse-format validity check by the input's storage type.
// Dense (default) storage needs no validation; unknown types are fatal.
template<typename xpu>
void CheckFormatImpl(const RunContext &rctx, const NDArray &input,
                     const TBlob &err_cpu, const bool full_check) {
  const int stype = input.storage_type();
  switch (stype) {
    case kCSRStorage:
      CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kRowSparseStorage:
      CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kDefaultStorage:
      // no-op for default storage
      break;
    default:
      LOG(FATAL) << "Unknown storage type " << stype;
  }
}
/*! \brief Pick rows specified by user input index array from a row sparse ndarray
* and save them in the output sparse ndarray.
*/
template<typename xpu>
void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s,
const NDArray& input_nd,
const TBlob& idx_data,
const OpReqType req,
NDArray* output_nd);
/* \brief Casts tensor storage type to the new type.
*/
template<typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
* false is returned for empty inputs.
*/
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
 *         false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
                                const NDArrayStorageType stype) {
  // An empty vector never matches.
  if (vstorage.empty()) return false;
  for (const auto& st : vstorage) {
    if (st != stype) return false;
  }
  return true;
}
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype1`
* or `stype2'. Sets boolean if both found.
* false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool *has_both) {
if (has_both) {
*has_both = false;
}
if (!vstorage.empty()) {
uint8_t has = 0;
for (const auto i : vstorage) {
if (i == stype1) {
has |= 1;
} else if (i == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as target `stype`. false is returned for empty inputs.
*/
/*! \brief returns true if the storage types of arrays in `ndarrays`
 *         are the same as target `stype`. false is returned for empty inputs.
 */
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype) {
  if (ndarrays.empty()) return false;
  for (const auto& arr : ndarrays) {
    if (arr.storage_type() != stype) return false;
  }
  return true;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as targets `stype1` or `stype2`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool *has_both) {
if (has_both) {
*has_both = false;
}
if (!ndarrays.empty()) {
uint8_t has = 0;
for (const auto& nd : ndarrays) {
const NDArrayStorageType stype = nd.storage_type();
if (stype == stype1) {
has |= 1;
} else if (stype == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief returns true if storage type of any array in `ndarrays`
* is the same as the target `stype`. false is returned for empty inputs.
*/
/*! \brief returns true if storage type of any array in `ndarrays`
 *         is the same as the target `stype`. false is returned for empty inputs.
 */
inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype) {
  for (const auto& arr : ndarrays) {
    if (arr.storage_type() == stype) return true;
  }
  return false;
}
/*! \brief returns true if any storage type `ndstype` in `ndstypes`
* is the same as the target `stype`. false is returned for empty inputs.
*/
/*! \brief returns true if any storage type in `ndstypes`
 *         equals the target `stype`. false is returned for empty inputs.
 */
inline bool ContainsStorageType(const std::vector<int>& ndstypes,
                                const NDArrayStorageType stype) {
  for (const int st : ndstypes) {
    if (st == stype) return true;
  }
  return false;
}
/*! \brief map an mshadow dtype enum to its C type name; fatal on unknown values */
inline std::string dtype_string(const int dtype) {
  switch (dtype) {
    case mshadow::kFloat32: return "float";
    case mshadow::kFloat64: return "double";
    case mshadow::kFloat16: return "half";
    case mshadow::kUint8:   return "unsigned char";
    case mshadow::kInt8:    return "char";
    case mshadow::kInt32:   return "int";
    case mshadow::kInt64:   return "long long";
    case mshadow::kBool:    return "bool";
    default:
      LOG(FATAL) << "Unknown type enum " << dtype;
  }
  return "unknown";  // unreachable after LOG(FATAL); keeps the compiler happy
}
/*! \brief get string representation of dispatch_mode */
/*! \brief get string representation of dispatch_mode */
inline std::string dispatch_mode_string(const DispatchMode x) {
  switch (x) {
    case DispatchMode::kFCompute:         return "fcompute";
    case DispatchMode::kFComputeEx:       return "fcompute_ex";
    case DispatchMode::kFComputeFallback: return "fcompute_fallback";
    case DispatchMode::kVariable:         return "variable";
    case DispatchMode::kUndefined:        return "undefined";
  }
  return "unknown";
}
/*! \brief get string representation of storage_type */
/*! \brief get string representation of storage_type */
inline std::string stype_string(const int x) {
  switch (x) {
    case kDefaultStorage:   return "default";
    case kCSRStorage:       return "csr";
    case kRowSparseStorage: return "row_sparse";
  }
  return "unknown";
}
/*! \brief get string representation of device type */
/*! \brief get string representation of device type */
inline std::string dev_type_string(const int dev_type) {
  switch (dev_type) {
    case Context::kCPU:       return "cpu";
    case Context::kGPU:       return "gpu";
    case Context::kCPUPinned: return "cpu_pinned";
    case Context::kCPUShared: return "cpu_shared";
  }
  return "unknown";
}
/*! \brief get string representation of the operator stypes */
/*! \brief Build a human-readable summary of an operator's storage types,
 *         parameters, and device for diagnostics. */
inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
                                         const int dev_mask,
                                         const std::vector<int>& in_attrs,
                                         const std::vector<int>& out_attrs) {
  std::ostringstream os;
  // Shared printer for the two storage-type lists.
  auto append_stypes = [&os](const std::vector<int>& stypes) {
    for (const int st : stypes) {
      os << stype_string(st) << ", ";
    }
  };
  os << "operator = " << attrs.op->name << "\ninput storage types = [";
  append_stypes(in_attrs);
  os << "]\n" << "output storage types = [";
  append_stypes(out_attrs);
  os << "]\n" << "params = {";
  for (const auto& kv : attrs.dict) {
    os << "\"" << kv.first << "\" : " << kv.second << ", ";
  }
  os << "}\n" << "context.dev_mask = " << dev_type_string(dev_mask);
  return os.str();
}
/*! \brief get string representation of the operator */
/*! \brief get string representation of the operator (storage types + params)
 *  \param attrs   node attributes of the operator
 *  \param ctx     op execution context (used for the device mask)
 *  \param inputs  input arrays whose storage types are reported
 *  \param req     write requests (unused here; kept for call-site compatibility)
 *  \param outputs output arrays whose storage types are reported
 */
inline std::string operator_string(const nnvm::NodeAttrs& attrs,
                                   const OpContext& ctx,
                                   const std::vector<NDArray>& inputs,
                                   const std::vector<OpReqType>& req,
                                   const std::vector<NDArray>& outputs) {
  std::vector<int> in_stypes;
  std::vector<int> out_stypes;
  in_stypes.reserve(inputs.size());
  out_stypes.reserve(outputs.size());
  // Fix: take the NDArray by const reference — the original lambda copied
  // every array by value just to read its storage type.
  auto xform = [](const NDArray& arr) -> int { return arr.storage_type(); };
  std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform);
  std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform);
  return operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
}
/*! \brief log message once. Intended for storage fallback warning messages. */
/*! \brief log message once per thread. Intended for storage fallback warnings. */
inline void LogOnce(const std::string& message) {
  // One dedup set per thread, so "once" means once per thread.
  using LogStore = dmlc::ThreadLocalStore<std::unordered_set<std::string>>;
  auto* store = LogStore::Get();
  if (store->count(message) == 0) {
    LOG(INFO) << message;
    store->insert(message);
  }
}
/*! \brief log storage fallback event
*/
/*! \brief log storage fallback event: an operator that could not handle the
 *         requested storage types will run via temporary dense arrays.
 *  Silenced by setting MXNET_STORAGE_FALLBACK_LOG_VERBOSE=0.
 */
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
                               const int dev_mask,
                               const std::vector<int>* in_attrs,
                               const std::vector<int>* out_attrs) {
  // Read the env switch once per process.
  static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
  if (!log) return;
  const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
  std::ostringstream os;
  const char* warning = "\nThe operator with default storage type will be dispatched "
    "for execution. You're seeing this warning message because the operator above is unable "
    "to process the given ndarrays with specified storage types, context and parameter. "
    "Temporary dense ndarrays are generated in order to execute the operator. "
    "This does not affect the correctness of the programme. "
    "You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to "
    "0 to suppress this warning.";
  os << "\nStorage type fallback detected:\n" << op_str << warning;
  // Deduplicated per thread so repeated fallbacks do not spam the log.
  LogOnce(os.str());
#if MXNET_USE_MKLDNN == 1
  // Extra hints when MKL-DNN support is compiled in but disabled or cache-limited.
  if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. "
      "You can re-enable by setting MXNET_MKLDNN_ENABLED=1");
  if (GetMKLDNNCacheSize() != -1) common::LogOnce("MXNET_MKLDNN_CACHE_NUM is set."
      "Should only be set if "
      "your model has variable input shapes, "
      "as cache size may grow unbounded");
#endif
}
// heuristic to determine number of threads per GPU
inline int GetNumThreadsPerGPU() {
  // This is resource efficient option: two workers per GPU unless overridden.
  return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}
// heuristic to get number of matching colors.
// this decides how much parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
  // Resource-efficient default; never exceed the per-GPU worker thread count.
  const int requested = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
  return std::min(requested, GetNumThreadsPerGPU());
}
// Sum the n elements of `a` into `start` using an OpenMP reduction.
template<typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
  V total = start;
#pragma omp parallel for reduction(+:total)
  for (int idx = 0; idx < n; ++idx) {
    total += a[idx];
  }
  return total;
}
/*!
 * \brief
 * Helper function for ParallelSort.
 * DO NOT call this function directly.
 * Use the interface ParallelSort instead.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len,
                        size_t grainsize, const Compare& comp) {
  if (len < grainsize) {
    // Below the grain size: plain sequential sort.
    std::sort(first, first+len, comp);
  } else {
    // Sort the first half on a new thread, the second half on this one,
    // then merge; the recursion spawns roughly len/grainsize threads total.
    std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len/2, grainsize, comp);
    ParallelSortHelper(first+len/2, len - len/2, grainsize, comp);
    thr.join();
    std::inplace_merge(first, first+len/2, first+len, comp);
  }
}
/*!
* \brief
* Sort the elements in the range [first, last) into the ascending order defined by
* the comparator comp.
* If the length of the range [first, last) is greater than a certain threshold,
* the range will be recursively divided into two and assign two threads
* to sort each half range.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
const auto num = std::distance(first, last);
size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024*16));
ParallelSortHelper(first, num, grainsize, comp);
}
/*!
* \brief
* Sort the elements in the range [first, last) into ascending order.
* The elements are compared using the default < operator.
* If the length of the range [first, last) is greater than a certain threshold,
* the range will be recursively divided into two and assign two threads
* to sort each half range.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template<typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
ParallelSort(first, last, num_threads,
std::less<typename std::iterator_traits<RandomIt>::value_type>());
}
/*!
* \brief Random Engine
*/
typedef std::mt19937 RANDOM_ENGINE;
/*!
* \brief Helper functions.
*/
namespace helper {
/*!
 * \brief Helper for non-array type `T` — SFINAE selector used by MakeUnique.
 */
template <class T>
struct UniqueIf {
  /*!
   * \brief `std::unique_ptr` to a single `T`; only defined for non-array `T`.
   */
  using SingleObject = std::unique_ptr<T>;
};
/*!
 * \brief Helper for an array of unknown bound `T[]`.
 */
template <class T>
struct UniqueIf<T[]> {
  /*!
   * \brief `std::unique_ptr` to a `T[]`; only defined for unknown-bound arrays.
   */
  using UnknownBound = std::unique_ptr<T[]>;
};
/*!
 * \brief Helper for an array of known bound `T[kSize]`.
 */
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
  /*!
   * \brief `void` marker used to delete the known-bound MakeUnique overload.
   */
  using KnownBound = void;
};
}  // namespace helper
/*!
 * \brief Construct a `T` and wrap it in a `std``::``unique_ptr`.
 * \param args Arguments forwarded to `T`'s constructor.
 * \return `std``::``unique_ptr` owning the new instance of `T`.
 *
 * Participates in overload resolution only when `T` is not an array type.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
  T* object = new T(std::forward<Args>(args)...);
  return std::unique_ptr<T>(object);
}
/*!
 * \brief Construct a value-initialized array of `T` in a `std``::``unique_ptr`.
 * \param n The number of elements to allocate.
 * \return `std``::``unique_ptr` owning the new array.
 *
 * Participates in overload resolution only when `T` is an array of unknown bound.
 */
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
  using Elem = typename std::remove_extent<T>::type;
  return std::unique_ptr<T>(new Elem[n]{});
}
/*!
 * \brief Overload for arrays of known bound — deleted.
 * \param args List of arguments with which an instance of `T` would be
 *        constructed.
 *
 * Constructing an array of known bound (e.g. `T[5]`) is disallowed; this
 * deleted overload turns such calls into compile-time errors.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;
// Look up the compute function of kind `name` registered for `op` on the
// device of `ctx`; returns nullptr when the op has no such registration.
// NOTE(review): the registry references are function-local statics, so they
// bind to the first `name` passed for a given FCompType instantiation —
// callers appear to always pair one FCompType with one name; confirm.
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
                      const Context& ctx) {
  static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
  static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
  if (ctx.dev_mask() == cpu::kDevMask) {
    return fcompute_cpu.get(op, nullptr);
  } else if (ctx.dev_mask() == gpu::kDevMask) {
    return fcompute_gpu.get(op, nullptr);
  } else {
    LOG(FATAL) << "Unknown device mask " << ctx.dev_mask();
    return nullptr;
  }
}
/*!
 * \brief Return the max integer value representable in the type `T` without loss of precision.
 *
 * Integral `T`: the full range is exact, so return max().
 * Floating `T`: integers are exact up to 2^digits, where digits is the
 * mantissa bit count of `T`.
 */
template <typename T>
constexpr size_t MaxIntegerValue() {
  return std::is_integral<T>::value
      ? static_cast<size_t>(std::numeric_limits<T>::max())
      : static_cast<size_t>(1) << std::numeric_limits<T>::digits;
}
// Specialization for mshadow's half type: 11-bit mantissa, so integers are
// exact up to 2^11 = 2048.
template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
  return size_t(2) << 10;
}
// For a > 0 returns 1 + floor(log2(a)); returns 1 for a == 0.
// NOTE(review): the result is one larger than a conventional ilog2 (k starts
// at 1) — callers appear to rely on this bit-width-style count; confirm
// before "fixing".
MSHADOW_XINLINE int ilog2ul(size_t a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}
// Same as ilog2ul, for unsigned int: 1 + floor(log2(a)) for a > 0, else 1.
MSHADOW_XINLINE int ilog2ui(unsigned int a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}
/*!
 * \brief Return an NDArray of all zeros with the requested storage type.
 */
inline NDArray InitZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
                         const Context &ctx, const int dtype) {
  if (stype != kDefaultStorage) {
    // NDArray with non-default storage. Storage allocation is always delayed.
    return NDArray(stype, shape, ctx, true, dtype);
  }
  // Dense storage: allocate eagerly, then zero-fill.
  NDArray zeros(shape, ctx, false, dtype);
  zeros = 0;
  return zeros;
}
/*!
 * \brief Append an all-zero NDArray of the given storage type to *vec.
 */
inline void EmplaceBackZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
                             const Context &ctx, const int dtype,
                             std::vector<NDArray> *vec) {
  if (stype != kDefaultStorage) {
    // NDArray with non-default storage. Storage allocation is always delayed.
    vec->emplace_back(stype, shape, ctx, true, dtype);
    return;
  }
  // Dense storage: construct in place, then zero-fill.
  vec->emplace_back(shape, ctx, false, dtype);
  vec->back() = 0;
}
/*!
 * \brief parallelize copy by OpenMP.
 */
template<typename DType>
inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
  // Threshold below which plain memcpy beats spinning up OpenMP threads;
  // read once per process from the environment.
  static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_COPY_SIZE", 200000);
  if (size >= copy_block_size) {
    #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (index_t i = 0; i < size; ++i) {
      dst[i] = src[i];
    }
  } else {
    // Small copy: memcpy contract assumes dst/src do not overlap.
    std::memcpy(dst, src, sizeof(DType) * size);
  }
}
/*!
 * \brief Convert a legacy-convention shape to the numpy convention, in place.
 *
 * Legacy encoding: ndim == 0 means the whole shape is unknown, and a dim size
 * of 0 means that dim is unknown. Numpy encoding: ndim == 0 is a scalar,
 * ndim == -1 is an unknown shape, dim size 0 is an empty dimension, and dim
 * size -1 is an unknown dimension. Shapes that are already numpy compatible
 * (e.g. coming from a subgraph operator's infer-shape pass) are unaffected by
 * the per-dimension rewrite.
 * \param shape shape to be converted.
 */
inline void ConvertToNumpyShape(mxnet::TShape* shape) {
  if (shape->ndim() == 0) {
    *shape = mxnet::TShape();  // legacy ndim == 0 -> unknown shape (ndim == -1)
  } else {
    for (int i = 0; i < shape->ndim(); ++i) {
      if ((*shape)[i] == 0) {
        (*shape)[i] = -1;  // legacy dim size 0 -> unknown dim size
      }
    }
  }
}
// Vector overload: convert every shape in *shapes to the numpy convention.
inline void ConvertToNumpyShape(mxnet::ShapeVector* shapes) {
  for (auto& shape : *shapes) {
    ConvertToNumpyShape(&shape);
  }
}
/*!
 * \brief This function converts a shape produced by the infer-shape
 *        functions/pass back to the legacy shape definition
 *        (unknown shape -> ndim 0, unknown dim size -> 0).
 */
inline void ConvertToLegacyShape(mxnet::TShape* shape) {
  if (!mxnet::ndim_is_known(*shape)) {
    *shape = mxnet::TShape(0, -1);
    return;
  }
  for (int i = 0; i < shape->ndim(); ++i) {
    if (!mxnet::dim_size_is_known(*shape, i)) {
      (*shape)[i] = 0;  // legacy encoding for an unknown dim size
    }
  }
}
// Vector overload: convert every shape in *shapes to the legacy convention.
inline void ConvertToLegacyShape(mxnet::ShapeVector* shapes) {
  for (auto& shape : *shapes) {
    ConvertToLegacyShape(&shape);
  }
}
void ExecuteMonInputCallback(
const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays,
size_t nid, const std::function<void(const char *, const char *, void *)>
&monitor_callback);
void ExecuteMonOutputCallback(
const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays,
size_t nid, const std::function<void(const char *, const char *, void *)>
&monitor_callback);
/*!
 * \brief Return the output name of a NodeEntry.
 */
static inline std::string GetOutputName(const nnvm::NodeEntry& e) {
  // Wrap the entry in a one-output symbol and reuse its name-listing logic.
  nnvm::Symbol sym;
  sym.outputs.push_back(e);
  return sym.ListOutputNames().front();
}
// Map any negative axis values in `src` to their positive equivalents
// (axis += ndim) and CHECK that every resulting axis lies in [0, ndim).
inline mxnet::TShape CanonicalizeAxes(const mxnet::TShape& src) {
  // convert negative axes to positive values
  const int ndim = src.ndim();
  mxnet::TShape axes = src;
  for (int i = 0; i < ndim; ++i) {
    if (axes[i] < 0) {
      axes[i] += ndim;
    }
    // Aborts on out-of-range axes, including ones still negative after the shift.
    CHECK(axes[i] >= 0 && axes[i] < ndim) << "axes[" << i << "]="
                                          << axes[i] << " exceeds the range ["
                                          << 0 << ", " << ndim << ")";
  }
  return axes;
}
// True when dtype is one of the floating-point mshadow dtypes.
inline bool is_float(const int dtype) {
  switch (dtype) {
    case mshadow::kFloat16:
    case mshadow::kFloat32:
    case mshadow::kFloat64:
      return true;
    default:
      return false;
  }
}
// Return the more precise of two mshadow dtypes, following the promotion
// lattice: float64 > float32 > float16 > (any int); int64 > int32 > uint8/int8.
// A uint8/int8 mix is a checked error here — np_binary_out_infer_type below
// handles that combination before calling this.
inline int get_more_precise_type(const int type1, const int type2) {
  if (type1 == type2) return type1;
  if (is_float(type1) && is_float(type2)) {
    // Both floating: pick the wider format.
    if (type1 == mshadow::kFloat64 || type2 == mshadow::kFloat64) {
      return mshadow::kFloat64;
    }
    if (type1 == mshadow::kFloat32 || type2 == mshadow::kFloat32) {
      return mshadow::kFloat32;
    }
    return mshadow::kFloat16;
  } else if (is_float(type1) || is_float(type2)) {
    // Float beats any integer type.
    return is_float(type1) ? type1 : type2;
  }
  // Both integral: pick the wider width.
  if (type1 == mshadow::kInt64 || type2 == mshadow::kInt64) {
    return mshadow::kInt64;
  }
  if (type1 == mshadow::kInt32 || type2 == mshadow::kInt32) {
    return mshadow::kInt32;
  }
  CHECK(!((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
          (type1 == mshadow::kInt8 && type2 == mshadow::kUint8)))
    << "1 is UInt8 and 1 is Int8 should not get here";
  if (type1 == mshadow::kUint8 || type2 == mshadow::kUint8) {
    return mshadow::kUint8;
  }
  return mshadow::kInt8;
}
// Output dtype of a numpy-style binary op: a uint8/int8 mix promotes to
// int32; every other pair defers to get_more_precise_type.
inline int np_binary_out_infer_type(const int type1, const int type2) {
  const bool mixed_i8_u8 =
      (type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
      (type1 == mshadow::kInt8 && type2 == mshadow::kUint8);
  return mixed_i8_u8 ? mshadow::kInt32 : get_more_precise_type(type1, type2);
}
} // namespace common
} // namespace mxnet
#endif // MXNET_COMMON_UTILS_H_
|
queues.c | // -*-Mode: C++;-*- // technically C99
// * BeginRiceCopyright *****************************************************
//
// $HeadURL$
// $Id$
//
// --------------------------------------------------------------------------
// Part of HPCToolkit (hpctoolkit.org)
//
// Information about sources of support for research and development of
// HPCToolkit is at 'hpctoolkit.org' and in 'README.Acknowledgments'.
// --------------------------------------------------------------------------
//
// Copyright ((c)) 2002-2022, Rice University
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// * Neither the name of Rice University (RICE) nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// This software is provided by RICE and contributors "as is" and any
// express or implied warranties, including, but not limited to, the
// implied warranties of merchantability and fitness for a particular
// purpose are disclaimed. In no event shall RICE or contributors be
// liable for any direct, indirect, incidental, special, exemplary, or
// consequential damages (including, but not limited to, procurement of
// substitute goods or services; loss of use, data, or profits; or
// business interruption) however caused and on any theory of liability,
// whether in contract, strict liability, or tort (including negligence
// or otherwise) arising in any way out of the use of this software, even
// if advised of the possibility of such damage.
//
// ******************************************************* EndRiceCopyright *
//*****************************************************************************
// local includes
//*****************************************************************************
#include "queues.h"
//*****************************************************************************
// interface functions
//*****************************************************************************
// Field-access shorthands used throughout this file:
//   S* = serial queues backed by the plain pointer field `ptr`
//   A* = concurrent queues backed by the atomic pointer field `aptr`
//   *d = access through a struct value, *p = access through a struct pointer
#define Sd(q) q.ptr
#define Sp(q) q->ptr
#define Ad(q) q.aptr
#define Ap(q) q->aptr
// Store element v into the serial (single-threaded) queue pointer p.
void squeue_ptr_set(q_element_ptr_t *p, q_element_t *v)
{
  p->ptr = v;
}
// Read the current element of the serial queue pointer e.
q_element_t *squeue_ptr_get(q_element_ptr_t *e)
{
  return e->ptr;
}
// Replace the serial queue head with r and return the previous head.
q_element_t *squeue_swap(q_element_ptr_t *q, q_element_t *r)
{
  q_element_t *old_head = q->ptr;
  q->ptr = r;
  return old_head;
}
// Prepend element e to the serial queue q.
void squeue_push(q_element_ptr_t *q, q_element_t *e)
{
  // Link e in front of the current head, then make it the head.
  e->next.ptr = q->ptr;
  q->ptr = e;
}
// Remove and return the head of the serial queue q; 0 when empty.
q_element_t *squeue_pop(q_element_ptr_t *q)
{
  q_element_t *head = q->ptr;
  if (head == 0) return 0;
  q->ptr = head->next.ptr;
  head->next.ptr = 0;  // detach so the element carries no stale link
  return head;
}
// Take the entire chain from the serial queue, leaving it empty.
q_element_t *squeue_steal(q_element_ptr_t *q)
{
  return squeue_swap(q, 0);
}
// Initialize the atomic queue pointer e to v.
// atomic_init is initialization, not an atomic store — call before sharing.
void cqueue_ptr_set(q_element_ptr_t *e, q_element_t *v)
{
  atomic_init(&e->aptr, v);
}
// Atomically read the current element of the concurrent queue pointer e.
q_element_t *cqueue_ptr_get(q_element_ptr_t *e)
{
  return atomic_load(&e->aptr);
}
// Atomically replace the concurrent queue head with r; return the old head.
q_element_t *cqueue_swap(q_element_ptr_t *q, q_element_t *r)
{
  return atomic_exchange(&q->aptr, r);
}
// Lock-free prepend of e (a singleton or an entire chain) onto queue q.
void
cqueue_push
(
 q_element_ptr_t *q,
 q_element_t *e
)
{
  q_element_t *head = atomic_load(&Ap(q));
  q_element_t *new_head = e;
  // push a singleton or a chain on the list
  // Walk to the tail of the chain rooted at e so the whole chain can be
  // spliced in front of the current queue head in one CAS.
  for (;;) {
    q_element_t *enext = atomic_load(&e->Ad(next));
    if (enext == 0) break;
    e = enext;
  }
  // Point the chain's tail at the observed head, then CAS the queue head.
  // On failure, atomic_compare_exchange_strong refreshes `head` and we retry.
  do {
    atomic_store(&e->Ad(next), head);
  } while (!atomic_compare_exchange_strong(&Ap(q), &head, new_head));
}
// Lock-free pop of the head element of queue q; returns 0 when empty.
// NOTE(review): a plain CAS pop of this form is susceptible to ABA if popped
// elements can be freed and re-pushed concurrently — confirm the usage model
// (e.g. elements recycled only via steal/push by the owner).
q_element_t *
cqueue_pop
(
 q_element_ptr_t *q
)
{
  q_element_t *oldhead = atomic_load(&Ap(q));
  q_element_t *next = 0;
  // Re-read head->next and retry until the head CAS succeeds.
  do {
    if (oldhead == 0) return 0;
    next = atomic_load(&oldhead->Ad(next));
  } while (!atomic_compare_exchange_strong(&Ap(q), &oldhead, next));
  // Detach the popped element so it carries no stale link.
  atomic_store(&oldhead->Ad(next),0);
  return oldhead;
}
// Atomically take the entire chain from the concurrent queue, leaving it empty.
q_element_t *cqueue_steal(q_element_ptr_t *q)
{
  return cqueue_swap(q, 0);
}
//*****************************************************************************
// unit test
//*****************************************************************************
#define UNIT_TEST 0
#if UNIT_TEST
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
typedef struct {
q_element_ptr_t next;
int value;
} typed_queue_elem(int);
typedef q_element_ptr_t typed_queue_elem_ptr(int);
typed_queue_elem_ptr(int) queue;
#define qtype cqueue
typed_queue(int, qtype)
// Allocate and initialize a queue element holding `value`.
// BUG FIX: the original fell off the end of this non-void function without
// returning the new element — undefined behavior when the caller uses the
// result. Add the missing `return e`.
typed_queue_elem(int) *
typed_queue_elem_fn(int,new)(int value)
{
  typed_queue_elem(int) *e =
    (typed_queue_elem(int) *) malloc(sizeof(int_q_element_t));
  e->value = value;
  typed_queue_elem_ptr_set(int, qtype)(&e->next, 0);
  return e;
}
// Pop up to n elements from the shared queue, printing each; stop early
// when the queue runs dry.
void pop(int n)
{
  for (int i = 0; i < n; i++) {
    typed_queue_elem(int) *e = typed_queue_pop(int, qtype)(&queue);
    if (e == 0) {
      printf("%d queue empty\n", omp_get_thread_num());
      break;
    }
    printf("%d popping %d\n", omp_get_thread_num(), e->value);
  }
}
// Push elements with values [min, min+n) onto the shared queue.
void push(int min, int n)
{
  for (int i = min; i < min + n; i++) {
    printf("%d pushing %d\n", omp_get_thread_num(), i);
    typed_queue_push(int, qtype)(&queue, typed_queue_elem_fn(int, new)(i));
  }
}
// Print every element of a stolen chain.
// Fix: removed the unused local variable `i` left over from an index loop.
void
dump
(
 int_q_element_t *e
)
{
  for (; e; e = (int_q_element_t *) typed_queue_elem_ptr_get(int,qtype)(&e->next)) {
    printf("%d stole %d\n", omp_get_thread_num(), e->value);
  }
}
// Concurrency smoke test: every OpenMP thread hammers the one shared queue
// with interleaved pushes, pops, and a steal of the whole chain.
int
main
(
 int argc,
 char **argv
)
{
  // Start with an empty queue.
  typed_queue_elem_ptr_set(int, qtype)(&queue, 0);
#pragma omp parallel
  {
    push(0, 30);
    pop(10);
    push(100, 12);
    // pop(100);
    // Steal whatever remains, print it, then push the chain back.
    // NOTE(review): e may be 0 here if another thread stole first — confirm
    // typed_queue_push tolerates a null chain.
    int_q_element_t *e = typed_queue_steal(int, qtype)(&queue);
    dump(e);
    push(300, 30);
    typed_queue_push(int, qtype)(&queue, e);
    pop(100);
  }
}
#endif
|
GB_binop__isne_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_uint32)
// A.*B function (eWiseMult):         GB (_AemultB_01__isne_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__isne_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_uint32)
// A*D function (colscale): GB (_AxD__isne_uint32)
// D*A function (rowscale): GB (_DxB__isne_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_uint32)
// C=scalar+B GB (_bind1st__isne_uint32)
// C=scalar+B' GB (_bind1st_tran__isne_uint32)
// C=A+scalar GB (_bind2nd__isne_uint32)
// C=A'+scalar GB (_bind2nd_tran__isne_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_UINT32 || GxB_NO_ISNE_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no accumulator.
// (Auto-generated: the loop body comes from the shared template, specialized
// via the GB_* macros defined above.)
GrB_Info GB (_Cdense_ewise3_noaccum__isne_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into an already-dense C.
// B_ek_slicing partitions B's entries across B_ntasks tasks / B_nthreads threads.
GrB_Info GB (_Cdense_accumB__isne_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into an already-dense C.
GrB_Info GB (_Cdense_accumb__isne_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable duplicate return — harmless artifact of the
    // code generator; kept because this file is auto-generated.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__isne_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx is C's value array; the scaling loop lives in the included template.
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isne_uint32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C values have the ISNE result type (uint32_t for the IS* operators)
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// Set union of the patterns of A and B; the mask M is optional
// (Mask_struct / Mask_comp select structural / complemented semantics).
// TaskList holds the pre-computed parallel schedule (C_ntasks tasks on
// C_nthreads threads); the C_to_* arrays map C vectors to M/A/B vectors.
GrB_Info GB (_AaddB__isne_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace declared here so the template's GB_FREE_WORK can release it
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// Set intersection of the patterns of A and B (method 01).
// TaskList holds the pre-computed parallel schedule; the C_to_* arrays map
// C vectors to the corresponding M/A/B vectors.
GrB_Info GB (_AemultB_01__isne_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// flipxy requests z = f(y,x) instead of z = f(x,y); whether that needs an
// explicitly flipped template is decided at compile time by GB_BINOP_FLIP.
GrB_Info GB (_AemultB_02__isne_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// M is sliced into M_ntasks tasks (M_ek_slicing) executed on M_nthreads threads.
GrB_Info GB (_AemultB_03__isne_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// ewise_method selects among the bitmap eWiseMult variants inside the template.
GrB_Info GB (_AemultB_bitmap__isne_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [k] = (x != Bx [k]) for every entry present in B, where x is a
// scalar bound to the first operand. Cx and Bx may be aliased.
GrB_Info GB (_bind1st__isne_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs and output
uint32_t *Bx = (uint32_t *) Bx_input ;
uint32_t *Cx = (uint32_t *) Cx_output ;
const uint32_t x = (*((uint32_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// skip entries not present in the bitmap (GBB is true when Bb is NULL)
if (!GBB (Bb, k)) continue ;
uint32_t bk = Bx [k] ;
Cx [k] = (x != bk) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Computes Cx [k] = (Ax [k] != y) for every entry present in A, where y is a
// scalar bound to the second operand. Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__isne_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs and output
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t *Cx = (uint32_t *) Cx_output ;
const uint32_t y = (*((uint32_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// skip entries not present in the bitmap (GBB is true when Ab is NULL)
if (!GBB (Ab, k)) continue ;
uint32_t ak = Ax [k] ;
Cx [k] = (ak != y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x != aij) ; \
}
// Computes C = op (x, A') using the shared transpose template, with the
// per-entry operation supplied by GB_CAST_OP above.
GrB_Info GB (_bind1st_tran__isne_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent code (identical here, since both operand
// types of ISNE_UINT32 are uint32_t)
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij != y) ; \
}
// Computes C = op (A', y) using the shared transpose template, with the
// per-entry operation supplied by GB_CAST_OP above. GB_ATYPE already matches
// the type of A (uint32_t), so no redefinition is needed here.
GrB_Info GB (_bind2nd_tran__isne_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
// ============================ file: biotsavart.h ============================
#pragma once
#include "EigenInclude.h"
#include "types.h"
#include "mapping.h"
#include "debugutils.h"
#include <limits>
#include <math.h>
#include <cmath>
#define VORTEX_RADIUS_DEF 1e-6
// #define VORTEX_RADIUS_SQ 1e-4
#define Nvert 4
// Declaration for parallel computing
#pragma omp declare reduction (sum_Vector3 : UVLM::Types::Vector3 : omp_out += omp_in) initializer(omp_priv = UVLM::Types::zeroVector3())
namespace UVLM
{
namespace BiotSavart
{
// DECLARATIONS
// Biot-Savart induced-velocity kernels for vortex segments, rings,
// horseshoes and whole lattices. All routines accept a vortex-core
// cut-off radius, defaulting to VORTEX_RADIUS_DEF.

// Influence of every panel of (zeta, gamma) on every collocation point
// of target_surface, projected on the local normal and accumulated in uout.
template <typename t_zeta,
typename t_gamma,
typename t_tsurface,
typename t_uout,
typename t_normals>
void multisurface
(
const t_zeta& zeta,
const t_gamma& gamma,
const t_tsurface& target_surface,
t_uout& uout,
// const UVLM::Types::IntPair& dimensions,
const bool& image_method = false,
const t_normals& normal = NULL,
// const bool& horseshoe = false,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
// As multisurface, but adds a steady wake (horseshoe or discretised rings).
template <typename t_zeta,
typename t_zeta_star,
typename t_gamma,
typename t_gamma_star,
typename t_tsurface,
typename t_uout,
typename t_normals>
void multisurface_steady_wake
(
const t_zeta& zeta,
const t_zeta_star& zeta_star,
const t_gamma& gamma,
const t_gamma_star& gamma_star,
const t_tsurface& target_surface,
const bool& horseshoe,
t_uout& uout,
const bool& image_method = false,
const t_normals& normal = NULL,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
// As multisurface, but adds an unsteady wake truncated to n_rows panels
// (n_rows == -1 means the full wake).
template <typename t_zeta,
typename t_zeta_star,
typename t_gamma,
typename t_gamma_star,
typename t_tsurface,
typename t_uout,
typename t_normals>
void multisurface_unsteady_wake
(
const t_zeta& zeta,
const t_zeta_star& zeta_star,
const t_gamma& gamma,
const t_gamma_star& gamma_star,
const t_tsurface& target_surface,
t_uout& uout,
const bool& image_method,
const t_normals& normal,
const int& n_rows = -1,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
// Velocity induced at target_triad by lattice panels [Mstart,Mend) x
// [Nstart,Nend), accumulated per-panel in uout.
template <typename t_zeta,
typename t_gamma,
typename t_ttriad,
typename t_uout>
void surface
(
const t_zeta& zeta,
const t_gamma& gamma,
const t_ttriad& target_triad,
t_uout& uout,
unsigned int Mstart = 0,
unsigned int Nstart = 0,
unsigned int Mend = -1,
unsigned int Nend = -1,
const bool& image_method = false,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
// As surface, plus a steady wake contribution (horseshoe or ring-by-ring).
template <typename t_zeta,
typename t_zeta_star,
typename t_gamma,
typename t_gamma_star,
typename t_ttriad,
typename t_uout>
void surface_with_steady_wake
(
const t_zeta& zeta,
const t_zeta_star& zeta_star,
const t_gamma& gamma,
const t_gamma_star& gamma_star,
const t_ttriad& target_triad,
const bool& horseshoe,
t_uout& uout,
const bool& image_method = false,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
// As surface, plus an unsteady wake contribution truncated to n_rows panels.
template <typename t_zeta,
typename t_zeta_star,
typename t_gamma,
typename t_gamma_star,
typename t_ttriad,
typename t_uout>
void surface_with_unsteady_wake
(
const t_zeta& zeta,
const t_zeta_star& zeta_star,
const t_gamma& gamma,
const t_gamma_star& gamma_star,
const t_ttriad& target_triad,
t_uout& uout,
const bool& image_method,
const int& n_rows = -1, // default val = -1
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
// Velocity induced by one quadrilateral vortex ring with corner coordinates
// in the (x, y, z) blocks and circulation gamma_star.
template <typename t_triad,
typename t_block>
//typename t_uind>
UVLM::Types::Vector3 vortex_ring
(
const t_triad& target_triad,
const t_block& x,
const t_block& y,
const t_block& z,
const UVLM::Types::Real& gamma_star,
// t_uind& uind,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
// Velocity induced by a horseshoe vortex (two semi-infinite trailing legs
// plus one finite segment), accumulated into uind.
template <typename t_triad,
typename t_block>
void horseshoe
(
const t_triad& target_triad,
const t_block& x,
const t_block& y,
const t_block& z,
const UVLM::Types::Real& gamma,
UVLM::Types::Vector3& uind,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
// Velocity induced by a single straight vortex segment v1->v2 of
// circulation gamma at point target_triad.
template <typename t_triad>
//typename t_uind>
UVLM::Types::Vector3 segment
(
const t_triad& target_triad,
const UVLM::Types::Vector3& v1,
const UVLM::Types::Vector3& v2,
const UVLM::Types::Real& gamma,
// t_uind& uind,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
// Total induced velocity (bound + wake) evaluated on the wake grid.
template <typename t_zeta,
typename t_zeta_star,
typename t_gamma,
typename t_gamma_star,
typename t_uout>
void total_induced_velocity_on_wake
(
const t_zeta& zeta,
const t_zeta_star& zeta_star,
const t_gamma& gamma,
const t_gamma_star& gamma_star,
t_uout& uout,
const bool& image_method = false,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
// Velocity induced by the whole lattice (zeta, gamma) at each collocation
// point in zeta_col, accumulated into u_ind.
template <typename t_zeta,
typename t_gamma,
typename t_zeta_col,
typename t_u_ind>
void whole_surface_on_surface
(
const t_zeta& zeta,
const t_gamma& gamma,
const t_zeta_col& zeta_col,
t_u_ind& u_ind,
const bool image_method = false,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
// Velocity induced by the whole lattice (zeta, gamma) at a single point.
template <typename t_zeta,
typename t_gamma,
typename t_ttriad>
// typename t_uout>
UVLM::Types::Vector3 whole_surface
(
const t_zeta& zeta,
const t_gamma& gamma,
const t_ttriad& target_triad,
// t_uout& uout,
const bool& image_method,
const UVLM::Types::Real& vortex_radius,
unsigned int Mstart = 0,
unsigned int Nstart = 0
);
// Total induced velocity (bound surface + wake) at a single point.
template <typename t_ttriad,
typename t_zeta,
typename t_zeta_star,
typename t_gamma,
typename t_gamma_star>
UVLM::Types::Vector3 total_induced_velocity_on_point
(
const t_ttriad& target_triad,
const t_zeta& zeta,
const t_zeta_star& zeta_star,
const t_gamma& gamma,
const t_gamma_star& gamma_star,
const bool& image_method,
const UVLM::Types::Real& vortex_radius = VORTEX_RADIUS_DEF
);
}
}
namespace UVLMlin{
// Linearised-UVLM kernels operating on Eigen Map views (see mapping.h).
// NOTE(review): semantics below are inferred from names and signatures —
// confirm against the implementations.

// velP += velocity induced at zetaP by one vortex panel (corners ZetaPanel)
void biot_panel_map( map_RowVec3& velP,
const map_RowVec3 zetaP,
const map_Mat4by3 ZetaPanel,
const double gamma,
double vortex_radius);
// derivatives of the panel-induced velocity w.r.t. zetaP (DerP) and the
// panel corner coordinates (DerVertices), dense-matrix interface
void der_biot_panel(Matrix3d& DerP,
Matrix3d DerVertices[Nvert],
const RowVector3d zetaP,
const Matrix4by3d ZetaPanel,
const double gamma);
// same derivatives, map-based interface
void der_biot_panel_map( map_Mat3by3& DerP,
Vec_map_Mat3by3& DerVertices,
const map_RowVec3 zetaP,
const map_Mat4by3 ZetaPanel,
const double gamma,
double vortex_radius);
// derivative of the unit vector r/|r| given |r|^-1 and -|r|^-3
void der_runit( Matrix3d& Der,
const RowVector3d& rv,
double rinv,
double minus_rinv3);
// product of a cross-product derivative with the skew matrix of rv
Matrix3d Dvcross_by_skew3d(const Matrix3d& Dvcross,
const RowVector3d& rv);
// derivatives of the induced velocity at zetaC w.r.t. collocation point
// (DerC) and input-surface vertices (DerV)
void dvinddzeta(map_Mat3by3 DerC,
map_Mat DerV,
const map_RowVec3 zetaC,
Vec_map_Mat ZetaIn,
map_Mat GammaIn,
int& M_in,
int& N_in,
int& Kzeta_in,
bool& IsBound,
int& M_in_bound, // M of bound surf associated
int& Kzeta_in_bound,
double vortex_radius
);
// 3-component aerodynamic influence coefficients at point zetaC
void aic3( map_Mat AIC3,
const map_RowVec3 zetaC,
Vec_map_Mat ZetaIn,
int& M_in,
int& N_in,
double vortex_radius);
// induced velocity at point zetaC due to surface (ZetaIn, GammaIn)
void ind_vel(map_RowVec3 velC,
const map_RowVec3 zetaC,
Vec_map_Mat ZetaIn,
map_Mat GammaIn,
int& M_in,
int& N_in,
double vortex_radius);
}
// SOURCE CODE
template <typename t_triad>
inline UVLM::Types::Vector3 UVLM::BiotSavart::segment
(
    const t_triad& rp,
    const UVLM::Types::Vector3& v1,
    const UVLM::Types::Vector3& v2,
    const UVLM::Types::Real& gamma,
    const UVLM::Types::Real& vortex_radius
)
{
    // Biot-Savart law: velocity induced at point rp by the straight vortex
    // segment v1->v2 carrying circulation gamma, with a core cut-off of
    // radius vortex_radius. Returns zero inside the cut-off region.
    UVLM::Types::Vector3 uind;

    // seg = v2 - v1; rel1 = rp - v1; rel2 = rp - v2 (and their norms)
    UVLM::Types::Real seg[3], seg_norm = 0.0;
    UVLM::Types::Real rel1[3], rel1_norm = 0.0;
    UVLM::Types::Real rel2[3], rel2_norm = 0.0;
    // hopefully this loop is unrolled
    for (uint i = 0; i < 3; ++i)
    {
        seg[i] = v2(i) - v1(i);
        rel1[i] = rp(i) - v1(i);
        rel2[i] = rp(i) - v2(i);
        seg_norm += seg[i]*seg[i];
        rel1_norm += rel1[i]*rel1[i];
        rel2_norm += rel2[i]*rel2[i];
    }
    seg_norm = sqrt(seg_norm);
    rel1_norm = sqrt(rel1_norm);
    rel2_norm = sqrt(rel2_norm);

    // target lies inside the vortex core of either endpoint: no influence
    if ((rel1_norm < vortex_radius) || (rel2_norm < vortex_radius))
    {
        uind(0) = 0.0;
        uind(1) = 0.0;
        uind(2) = 0.0;
        return uind;
    }

    // cross = rel1 x rel2
    UVLM::Types::Real cross[3];
    cross[0] = rel1[1]*rel2[2] - rel1[2]*rel2[1];
    cross[1] = rel1[2]*rel2[0] - rel1[0]*rel2[2];
    cross[2] = rel1[0]*rel2[1] - rel1[1]*rel2[0];
    UVLM::Types::Real cross_norm_sq;
    cross_norm_sq = cross[0]*cross[0] +
                    cross[1]*cross[1] +
                    cross[2]*cross[2];

    // target is (numerically) on the segment axis: no influence.
    // NOTE(review): this compares |rel1 x rel2|^2 against vortex_radius^2,
    // matching the original implementation — confirm the intended cut-off.
    if (cross_norm_sq < vortex_radius*vortex_radius)
    {
        uind(0) = 0.0;
        uind(1) = 0.0;
        uind(2) = 0.0;
        return uind;
    }

    UVLM::Types::Real dot_seg_rel1;
    dot_seg_rel1 = seg[0]*rel1[0] +
                   seg[1]*rel1[1] +
                   seg[2]*rel1[2];
    UVLM::Types::Real dot_seg_rel2;
    dot_seg_rel2 = seg[0]*rel2[0] +
                   seg[1]*rel2[1] +
                   seg[2]*rel2[2];

    // scalar Biot-Savart kernel
    UVLM::Types::Real K;
    K = (gamma*UVLM::Constants::INV_PI4/(cross_norm_sq))*
        (dot_seg_rel1/rel1_norm - dot_seg_rel2/rel2_norm);
    uind(0) = K*cross[0];
    uind(1) = K*cross[1];
    uind(2) = K*cross[2];
    return uind;
}
template <typename t_triad,
typename t_block>
void UVLM::BiotSavart::horseshoe
(
const t_triad& target_triad,
const t_block& x,
const t_block& y,
const t_block& z,
const UVLM::Types::Real& gamma_star,
UVLM::Types::Vector3& uind,
const UVLM::Types::Real& vortex_radius
)
{
// Velocity induced at target_triad by a horseshoe vortex of circulation
// gamma_star, accumulated into uind (which is NOT zeroed here).
// three segments.
//
// 0___________3
// | |
// | |
// | |
// | |
// | |
// | |
// | |
// 1| |2
//
// segments 0-1 and 2-3 are represented as length 1, but they are effectively
// infinite
// segment 3-0 is considered as a normal one
UVLM::Types::Vector3 v1;
UVLM::Types::Vector3 v2;
// segment 3-0 (finite): standard Biot-Savart segment
v1 << x(UVLM::Mapping::vortex_indices(3, 0),
UVLM::Mapping::vortex_indices(3, 1)),
y(UVLM::Mapping::vortex_indices(3, 0),
UVLM::Mapping::vortex_indices(3, 1)),
z(UVLM::Mapping::vortex_indices(3, 0),
UVLM::Mapping::vortex_indices(3, 1));
v2 << x(UVLM::Mapping::vortex_indices(0, 0),
UVLM::Mapping::vortex_indices(0, 1)),
y(UVLM::Mapping::vortex_indices(0, 0),
UVLM::Mapping::vortex_indices(0, 1)),
z(UVLM::Mapping::vortex_indices(0, 0),
UVLM::Mapping::vortex_indices(0, 1));
uind += UVLM::BiotSavart::segment(target_triad,
v1,
v2,
gamma_star,
vortex_radius);
// uind);
// segment 0-1 (semi-infinite trailing leg)
v1 << x(UVLM::Mapping::vortex_indices(0, 0),
UVLM::Mapping::vortex_indices(0, 1)),
y(UVLM::Mapping::vortex_indices(0, 0),
UVLM::Mapping::vortex_indices(0, 1)),
z(UVLM::Mapping::vortex_indices(0, 0),
UVLM::Mapping::vortex_indices(0, 1));
v2 << x(UVLM::Mapping::vortex_indices(1, 0),
UVLM::Mapping::vortex_indices(1, 1)),
y(UVLM::Mapping::vortex_indices(1, 0),
UVLM::Mapping::vortex_indices(1, 1)),
z(UVLM::Mapping::vortex_indices(1, 0),
UVLM::Mapping::vortex_indices(1, 1));
// here the segment will be considered as 1----->2 and the
// point 2 is in infinity, so beta2=pi
UVLM::Types::Vector3 r0 = v2 - v1;
UVLM::Types::Vector3 r1 = target_triad - v1;
UVLM::Types::Vector3 r2 = target_triad - v2;
UVLM::Types::Vector3 r1_cross_r2 = r1.cross(r2);
// perpendicular distance from target to the segment line
UVLM::Types::Real dist = (r1_cross_r2).norm()/r0.norm();
UVLM::Types::Real beta1;
UVLM::Types::Real beta2;
UVLM::Types::Vector3 u_radial;
// skip if the target lies within the vortex core of this segment
if (!((r1.norm() < vortex_radius) ||
(r2.norm() < vortex_radius) ||
(r1_cross_r2.norm() < vortex_radius)))
{
beta1 = r0.dot(r1)/(r0.norm()*r1.norm());
// NOTE(review): beta2 is assigned but not read in this branch — the
// -cos(beta2) = +1 contribution is folded into the (beta1 + 1.0) factor.
beta2 = UVLM::Constants::PI;
u_radial = (r1_cross_r2)/(r1_cross_r2).norm();
uind += gamma_star/(UVLM::Constants::PI4*dist)*(beta1 + 1.0)*
u_radial;
}
// segment 2-3 (semi-infinite trailing leg)
v1 << x(UVLM::Mapping::vortex_indices(2, 0),
UVLM::Mapping::vortex_indices(2, 1)),
y(UVLM::Mapping::vortex_indices(2, 0),
UVLM::Mapping::vortex_indices(2, 1)),
z(UVLM::Mapping::vortex_indices(2, 0),
UVLM::Mapping::vortex_indices(2, 1));
v2 << x(UVLM::Mapping::vortex_indices(3, 0),
UVLM::Mapping::vortex_indices(3, 1)),
y(UVLM::Mapping::vortex_indices(3, 0),
UVLM::Mapping::vortex_indices(3, 1)),
z(UVLM::Mapping::vortex_indices(3, 0),
UVLM::Mapping::vortex_indices(3, 1));
// here the segment will be considered as 1----->2 and the
// point 1 is in infinity, so beta1=0
r0 = v2 - v1;
r1 = target_triad - v1;
r2 = target_triad - v2;
r1_cross_r2 = r1.cross(r2);
dist = (r1_cross_r2).norm()/r0.norm();
if (!((r1.norm() < vortex_radius) ||
(r2.norm() < vortex_radius) ||
(r1_cross_r2.norm() < vortex_radius)))
{
beta2 = r0.dot(r2)/(r0.norm()*r2.norm());
// NOTE(review): dist is recomputed here with an expression identical
// to the one above — redundant but harmless.
dist = (r1.cross(r2)).norm()/r0.norm();
u_radial = (r1_cross_r2)/(r1_cross_r2).norm();
uind += gamma_star/(UVLM::Constants::PI4*dist)*(1.0 - beta2)*
u_radial;
}
}
template <typename t_triad,
          typename t_block>
UVLM::Types::Vector3 UVLM::BiotSavart::vortex_ring
(
    const t_triad& target_triad,
    const t_block& x,
    const t_block& y,
    const t_block& z,
    const UVLM::Types::Real& gamma,
    const UVLM::Types::Real& vortex_radius
)
{
    // Velocity induced at target_triad by one quadrilateral vortex ring of
    // circulation gamma, its corner coordinates read from the (x, y, z)
    // blocks through UVLM::Mapping::vortex_indices.
    UVLM::Types::Vector3 uind;
    uind.setZero();

    // a ring carrying negligible circulation induces nothing
    if (std::abs(gamma) < UVLM::Constants::EPSILON)
    {
        return uind;
    }

    const unsigned int n_segment = 4;
    UVLM::Types::Vector3 corner_a;
    UVLM::Types::Vector3 corner_b;
    for (unsigned int i_seg = 0; i_seg < n_segment; ++i_seg)
    {
        // segment runs from corner i_seg to the next corner (cyclically)
        const unsigned int first = i_seg;
        const unsigned int second = (first + 1)%n_segment;
        corner_a << x(UVLM::Mapping::vortex_indices(first, 0),
                      UVLM::Mapping::vortex_indices(first, 1)),
                    y(UVLM::Mapping::vortex_indices(first, 0),
                      UVLM::Mapping::vortex_indices(first, 1)),
                    z(UVLM::Mapping::vortex_indices(first, 0),
                      UVLM::Mapping::vortex_indices(first, 1));
        corner_b << x(UVLM::Mapping::vortex_indices(second, 0),
                      UVLM::Mapping::vortex_indices(second, 1)),
                    y(UVLM::Mapping::vortex_indices(second, 0),
                      UVLM::Mapping::vortex_indices(second, 1)),
                    z(UVLM::Mapping::vortex_indices(second, 0),
                      UVLM::Mapping::vortex_indices(second, 1));
        uind += UVLM::BiotSavart::segment(target_triad,
                                          corner_a,
                                          corner_b,
                                          gamma,
                                          vortex_radius);
    }
    return uind;
}
template <typename t_zeta,
typename t_gamma,
typename t_ttriad,
typename t_uout>
void UVLM::BiotSavart::surface
(
const t_zeta& zeta,
const t_gamma& gamma,
const t_ttriad& target_triad,
t_uout& uout,
unsigned int Mstart,
unsigned int Nstart,
unsigned int Mend,
unsigned int Nend,
const bool& image_method,
const UVLM::Types::Real& vortex_radius
)
{
// Velocity induced at target_triad by lattice panels [Mstart,Mend) x
// [Nstart,Nend) of (zeta, gamma), accumulated panel-by-panel into uout.
// Each segment's influence is computed once with unit circulation and then
// shared between the two rings adjoining that segment.
// NOTE(review): image_method is accepted but never used in this routine.
UVLM::Types::VecVecMatrixX span_seg_uout;
UVLM::Types::VecVecMatrixX chord_seg_uout;
// one extra row of spanwise segments and one extra column of chordwise ones
UVLM::Types::allocate_VecVecMat(span_seg_uout, 1, 3, (Mend-Mstart)+1, (Nend-Nstart));
UVLM::Types::allocate_VecVecMat(chord_seg_uout, 1, 3, (Mend-Mstart), (Nend-Nstart)+1);
UVLM::Types::Vector3 v1;
UVLM::Types::Vector3 v2;
UVLM::Types::Vector3 temp_uout;
for (uint i=Mstart; i<Mend; ++i)
{
for (uint j=Nstart; j<Nend; ++j)
{
// Spanwise vortices
v1 << zeta[0](i, j),
zeta[1](i, j),
zeta[2](i, j);
v2 << zeta[0](i, j+1),
zeta[1](i, j+1),
zeta[2](i, j+1);
temp_uout = UVLM::BiotSavart::segment(target_triad,
v1,
v2,
1.0,
vortex_radius);
span_seg_uout[0][0](i,j) = temp_uout(0);
span_seg_uout[0][1](i,j) = temp_uout(1);
span_seg_uout[0][2](i,j) = temp_uout(2);
// Streamwise/chordwise vortices
v2 << zeta[0](i+1, j),
zeta[1](i+1, j),
zeta[2](i+1, j);
temp_uout = UVLM::BiotSavart::segment(target_triad,
v1,
v2,
1.0,
vortex_radius);
chord_seg_uout[0][0](i,j) = temp_uout(0);
chord_seg_uout[0][1](i,j) = temp_uout(1);
chord_seg_uout[0][2](i,j) = temp_uout(2);
}
}
// Influence of the last spanwise vortex
for (uint j=Nstart; j<Nend; j++)
{
v1 << zeta[0](Mend, j),
zeta[1](Mend, j),
zeta[2](Mend, j);
v2 << zeta[0](Mend, j+1),
zeta[1](Mend, j+1),
zeta[2](Mend, j+1);
temp_uout = UVLM::BiotSavart::segment(target_triad,
v1,
v2,
1.0,
vortex_radius);
span_seg_uout[0][0](Mend,j) = temp_uout(0);
span_seg_uout[0][1](Mend,j) = temp_uout(1);
span_seg_uout[0][2](Mend,j) = temp_uout(2);
}
// Influence of the last chordwise vortex
for (uint i=Mstart; i<Mend; i++)
{
v1 << zeta[0](i, Nend),
zeta[1](i, Nend),
zeta[2](i, Nend);
v2 << zeta[0](i+1, Nend),
zeta[1](i+1, Nend),
zeta[2](i+1, Nend);
temp_uout = UVLM::BiotSavart::segment(target_triad,
v1,
v2,
1.0,
vortex_radius);
chord_seg_uout[0][0](i,Nend) = temp_uout(0);
chord_seg_uout[0][1](i,Nend) = temp_uout(1);
chord_seg_uout[0][2](i,Nend) = temp_uout(2);
}
// Transfer influence from segments to vortices
// Each ring (i,j) combines its four unit-circulation segment influences,
// scaled by gamma(i,j), with signs following the ring orientation.
for (uint i=Mstart; i<Mend; i++)
{
for (uint j=Nstart; j<Nend; j++)
{
for (uint i_dim=0; i_dim<UVLM::Constants::NDIM; ++i_dim)
{
uout[i_dim](i,j) -= span_seg_uout[0][i_dim](i,j)*gamma(i,j);
uout[i_dim](i,j) += span_seg_uout[0][i_dim](i+1,j)*gamma(i,j);
uout[i_dim](i,j) += chord_seg_uout[0][i_dim](i,j)*gamma(i,j);
uout[i_dim](i,j) -= chord_seg_uout[0][i_dim](i,j+1)*gamma(i,j);
}
// std::cout << i << " " << j << " "<< span_seg_uout[0][0](i,j) << " "<< span_seg_uout[0][1](i,j) << " "<< span_seg_uout[0][2](i,j) << std::endl;
// std::cout << i << " " << j << " "<< chord_seg_uout[0][0](i,j) << " "<< chord_seg_uout[0][1](i,j) << " "<< chord_seg_uout[0][2](i,j) << std::endl;
// std::cout << i << " " << j << " "<< uout[0](i,j) << " "<< uout[1](i,j) << " "<< uout[2](i,j) << std::endl;
}
}
}
template <typename t_zeta,
typename t_zeta_star,
typename t_gamma,
typename t_gamma_star,
typename t_ttriad,
typename t_uout>
void UVLM::BiotSavart::surface_with_steady_wake
(
const t_zeta& zeta,
const t_zeta_star& zeta_star,
const t_gamma& gamma,
const t_gamma_star& gamma_star,
const t_ttriad& target_triad,
const bool& horseshoe,
t_uout& uout,
const bool& image_method,
const UVLM::Types::Real& vortex_radius
)
{
// Influence at target_triad of the bound lattice plus a steady wake.
// The wake contribution is accumulated into the trailing-edge row of
// panels (i = Mend - 1) of uout.
const uint Mstart = 0;
const uint Nstart = 0;
const uint Mend = gamma.rows();
const uint Nend = gamma.cols();
// bound-surface contribution
UVLM::BiotSavart::surface(zeta,
gamma,
target_triad,
uout,
Mstart,
Nstart,
Mend,
Nend,
image_method,
vortex_radius);
const uint i0 = 0;
const uint i = Mend - 1;
if (horseshoe)
{
// wake modelled as one horseshoe vortex per spanwise station
UVLM::Types::Vector3 temp_uout;
for (unsigned int j=Nstart; j<Nend; ++j)
{
temp_uout.setZero();
UVLM::BiotSavart::horseshoe(target_triad,
zeta_star[0].template block<2,2>(i0,j),
zeta_star[1].template block<2,2>(i0,j),
zeta_star[2].template block<2,2>(i0,j),
gamma_star(i0,j),
temp_uout,
vortex_radius);
uout[0](i, j) += temp_uout(0);
uout[1](i, j) += temp_uout(1);
uout[2](i, j) += temp_uout(2);
}
} else
{
// discretised wake: one vortex ring per wake panel
const uint mstar = gamma_star.rows();
UVLM::Types::Vector3 temp_uout;
for (unsigned int j=Nstart; j<Nend; ++j)
{
temp_uout.setZero();
// #pragma omp parallel for collapse(1) reduction(sum_Vector3: temp_uout)
for (uint i_star=0; i_star<mstar; ++i_star)
{
temp_uout += UVLM::BiotSavart::vortex_ring(target_triad,
zeta_star[0].template block<2,2>(i_star, j),
zeta_star[1].template block<2,2>(i_star, j),
zeta_star[2].template block<2,2>(i_star, j),
gamma_star(i_star, j),
vortex_radius);
// temp_uout);
}
uout[0](i, j) += temp_uout(0);
uout[1](i, j) += temp_uout(1);
uout[2](i, j) += temp_uout(2);
}
}
}
template <typename t_zeta,
typename t_zeta_star,
typename t_gamma,
typename t_gamma_star,
typename t_ttriad,
typename t_uout>
void UVLM::BiotSavart::surface_with_unsteady_wake
(
const t_zeta& zeta,
const t_zeta_star& zeta_star,
const t_gamma& gamma,
const t_gamma_star& gamma_star,
const t_ttriad& target_triad,
t_uout& uout,
const bool& image_method,
const int& n_rows,
const UVLM::Types::Real& vortex_radius
)
{
// Influence at target_triad of the bound lattice plus an unsteady wake
// truncated to n_rows panels (n_rows == -1 uses the full wake). The wake
// contribution is accumulated into the trailing-edge row (i = Mend - 1).
const uint Mstart = 0;
const uint Nstart = 0;
const uint Mend = gamma.rows();
const uint Nend = gamma.cols();
// UVLM::Types::Vector3 temp_uout;
// Surface contribution
UVLM::BiotSavart::surface(zeta,
gamma,
target_triad,
uout,
Mstart,
Nstart,
Mend,
Nend,
image_method,
vortex_radius);
// wake contribution
// n_rows controls the number of panels that are included
// in the final result. Usually for unsteady wake, the value
// will be 1 when computing AIC coeffs.
// unless if gamma_star is a dummy one, just a row with ones.
const uint mstar = (n_rows == -1) ? gamma_star.rows():n_rows;
const uint i = Mend - 1;
UVLM::Types::Vector3 temp_uout;
for (uint j=Nstart; j<Nend; ++j)
{
temp_uout.setZero();
// #pragma omp parallel for collapse(1) reduction(sum_Vector3: temp_uout)
for (uint i_star=0; i_star<mstar; ++i_star)
{
// std::cout << "WARNING: this should not be computed" << std::endl;
temp_uout += UVLM::BiotSavart::vortex_ring(target_triad,
zeta_star[0].template block<2,2>(i_star, j),
zeta_star[1].template block<2,2>(i_star, j),
zeta_star[2].template block<2,2>(i_star, j),
gamma_star(i_star, j),
vortex_radius);
// temp_uout);
}
uout[0](i, j) += temp_uout(0);
uout[1](i, j) += temp_uout(1);
uout[2](i, j) += temp_uout(2);
}
}
template <typename t_zeta,
          typename t_gamma,
          typename t_tsurface,
          typename t_uout,
          typename t_normals>
void UVLM::BiotSavart::multisurface
(
    const t_zeta& zeta,
    const t_gamma& gamma,
    const t_tsurface& target_surface,
    t_uout& uout,
    const bool& image_method,
    const t_normals& normal,
    const UVLM::Types::Real& vortex_radius
)
{
    // AIC-style assembly: for every collocation point of target_surface,
    // compute the velocity induced by every panel of (zeta, gamma) and
    // accumulate its projection on the local normal into
    // uout(collocation_counter, surface_counter).
    const unsigned int rows_collocation = target_surface[0].rows();
    const unsigned int cols_collocation = target_surface[0].cols();
    unsigned int surf_rows = gamma.rows();
    unsigned int surf_cols = gamma.cols();
    #pragma omp parallel for collapse(2)
    for (unsigned int i_col=0; i_col<rows_collocation; ++i_col)
    {
        for (unsigned int j_col=0; j_col<cols_collocation; ++j_col)
        {
            // thread-local scratch: one induced velocity per source panel
            UVLM::Types::Vector3 target_triad;
            UVLM::Types::VecMatrixX temp_uout;
            UVLM::Types::allocate_VecMat(temp_uout, zeta, -1);
            UVLM::Types::initialise_VecMat(temp_uout, 0.0);
            int collocation_counter = j_col + i_col*cols_collocation;
            target_triad << target_surface[0](i_col, j_col),
                            target_surface[1](i_col, j_col),
                            target_surface[2](i_col, j_col);
            // BUG FIX: image_method was previously omitted from this call,
            // so vortex_radius (a Real) was bound to the bool image_method
            // parameter of surface() and the radius silently fell back to
            // its default value.
            UVLM::BiotSavart::surface(zeta,
                                      gamma,
                                      target_triad,
                                      temp_uout,
                                      0,
                                      0,
                                      gamma.rows(),
                                      gamma.cols(),
                                      image_method,
                                      vortex_radius);
            for (unsigned int i_surf=0; i_surf<surf_rows; ++i_surf)
            {
                for (unsigned int j_surf=0; j_surf<surf_cols; ++j_surf)
                {
                    int surface_counter = j_surf + i_surf*surf_cols;
                    // project the induced velocity on the collocation normal
                    uout(collocation_counter, surface_counter) +=
                        temp_uout[0](i_surf, j_surf)*normal[0](i_col, j_col) +
                        temp_uout[1](i_surf, j_surf)*normal[1](i_col, j_col) +
                        temp_uout[2](i_surf, j_surf)*normal[2](i_col, j_col);
                }
            }
        }
    }
}
template <typename t_zeta,
          typename t_zeta_star,
          typename t_gamma,
          typename t_gamma_star,
          typename t_tsurface,
          typename t_uout,
          typename t_normals>
void UVLM::BiotSavart::multisurface_steady_wake
(
    const t_zeta& zeta,
    const t_zeta_star& zeta_star,
    const t_gamma& gamma,
    const t_gamma_star& gamma_star,
    const t_tsurface& target_surface,
    const bool& horseshoe,
    t_uout& uout,
    const bool& image_method,
    const t_normals& normal,
    const UVLM::Types::Real& vortex_radius
)
{
    // AIC-style assembly including a steady wake: for every collocation
    // point of target_surface, compute the velocity induced by every panel
    // of (zeta, gamma) plus the wake (zeta_star, gamma_star), and accumulate
    // its projection on the local normal into uout.
    const unsigned int rows_collocation = target_surface[0].rows();
    const unsigned int cols_collocation = target_surface[0].cols();
    const uint surf_rows = gamma.rows();
    const uint surf_cols = gamma.cols();
    #pragma omp parallel for collapse(2)
    for (unsigned int i_col=0; i_col<rows_collocation; ++i_col)
    {
        for (unsigned int j_col=0; j_col<cols_collocation; ++j_col)
        {
            // thread-local scratch: one induced velocity per source panel
            UVLM::Types::Vector3 target_triad;
            UVLM::Types::VecMatrixX temp_uout;
            UVLM::Types::allocate_VecMat(temp_uout, zeta, -1);
            UVLM::Types::initialise_VecMat(temp_uout, 0.0);
            int collocation_counter = j_col + i_col*cols_collocation;
            target_triad << target_surface[0](i_col, j_col),
                            target_surface[1](i_col, j_col),
                            target_surface[2](i_col, j_col);
            // BUG FIX: image_method was previously omitted from this call,
            // so vortex_radius (a Real) was bound to the bool image_method
            // parameter of surface_with_steady_wake() and the radius
            // silently fell back to its default value.
            UVLM::BiotSavart::surface_with_steady_wake(zeta,
                                                       zeta_star,
                                                       gamma,
                                                       gamma_star,
                                                       target_triad,
                                                       horseshoe,
                                                       temp_uout,
                                                       image_method,
                                                       vortex_radius
                                                       );
            for (unsigned int i_surf=0; i_surf<surf_rows; ++i_surf)
            {
                for (unsigned int j_surf=0; j_surf<surf_cols; ++j_surf)
                {
                    int surface_counter = i_surf*surf_cols + j_surf;
                    // project the induced velocity on the collocation normal
                    uout(collocation_counter, surface_counter) +=
                        temp_uout[0](i_surf, j_surf)*normal[0](i_col, j_col) +
                        temp_uout[1](i_surf, j_surf)*normal[1](i_col, j_col) +
                        temp_uout[2](i_surf, j_surf)*normal[2](i_col, j_col);
                }
            }
        }
    }
}
template <typename t_zeta,
          typename t_zeta_star,
          typename t_gamma,
          typename t_gamma_star,
          typename t_tsurface,
          typename t_uout,
          typename t_normals>
void UVLM::BiotSavart::multisurface_unsteady_wake
(
    const t_zeta& zeta,
    const t_zeta_star& zeta_star,
    const t_gamma& gamma,
    const t_gamma_star& gamma_star,
    const t_tsurface& target_surface,
    t_uout& uout,
    const bool& image_method,
    const t_normals& normal,
    const int& n_rows,
    const UVLM::Types::Real& vortex_radius
)
{
    // AIC-style assembly including an unsteady wake truncated to n_rows
    // panels (n_rows == -1 uses the full wake): project the induced velocity
    // at each collocation point on the local normal and accumulate in uout.
    // FIX: removed the function-scope temp_uout / allocate_VecMat /
    // target_triad that were dead code — they were shadowed by the
    // thread-local declarations inside the parallel loop.
    const unsigned int rows_collocation = target_surface[0].rows();
    const unsigned int cols_collocation = target_surface[0].cols();
    unsigned int surf_rows = gamma.rows();
    unsigned int surf_cols = gamma.cols();
    #pragma omp parallel for collapse(2)
    for (unsigned int i_col=0; i_col<rows_collocation; ++i_col)
    {
        for (unsigned int j_col=0; j_col<cols_collocation; ++j_col)
        {
            // thread-local scratch: one induced velocity per source panel
            UVLM::Types::Vector3 target_triad;
            UVLM::Types::VecMatrixX temp_uout;
            UVLM::Types::allocate_VecMat(temp_uout, zeta, -1);
            UVLM::Types::initialise_VecMat(temp_uout, 0.0);
            int collocation_counter = j_col + i_col*cols_collocation;
            target_triad << target_surface[0](i_col, j_col),
                            target_surface[1](i_col, j_col),
                            target_surface[2](i_col, j_col);
            UVLM::BiotSavart::surface_with_unsteady_wake(zeta,
                                                         zeta_star,
                                                         gamma,
                                                         gamma_star,
                                                         target_triad,
                                                         temp_uout,
                                                         image_method,
                                                         n_rows,
                                                         vortex_radius
                                                         );
            for (unsigned int i_surf=0; i_surf<surf_rows; ++i_surf)
            {
                for (unsigned int j_surf=0; j_surf<surf_cols; ++j_surf)
                {
                    int surface_counter = i_surf*surf_cols + j_surf;
                    // project the induced velocity on the collocation normal
                    uout(collocation_counter, surface_counter) +=
                        temp_uout[0](i_surf, j_surf)*normal[0](i_col, j_col) +
                        temp_uout[1](i_surf, j_surf)*normal[1](i_col, j_col) +
                        temp_uout[2](i_surf, j_surf)*normal[2](i_col, j_col);
                }
            }
        }
    }
}
// template <typename t_zeta,
// typename t_gamma,
// typename t_zeta_col,
// typename t_u_ind>
// void UVLM::BiotSavart::multisurface_on_multisurface
// (
// const t_zeta& zeta,
// const t_gamma& gamma,
// const t_zeta_col& zeta_col,
// const bool image_method,
// t_u_ind& u_ind
// )
// {
//
// }
template <typename t_zeta,
          typename t_gamma,
          typename t_zeta_col,
          typename t_u_ind>
void UVLM::BiotSavart::whole_surface_on_surface
(
    const t_zeta& zeta,
    const t_gamma& gamma,
    const t_zeta_col& zeta_col,
    t_u_ind& u_ind,
    const bool image_method,
    const UVLM::Types::Real& vortex_radius
)
{
    // Accumulates, point by point, the velocity induced by the whole
    // lattice (zeta, gamma) at every collocation point in zeta_col.
    const uint n_m = zeta_col[0].rows();
    const uint n_n = zeta_col[0].cols();
    #pragma omp parallel for collapse(2)
    for (uint im=0; im<n_m; ++im)
    {
        for (uint jn=0; jn<n_n; ++jn)
        {
            UVLM::Types::Vector3 point;
            point << zeta_col[0](im, jn),
                     zeta_col[1](im, jn),
                     zeta_col[2](im, jn);
            const UVLM::Types::Vector3 w_induced =
                UVLM::BiotSavart::whole_surface
                (
                    zeta,
                    gamma,
                    point,
                    image_method,
                    vortex_radius
                );
            u_ind[0](im, jn) += w_induced(0);
            u_ind[1](im, jn) += w_induced(1);
            u_ind[2](im, jn) += w_induced(2);
        }
    }
}
// Velocity induced at a single target point by one vortex lattice, built by
// summing every vortex segment once with the circulation difference of the
// rings sharing it (ring model collapsed to segments).
// NOTE(review): image_method is unused in this body — presumably handled by
// callers or by the segment kernel; confirm before relying on it here.
template <typename t_zeta,
          typename t_gamma,
          typename t_ttriad>
// typename t_uout>
UVLM::Types::Vector3 UVLM::BiotSavart::whole_surface
(
    const t_zeta& zeta,            // lattice vertices, assumed [3](M+1, N+1)
    const t_gamma& gamma,          // ring circulations, (M, N)
    const t_ttriad& target_triad,  // evaluation point
    // t_uout& uout,
    const bool& image_method,
    const UVLM::Types::Real& vortex_radius,
    unsigned int Mstart,           // first panel indices to include
    unsigned int Nstart            // (defaults declared elsewhere)
)
{
    // Loop bounds always cover the full surface from (Mstart, Nstart).
    uint Mend = gamma.rows();
    uint Nend = gamma.cols();
    UVLM::Types::Vector3 uout;
    uout.setZero();
    UVLM::Types::Vector3 temp_uout;  // NOTE(review): unused scratch variable
    UVLM::Types::Vector3 v1;
    UVLM::Types::Vector3 v2;
    UVLM::Types::Real delta_gamma;
    // Interior segments: adjacent rings share each segment, so evaluate it
    // once with the net circulation (difference of the two rings).
    for (unsigned int i=Mstart; i<Mend; ++i)
    {
        for (unsigned int j=Nstart; j<Nend; ++j)
        {
            // Spanwise vortices
            v1 << zeta[0](i, j),
                  zeta[1](i, j),
                  zeta[2](i, j);
            v2 << zeta[0](i, j+1),
                  zeta[1](i, j+1),
                  zeta[2](i, j+1);
            if (i == Mstart){
                // leading row: no upstream ring to subtract
                delta_gamma = gamma(i, j);
            } else {
                delta_gamma = gamma(i, j) - gamma(i-1, j);
            }
            uout += UVLM::BiotSavart::segment(target_triad,
                                              v1,
                                              v2,
                                              -delta_gamma,
                                              vortex_radius);
            // Streamwise/chordwise vortices
            v2 << zeta[0](i+1, j),
                  zeta[1](i+1, j),
                  zeta[2](i+1, j);
            if (j == Nstart){
                // first column: no inboard ring to subtract
                delta_gamma = -gamma(i, j);
            } else {
                delta_gamma = gamma(i, j-1) - gamma(i, j);
            }
            uout += UVLM::BiotSavart::segment(target_triad,
                                              v1,
                                              v2,
                                              -delta_gamma,
                                              vortex_radius);
        }
    }
    // Closing spanwise segments on the last vertex row (i = Mend): only the
    // final ring row contributes.
    for (unsigned int j=Nstart; j<Nend; ++j)
    {
        // Spanwise vortices
        v1 << zeta[0](Mend, j),
              zeta[1](Mend, j),
              zeta[2](Mend, j);
        v2 << zeta[0](Mend, j+1),
              zeta[1](Mend, j+1),
              zeta[2](Mend, j+1);
        uout += UVLM::BiotSavart::segment(target_triad,
                                          v1,
                                          v2,
                                          gamma(Mend-1,j),
                                          vortex_radius);
    }
    // Closing chordwise segments on the last vertex column (j = Nend).
    for (unsigned int i=Mstart; i<Mend; ++i)
    {
        // Streamwise/chordwise vortices
        v1 << zeta[0](i, Nend),
              zeta[1](i, Nend),
              zeta[2](i, Nend);
        v2 << zeta[0](i+1, Nend),
              zeta[1](i+1, Nend),
              zeta[2](i+1, Nend);
        uout += UVLM::BiotSavart::segment(target_triad,
                                          v1,
                                          v2,
                                          -gamma(i, Nend-1),
                                          vortex_radius);
    }
    return uout;
}
template <typename t_zeta,
          typename t_zeta_star,
          typename t_gamma,
          typename t_gamma_star,
          typename t_uout>
void UVLM::BiotSavart::total_induced_velocity_on_wake
(
    const t_zeta& zeta,
    const t_zeta_star& zeta_star,
    const t_gamma& gamma,
    const t_gamma_star& gamma_star,
    t_uout& uout,
    const bool& image_method,
    const UVLM::Types::Real& vortex_radius
)
{
    // For every wake grid, accumulate the velocity induced by every wake
    // lattice and every bound lattice in the problem.
    const uint n_surf = zeta.size();
    for (uint target=0; target<n_surf; ++target)
    {
        for (uint source=0; source<n_surf; ++source)
        {
            // wake on wake
            UVLM::BiotSavart::whole_surface_on_surface
            (
                zeta_star[source],
                gamma_star[source],
                zeta_star[target],
                uout[target],
                image_method,
                vortex_radius
            );
            // surface on wake
            UVLM::BiotSavart::whole_surface_on_surface
            (
                zeta[source],
                gamma[source],
                zeta_star[target],
                uout[target],
                image_method,
                vortex_radius
            );
        }
    }
}
template <typename t_ttriad,
          typename t_zeta,
          typename t_zeta_star,
          typename t_gamma,
          typename t_gamma_star>
UVLM::Types::Vector3 UVLM::BiotSavart::total_induced_velocity_on_point
(
    const t_ttriad& target_triad,
    const t_zeta& zeta,
    const t_zeta_star& zeta_star,
    const t_gamma& gamma,
    const t_gamma_star& gamma_star,
    const bool& image_method,
    const UVLM::Types::Real& vortex_radius
)
{
    // Velocity induced at one point by all bound surfaces and their wakes.
    UVLM::Types::Vector3 total;
    total.setZero();
    const uint n_surf = zeta.size();
    for (uint i_surf=0; i_surf<n_surf; ++i_surf)
    {
        // wake contribution
        total += UVLM::BiotSavart::whole_surface
        (
            zeta_star[i_surf],
            gamma_star[i_surf],
            target_triad,
            image_method,
            vortex_radius
        );
        // bound surface contribution
        total += UVLM::BiotSavart::whole_surface
        (
            zeta[i_surf],
            gamma[i_surf],
            target_triad,
            image_method,
            vortex_radius
        );
    }
    return total;
}
namespace UVLMlin{

  // Shorthands for constants defined in the main UVLM library.
  const double PI = UVLM::Constants::PI;
  const double PIquart = UVLM::Constants::INV_PI4;  // presumably 1/(4*pi) per its name

  // Panel connectivity tables (Nvert = 4 vertices per panel):
  const int svec[Nvert]={0, 1, 2, 3}; // segment number
  const int avec[Nvert]={0, 1, 2, 3}; // segment start vertex
  const int bvec[Nvert]={1, 2, 3, 0}; // segment end vertex
  // (dm, dn): chordwise/spanwise index offsets of each vertex relative to
  // the panel's (m, n) corner
  const int dm[Nvert]={0,1,1,0};
  const int dn[Nvert]={0,0,1,1};
void biot_panel_map( map_RowVec3& velP,
                     const map_RowVec3 zetaP,
                     const map_Mat4by3 ZetaPanel,
                     const double gamma,
                     double vortex_radius){
  /*
  Biot-Savart velocity induced at point zetaP by a 4-vertex vortex ring
  (rows of ZetaPanel) of circulation gamma.  The result is ACCUMULATED
  into velP.  This implementation works with mapping objects.
  */

  // declarations
  int ii,aa,bb;
  const double Cbiot=PIquart*gamma;
  double vcr2;
  RowVector3d RAB, Vcr;
  Vector3d Vsc;      // NOTE(review): Vsc and RABsq appear unused here
  Vector4d RABsq;
  Matrix4by3d R;     // vectors P - vertex matrix
  Matrix4by3d Runit; // unit vectors P - vertex matrix

  // We keep vortex_radius_sq = vortex_radius. We have found accuracy issues
  // when vortex_radius_sq = vortex_radius*vortex_radius;
  // We think this is a limit for numerical accuracy so it makes
  // sense to keep it vortex_radius_sq = vortex_radius;
  double vortex_radius_sq = vortex_radius;

  // ----------------------------------------------- Compute common variables
  // these are constants or variables depending only on vertices and P coords
  for(ii=0;ii<Nvert;ii++){
    R.row(ii)=zetaP-ZetaPanel.row(ii);
    Runit.row(ii)=R.row(ii)/R.row(ii).norm();
  }

  // -------------------------------------------------- Loop through segments
  for(ii=0;ii<Nvert;ii++){
    aa=avec[ii];
    bb=bvec[ii];
    RAB=ZetaPanel.row(bb)-ZetaPanel.row(aa); // segment vector
    Vcr=R.row(aa).cross(R.row(bb));
    vcr2=Vcr.dot(Vcr);
    // regularisation: skip segments the point (almost) lies on
    if (vcr2<vortex_radius_sq*RAB.dot(RAB)) continue;
    velP += ((Cbiot/vcr2) * RAB.dot(Runit.row(aa)-Runit.row(bb))) *Vcr;
  }
}
// -----------------------------------------------------------------------------
void der_biot_panel( Matrix3d& DerP, Matrix3d DerVertices[Nvert],
  const RowVector3d zetaP, const Matrix4by3d ZetaPanel, const double gamma, double vortex_radius){
  /* Derivatives of the panel-induced velocity at zetaP with respect to the
  evaluation point (DerP) and to the panel vertices (DerVertices).
  Results are ACCUMULATED into the output matrices.
  This implementation is not suitable for the python interface. */

  // declarations
  int ii,aa,bb;
  const double Cbiot=PIquart*gamma;
  double r1inv, vcr2, vcr2inv, vcr4inv, dotprod, diag_fact, off_fact;
  RowVector3d RAB, Vcr, Tv;
  Vector3d Vsc;
  Matrix3d Dvcross, Ddiff, dQ_dRA, dQ_dRB, dQ_dRAB;
  Matrix4by3d R;     // vectors P - vertex matrix
  Matrix4by3d Runit; // unit vectors P - vertex matrix
  Matrix3d Array_Der_runit[Nvert]; // as a static arrays (we know size)

  // We keep vortex_radius_sq = vortex_radius. We have found accuracy issues
  // when vortex_radius_sq = vortex_radius*vortex_radius;
  // We think this is a limit for numerical accuracy so it makes
  // sense to keep it vortex_radius_sq = vortex_radius;
  double vortex_radius_sq = vortex_radius;

  // ----------------------------------------------- Compute common variables
  // these are constants or variables depending only on vertices and P coords
  for(ii=0;ii<Nvert;ii++){
    R.row(ii)=zetaP-ZetaPanel.row(ii);
    r1inv=1./R.row(ii).norm();
    Runit.row(ii)=R.row(ii)*r1inv;
    der_runit( Array_Der_runit[ii], R.row(ii), r1inv, -std::pow(r1inv,3) );
  }

  // -------------------------------------------------- Loop through segments
  for(ii=0;ii<Nvert;ii++){

    // vertices indices
    aa=avec[ii];
    bb=bvec[ii];

    // utility vars
    RAB=ZetaPanel.row(bb)-ZetaPanel.row(aa); // segment vector
    Vcr=R.row(aa).cross(R.row(bb));
    vcr2=Vcr.dot(Vcr);
    // regularisation cut-off, consistent with biot_panel_map
    if (vcr2<vortex_radius_sq*RAB.dot(RAB)){
      //cout << endl << "Skipping seg. " << ii << endl;
      continue;}
    Tv=Runit.row(aa)-Runit.row(bb);
    dotprod=RAB.dot(Tv);

    // ------------------------------------------ cross-product derivatives
    // lower triangular part only (Dvcross_by_skew3d relies on this layout)
    vcr2inv=1./vcr2;
    vcr4inv=vcr2inv*vcr2inv;
    diag_fact= Cbiot*vcr2inv*dotprod;
    off_fact =-2.*Cbiot*vcr4inv*dotprod;
    Dvcross(0,0)=diag_fact+off_fact*Vcr[0]*Vcr[0];
    Dvcross(1,0)=off_fact*Vcr[0]*Vcr[1];
    Dvcross(1,1)=diag_fact+off_fact*Vcr[1]*Vcr[1];
    Dvcross(2,0)=off_fact*Vcr[0]*Vcr[2];
    Dvcross(2,1)=off_fact*Vcr[1]*Vcr[2];
    Dvcross(2,2)= diag_fact+off_fact*Vcr[2]*Vcr[2];

    // ------------------------------- difference and RAB terms derivatives
    Vsc=Vcr.transpose()*vcr2inv*Cbiot;
    Ddiff=Vsc*RAB;
    dQ_dRAB=Vsc*Tv;

    // ----------------------------------------------------- Final assembly
    dQ_dRA=Dvcross_by_skew3d(Dvcross,-R.row(bb))+Ddiff*Array_Der_runit[aa];
    dQ_dRB=Dvcross_by_skew3d(Dvcross, R.row(aa))-Ddiff*Array_Der_runit[bb];

    DerP += dQ_dRA + dQ_dRB;
    DerVertices[aa] -= dQ_dRAB + dQ_dRA;
    DerVertices[bb] += dQ_dRAB - dQ_dRB;
  }
}
void der_biot_panel_map( map_Mat3by3& DerP,
             Vec_map_Mat3by3& DerVertices,
             const map_RowVec3 zetaP,
             const map_Mat4by3 ZetaPanel,
             const double gamma,
             double vortex_radius){
  /*
  Map-based twin of der_biot_panel: derivatives of the panel-induced
  velocity at zetaP w.r.t. the evaluation point (DerP) and the panel
  vertices (DerVertices), ACCUMULATED into the outputs.
  This implementation works with mapping objects.
  */

  // declarations
  int ii,aa,bb;
  const double Cbiot=PIquart*gamma;
  double r1inv, vcr2, vcr2inv, vcr4inv, dotprod, diag_fact, off_fact;
  RowVector3d RAB, Vcr, Tv;
  Vector3d Vsc;
  Matrix3d Dvcross, Ddiff, dQ_dRA, dQ_dRB, dQ_dRAB;
  Matrix4by3d R;     // vectors P - vertex matrix
  Matrix4by3d Runit; // unit vectors P - vertex matrix
  Matrix3d Array_Der_runit[Nvert]; // as a static arrays (we know size)

  // We keep vortex_radius_sq = vortex_radius. We have found accuracy issues
  // when vortex_radius_sq = vortex_radius*vortex_radius;
  // We think this is a limit for numerical accuracy so it makes
  // sense to keep it vortex_radius_sq = vortex_radius;
  double vortex_radius_sq = vortex_radius;

  // ----------------------------------------------- Compute common variables
  // these are constants or variables depending only on vertices and P coords
  for(ii=0;ii<Nvert;ii++){
    R.row(ii)=zetaP-ZetaPanel.row(ii);
    r1inv=1./R.row(ii).norm();
    Runit.row(ii)=R.row(ii)*r1inv;
    der_runit( Array_Der_runit[ii], R.row(ii), r1inv, -std::pow(r1inv,3) );
  }

  // -------------------------------------------------- Loop through segments
  for(ii=0;ii<Nvert;ii++){

    // vertices indices
    aa=avec[ii];
    bb=bvec[ii];

    // utility vars
    RAB=ZetaPanel.row(bb)-ZetaPanel.row(aa); // segment vector
    Vcr=R.row(aa).cross(R.row(bb));
    vcr2=Vcr.dot(Vcr);
    // regularisation cut-off, consistent with biot_panel_map
    if (vcr2<vortex_radius_sq*RAB.dot(RAB)){
      //cout << endl << "Skipping seg. " << ii << endl;
      continue;}
    Tv=Runit.row(aa)-Runit.row(bb);
    dotprod=RAB.dot(Tv);

    // ------------------------------------------ cross-product derivatives
    // lower triangular part only (Dvcross_by_skew3d relies on this layout)
    vcr2inv=1./vcr2;
    vcr4inv=vcr2inv*vcr2inv;
    diag_fact= Cbiot*vcr2inv*dotprod;
    off_fact =-2.*Cbiot*vcr4inv*dotprod;
    Dvcross(0,0)=diag_fact+off_fact*Vcr[0]*Vcr[0];
    Dvcross(1,0)=off_fact*Vcr[0]*Vcr[1];
    Dvcross(1,1)=diag_fact+off_fact*Vcr[1]*Vcr[1];
    Dvcross(2,0)=off_fact*Vcr[0]*Vcr[2];
    Dvcross(2,1)=off_fact*Vcr[1]*Vcr[2];
    Dvcross(2,2)= diag_fact+off_fact*Vcr[2]*Vcr[2];

    // ------------------------------- difference and RAB terms derivatives
    Vsc=Vcr.transpose()*vcr2inv*Cbiot;
    Ddiff=Vsc*RAB;
    dQ_dRAB=Vsc*Tv;

    // ----------------------------------------------------- Final assembly
    dQ_dRA=Dvcross_by_skew3d(Dvcross,-R.row(bb))+Ddiff*Array_Der_runit[aa];
    dQ_dRB=Dvcross_by_skew3d(Dvcross, R.row(aa))-Ddiff*Array_Der_runit[bb];

    //cout << endl << "dQ_dRA = " << endl << dQ_dRA << endl;
    DerP += dQ_dRA + dQ_dRB;
    DerVertices[aa] -= dQ_dRAB + dQ_dRA;
    DerVertices[bb] += dQ_dRAB - dQ_dRB;
  }

  /*  cout << "vcr2=" << vcr2 << endl;
  cout << "Tv=" << Tv << endl;
  cout << "dotprod=" << dotprod << endl;
  cout << "dQ_dRB=" << dQ_dRB << endl;
  */
}
// -----------------------------------------------------------------------------
// Sub-functions
void der_runit(Matrix3d& Der,const RowVector3d& rv, double rinv,double minus_rinv3){
  /* Derivative of the unit vector rv/|rv| with respect to rv, assembled
  from the precomputed factors rinv = 1/|rv| and minus_rinv3 = -1/|rv|^3.
  The result is symmetric, so only the upper triangle is computed and then
  mirrored.
  Warning:
  1. RowVector3d needs to be defined as constant if in the main code the
     row vector is a row of a matrix.
  2. The function will fail if Matrix3d is a sub-block of a matrix.
  */
  for (int i=0; i<3; i++){
    for (int j=i; j<3; j++){
      double dij = minus_rinv3*rv(i)*rv(j);
      if (i==j) dij += rinv;   // diagonal carries the 1/|rv| term
      Der(i,j)=dij;
      Der(j,i)=dij;            // mirror into the lower triangle
    }
  }
}
Matrix3d Dvcross_by_skew3d(const Matrix3d& Dvcross, const RowVector3d& rv){
  /* Product of Dvcross with the skew-symmetric (cross-product) matrix of rv,
  hand-expanded assuming only the lower triangle of Dvcross is populated
  (as filled in der_biot_panel*) and mirroring entries where the result is
  symmetric.
  Warning:
  1. RowVector3d needs to defined as constant if in main code RowVector
  is a row of a matrix.
  */
  Matrix3d P;
  P(0,0)=Dvcross(1,0)*rv(2)-Dvcross(2,0)*rv(1);
  P(0,1)=Dvcross(2,0)*rv(0)-Dvcross(0,0)*rv(2);
  P(0,2)=Dvcross(0,0)*rv(1)-Dvcross(1,0)*rv(0);
  //
  P(1,0)=P(0,1);   // symmetric entry reused instead of recomputed
  P(1,1)=Dvcross(2,1)*rv(0)-Dvcross(1,0)*rv(2);
  P(1,2)=Dvcross(1,0)*rv(1)-Dvcross(1,1)*rv(0);
  //
  P(2,0)=P(0,2);
  P(2,1)=P(1,2);
  P(2,2)=Dvcross(2,0)*rv(1)-Dvcross(2,1)*rv(0);
  return P;
}
// -----------------------------------------------------------------------------
void dvinddzeta(map_Mat3by3 DerC,
        map_Mat DerV,
        const map_RowVec3 zetaC,
        Vec_map_Mat ZetaIn,
        map_Mat GammaIn,
        int& M_in,
        int& N_in,
        int& Kzeta_in,
        bool& IsBound,
        int& M_in_bound, // M of bound surf associated
        int& Kzeta_in_bound,
        double vortex_radius
        )
{
  // Derivatives of the velocity induced at point zetaC by the surface
  // (ZetaIn, GammaIn): DerC accumulates d(vel)/d(zetaC), DerV accumulates
  // d(vel)/d(grid coordinates).  For a wake (IsBound == false) only the
  // trailing-edge vertices belong to the bound-surface DOFs, so only those
  // contribute to DerV.
  int cc, vv, mm, nn, jj, cc_in; //pp
  //int Kin=M_in*N_in;

  // below defined as maps to keep compatibility with der-biot_panel_map
  //Matrix4by3d ZetaPanel_in;
  //Matrix3d derv[Nvert];
  double p_ZetaPanel_in[12];
  double p_derv[36];
  map_Mat4by3 ZetaPanel_in(p_ZetaPanel_in);
  Vec_map_Mat3by3 derv;
  for(vv=0;vv<4;vv++) derv.push_back( map_Mat3by3(p_derv+9*vv) );

  /*  cout << "Kzeta_in=" << endl << Kzeta_in << endl;
  cout << "DerV = " << endl << DerV << endl;
  cout << "GammaIn = " << endl << GammaIn << endl;
  for(cc=0;cc<3;cc++){
    cout << "ZetaIn[" << cc << "] = " << endl << ZetaIn[cc] << endl;
  }*/

  if (IsBound){// ------------------------------------------------ Bound case

    // Loop panels (mm,nn)
    for (mm=0; mm<M_in; mm++){
      for (nn=0; nn<N_in; nn++){
        //pp=mm*N_in+nn; // panel no.

        // get panel coords in 4x3 format
        for(cc=0; cc<3; cc++){
          for(vv=0; vv<Nvert; vv++){
            ZetaPanel_in(vv,cc)=ZetaIn[cc](mm+dm[vv],nn+dn[vv]);
          }
        }

        // init. local derivatives
        for(vv=0; vv<Nvert; vv++) derv[vv].setZero();

        // get local deriv
        der_biot_panel_map(DerC,derv,zetaC,ZetaPanel_in,GammaIn(mm,nn), vortex_radius);

        //for(vv=0; vv<Nvert; vv++) cout << derv[vv] << endl;

        // scatter vertex derivatives into the flat DOF vector
        for(cc=0; cc<3; cc++){
          for(vv=0; vv<Nvert; vv++){
            for(cc_in=0; cc_in<3; cc_in++){
              jj= cc_in*Kzeta_in + (mm+dm[vv])*(N_in+1) + (nn+dn[vv]);
              DerV(cc,jj)+=derv[vv](cc,cc_in);
            }
          }
        }
      }
    }

  } else{ // ------------------------------------------------------ Wake case

    // scan TE first: only its first-row panels touch bound-surface vertices
    mm=0;
    for (nn=0; nn<N_in; nn++){
      //pp=mm*N_in+nn; // panel no.

      // get panel coords in 4x3 format
      for(cc=0; cc<3; cc++){
        for(vv=0; vv<Nvert; vv++){
          ZetaPanel_in(vv,cc)=ZetaIn[cc](mm+dm[vv],nn+dn[vv]);
        }
      }

      // init. local derivatives. only vertices 0 and 3 are on TE
      derv[0].setZero();
      derv[3].setZero();

      // get local deriv
      der_biot_panel_map(DerC,derv,zetaC,ZetaPanel_in,GammaIn(mm,nn), vortex_radius);

      // scatter TE vertex derivatives into the bound-surface DOFs
      for(cc=0; cc<3; cc++){
        for(cc_in=0; cc_in<3; cc_in++){
          // vv=0
          jj= cc_in*Kzeta_in_bound + M_in_bound*(N_in+1) + (nn);
          DerV(cc,jj)+=derv[0](cc,cc_in);
          // vv=3
          jj= cc_in*Kzeta_in_bound + M_in_bound*(N_in+1) + (nn+1);
          DerV(cc,jj)+=derv[3](cc,cc_in);
        }
      }
    }

    // Loop other panels (mm,nn) for colloc point
    // NOTE(review): derv is not re-zeroed here; it accumulates stale values,
    // but only the DerC accumulation is used for these panels.
    for (mm=1; mm<M_in; mm++){
      for (nn=0; nn<N_in; nn++){

        // get panel coords in 4x3 format
        for(cc=0; cc<3; cc++){
          for(vv=0; vv<Nvert; vv++){
            ZetaPanel_in(vv,cc)=ZetaIn[cc](mm+dm[vv],nn+dn[vv]);
          }
        }

        // update DerC
        der_biot_panel_map(DerC,derv,zetaC,ZetaPanel_in,GammaIn(mm,nn), vortex_radius);
      }// loop nn
    }// loop mm
  }// if-else
}
void aic3( map_Mat AIC3,
      const map_RowVec3 zetaC,
      Vec_map_Mat ZetaIn,
      int& M_in,
      int& N_in,
      double vortex_radius)
{
  // Builds the 3 x (M_in*N_in) aerodynamic influence matrix: column pp
  // holds the velocity induced at zetaC by panel pp with unit circulation.
  int mm,nn,cc,vv;

  double p_ZetaPanel_in[12];
  map_Mat4by3 ZetaPanel_in(p_ZetaPanel_in);

  double p_vel[3];
  map_RowVec3 vel(p_vel);

  // Loop panels (mm,nn)
  for (mm=0; mm<M_in; mm++){
    for (nn=0; nn<N_in; nn++){
      //pp=mm*N_in+nn; // panel no.

      // get panel coords in 4x3 format
      for(cc=0; cc<3; cc++){
        for(vv=0; vv<Nvert; vv++){
          ZetaPanel_in(vv,cc)=ZetaIn[cc](mm+dm[vv],nn+dn[vv]);
        }
      }

      vel.setZero();  // biot_panel_map accumulates, so reset per panel
      biot_panel_map( vel, zetaC, ZetaPanel_in, 1.0, vortex_radius);
      AIC3.col(mm*N_in+nn)=vel;
    }
  }
}
void ind_vel(map_RowVec3 velC,
      const map_RowVec3 zetaC,
      Vec_map_Mat ZetaIn,
      map_Mat GammaIn,
      int& M_in,
      int& N_in,
      double vortex_radius)
{
  // Total velocity induced at zetaC by the whole surface, obtained by
  // accumulating the contribution of every panel into velC.
  double panel_buf[12];
  map_Mat4by3 panel(panel_buf);

  for (int i_m=0; i_m<M_in; i_m++){
    for (int i_n=0; i_n<N_in; i_n++){
      // gather the panel's 4 vertices into 4x3 format
      for (int i_v=0; i_v<Nvert; i_v++){
        for (int i_c=0; i_c<3; i_c++){
          panel(i_v,i_c)=ZetaIn[i_c](i_m+dm[i_v],i_n+dn[i_v]);
        }
      }
      biot_panel_map( velC, zetaC, panel, GammaIn(i_m,i_n), vortex_radius);
    }
  }
}
}
|
parallelfmm.h | #include "serialfmm.h"
class ParallelFMM : public SerialFMM {
private:
int EXTERNAL;
MPI_Request *requests;
#if PRINT_COMM
std::ofstream fid;
#endif
void gatherMultipoles() {
    // Collects the multipole expansion of every partition at gatherLevel:
    // one representative rank per local communicator joins the global
    // allgather, then the result is broadcast locally so each rank holds
    // the complete level.
    int i = getGlobKey(IX[gatherLevel],gatherLevel) + globLevelOffset[gatherLevel];
    for_m sendMultipole[0][m] = globMultipole[i][m];
    int numGather = numPartition[gatherLevel][0] * numPartition[gatherLevel][1] * numPartition[gatherLevel][2];
    assert( numGather <= numSendCells ); // resize recvMultipole to avoid this
    int rank;
    MPI_Comm_rank(MPI_COMM_LOCAL,&rank);
    if( rank == 0 ) {
      // only local rank 0 participates in the global exchange
      MPI_Allgather(sendMultipole[0],MTERM,MPI_FLOAT,
                    recvMultipole[0],MTERM,MPI_FLOAT,MPI_COMM_GLOBAL);
    }
    // fan the gathered data out to the rest of the local communicator
    MPI_Bcast(recvMultipole[0],numGather*MTERM,MPI_FLOAT,0,MPI_COMM_LOCAL);
    for( int c=0; c<numGather; c++ ) {
      for_m globMultipole[c+globLevelOffset[gatherLevel]][m] = recvMultipole[c][m];
    }
  }
public:
ParallelFMM() {
int argc(0);
char **argv;
MPI_Initialized(&EXTERNAL);
if(!EXTERNAL) MPI_Init(&argc,&argv);
MPI_Comm_size(MPI_COMM_WORLD,&MPISIZE);
MPI_Comm_rank(MPI_COMM_WORLD,&MPIRANK);
printNow = MPIRANK == 0;
requests = new MPI_Request [104];
#if PRINT_COMM
char fname[256];
sprintf(fname,"time%4.4d.dat",MPIRANK);
fid.open(fname);
#endif
}
~ParallelFMM() {
    // Closes the per-rank communication log, frees the request pool, and
    // finalizes MPI only if this object initialized it.
#if PRINT_COMM
    fid.close();
#endif
    delete[] requests;
    if(!EXTERNAL) MPI_Finalize();
  }
void P2PSend() {
    // Packs the halo leaf cells (the slab of local leaves facing each of
    // the 26 neighbour ranks) together with their bodies and posts
    // non-blocking sends/receives.  Sends are completed here; the matching
    // receives are completed in P2PRecv().
    MPI_Status stats[52];
    int rankOffset = 13 * numLeafs;  // offset of the local (self) leaf block
    int ixc[3];
    getGlobIndex(ixc,MPIRANK,maxGlobLevel);
    int nunit[3];
    for_3d nunit[d] = numPartition[maxGlobLevel][d];
    int ileaf = 0;
    int iforward = 0;
    int ix[3];
    float commBytes = 0;
    // enumerate the 26 neighbour directions ix in {-1,0,1}^3 \ {0,0,0}
    for( ix[2]=-1; ix[2]<=1; ix[2]++ ) {
      for( ix[1]=-1; ix[1]<=1; ix[1]++ ) {
        for( ix[0]=-1; ix[0]<=1; ix[0]++ ) {
          if( ix[0] != 0 || ix[1] != 0 || ix[2] != 0 ) {
            assert( ileaf == leafsDispl[iforward] );
            int ibody = bodiesDispl[iforward];
            // per-axis leaf index range of the boundary slab facing ix
            int nxmin[3] = {(1 << maxLevel) - 1, 0, 0};
            int nxmax[3] = {1 << maxLevel, 1 << maxLevel, 1};
            int jx[3];
            for( jx[2]=nxmin[ix[2]+1]; jx[2]<nxmax[ix[2]+1]; jx[2]++ ) {
              for( jx[1]=nxmin[ix[1]+1]; jx[1]<nxmax[ix[1]+1]; jx[1]++ ) {
                for( jx[0]=nxmin[ix[0]+1]; jx[0]<nxmax[ix[0]+1]; jx[0]++, ileaf++ ) {
                  int jxp[3] = {jx[0], jx[1], jx[2]};
                  int j = getKey(jxp,maxLevel,false) + rankOffset;
                  // record [begin, end) body range of this leaf in the send buffer
                  sendLeafs[ileaf][0] = ibody;
                  for( int jbody=Leafs[j][0]; jbody<Leafs[j][1]; ibody++, jbody++ ) {
                    for_4d sendJbodies[ibody][d] = Jbodies[jbody][d];
                  }
                  sendLeafs[ileaf][1] = ibody;
                }
              }
            }
            if(iforward != 25 ) {
              // sanity check: packed bodies must fit inside the next displacement
              if( ibody > bodiesDispl[iforward+1] ) std::cout << "ibody: " << ibody << " bodiesDispl: " << bodiesDispl[iforward+1] << " @rank: " << MPIRANK << std::endl;
            }
            // periodic neighbour ranks in both directions
            int ixp[3];
            for_3d ixp[d] = (ixc[d] - ix[d] + nunit[d]) % nunit[d];
            int sendRank = getGlobKey(ixp,maxGlobLevel);
            for_3d ixp[d] = (ixc[d] + ix[d] + nunit[d]) % nunit[d];
            int recvRank = getGlobKey(ixp,maxGlobLevel);
            // leaf index ranges
            int sendDispl = leafsDispl[iforward];
            int sendCount = leafsCount[iforward];
            commBytes += sendCount * 2 * 4;
            MPI_Isend(sendLeafs[sendDispl],sendCount*2,MPI_INT,
                      sendRank,iforward,MPI_COMM_WORLD,&requests[iforward]);
            int recvDispl = leafsDispl[iforward];
            int recvCount = leafsCount[iforward];
            MPI_Irecv(recvLeafs[recvDispl],recvCount*2,MPI_INT,
                      recvRank,iforward,MPI_COMM_WORLD,&requests[iforward+52]);
            // body payloads
            sendDispl = bodiesDispl[iforward];
            sendCount = bodiesCount[iforward];
            commBytes += sendCount * 4 * 4;
            MPI_Isend(sendJbodies[sendDispl],sendCount*4,MPI_FLOAT,
                      sendRank,iforward+26,MPI_COMM_WORLD,&requests[iforward+26]);
            recvDispl = bodiesDispl[iforward];
            recvCount = bodiesCount[iforward];
            MPI_Irecv(recvJbodies[recvDispl],recvCount*4,MPI_FLOAT,
                      recvRank,iforward+26,MPI_COMM_WORLD,&requests[iforward+78]);
            iforward++;
          }
        }
      }
    }
#if PRINT_COMM
    int cells = (pow((1 << maxLevel) + 2,3) - (1 << (3 * maxLevel)));
    float theoBytes = cells * (2 + numBodies / numLeafs * 8) * 4;
    fid << "level : " << maxGlobLevel+maxLevel << " P2P comm (theoretical) : " << std::setw(8) << theoBytes << ", (actual) : " << std::setw(8) << commBytes << " Bytes" << std::endl;
#endif
    // complete the 52 send requests; receives are waited in P2PRecv()
    MPI_Waitall(52,requests,stats);
  }
void P2PRecv() const {
    // Completes the halo receives posted in P2PSend() and unpacks the
    // neighbour leaf/body data into the per-direction blocks of Leafs and
    // the tail of Jbodies (appended after the local numBodies entries).
    MPI_Status stats[52];
    MPI_Waitall(52,&requests[52],stats);
    int ileaf = 0;
    int iforward = 0;
    int ix[3];
    for( ix[2]=-1; ix[2]<=1; ix[2]++ ) {
      for( ix[1]=-1; ix[1]<=1; ix[1]++ ) {
        for( ix[0]=-1; ix[0]<=1; ix[0]++ ) {
          if( ix[0] != 0 || ix[1] != 0 || ix[2] != 0 ) {
            assert( ileaf == leafsDispl[iforward] );
            // block of the 27-way leaf array belonging to this direction
            int rankIndex = (ix[0] + 1) + 3 * (ix[1] + 1) + 9 * (ix[2] + 1);
            int rankOffset = rankIndex * numLeafs;
            int ibody = numBodies + bodiesDispl[iforward];
            // same boundary-slab index ranges as in P2PSend
            int nxmin[3] = {(1 << maxLevel) - 1, 0, 0};
            int nxmax[3] = {1 << maxLevel, 1 << maxLevel, 1};
            int jx[3];
            for( jx[2]=nxmin[ix[2]+1]; jx[2]<nxmax[ix[2]+1]; jx[2]++ ) {
              for( jx[1]=nxmin[ix[1]+1]; jx[1]<nxmax[ix[1]+1]; jx[1]++ ) {
                for( jx[0]=nxmin[ix[0]+1]; jx[0]<nxmax[ix[0]+1]; jx[0]++, ileaf++ ) {
                  int jxp[3] = {jx[0], jx[1], jx[2]};
                  int j = getKey(jxp,maxLevel,false) + rankOffset;
                  // rebuild the [begin, end) body range for this halo leaf
                  Leafs[j][0] = ibody;
                  for( int jbody=recvLeafs[ileaf][0]; jbody<recvLeafs[ileaf][1]; ibody++, jbody++ ) {
                    for_4d Jbodies[ibody][d] = recvJbodies[jbody][d];
                  }
                  Leafs[j][1] = ibody;
                }
              }
            }
            iforward++;
          }
        }
      }
    }
  }
void M2LSend(int lev) {
    // Packs the boundary multipole cells of level `lev` (two-cell-deep slab
    // facing each of the 26 neighbour directions) and posts non-blocking
    // sends/receives.  Sends are completed here; M2LRecv() completes and
    // unpacks the receives.
    MPI_Status stats[26];
    int rankOffset = 13 * numCells;  // offset of the local (self) cell block
    int ixc[3];
    getGlobIndex(ixc,MPIRANK,maxGlobLevel);
    int nunit[3];
    for_3d nunit[d] = numPartition[maxGlobLevel][d];
    // two-cell-deep slab ranges (M2L needs a 2-cell halo)
    int nxmin[3] = {(1 << lev) - 2, 0, 0};
    int nxmax[3] = {1 << lev, 1 << lev, 2};
    int i = 0;
    int iforward = 0;
    int ix[3];
    float commBytes = 0;
    for( ix[2]=-1; ix[2]<=1; ix[2]++ ) {
      for( ix[1]=-1; ix[1]<=1; ix[1]++ ) {
        for( ix[0]=-1; ix[0]<=1; ix[0]++ ) {
          if( ix[0] != 0 || ix[1] != 0 || ix[2] != 0 ) {
            int jx[3];
            for( jx[2]=nxmin[ix[2]+1]; jx[2]<nxmax[ix[2]+1]; jx[2]++ ) {
              for( jx[1]=nxmin[ix[1]+1]; jx[1]<nxmax[ix[1]+1]; jx[1]++ ) {
                for( jx[0]=nxmin[ix[0]+1]; jx[0]<nxmax[ix[0]+1]; jx[0]++, i++ ) {
                  int jxp[3] = {jx[0], jx[1], jx[2]};
                  int j = getKey(jxp,lev) + rankOffset;
                  for_m sendMultipole[i][m] = Multipole[j][m];
                  commBytes += MTERM * 4;
                }
              }
            }
            // periodic neighbour ranks in both directions
            int ixp[3];
            for_3d ixp[d] = (ixc[d] - ix[d] + nunit[d]) % nunit[d];
            int sendRank = getGlobKey(ixp,maxGlobLevel);
            for_3d ixp[d] = (ixc[d] + ix[d] + nunit[d]) % nunit[d];
            int recvRank = getGlobKey(ixp,maxGlobLevel);
            int sendDispl = multipoleDispl[lev][iforward];
            int sendCount = multipoleCount[lev][iforward];
            MPI_Isend(sendMultipole[sendDispl],sendCount*MTERM,MPI_FLOAT,
                      sendRank,iforward,MPI_COMM_WORLD,&requests[iforward]);
            int recvDispl = multipoleDispl[lev][iforward];
            int recvCount = multipoleCount[lev][iforward];
            MPI_Irecv(recvMultipole[recvDispl],recvCount*MTERM,MPI_FLOAT,
                      recvRank,iforward,MPI_COMM_WORLD,&requests[iforward+26]);
            iforward++;
          }
        }
      }
    }
#if PRINT_COMM
    int cells = (pow((1 << lev) + 4,3) - (1 << (3 * lev)));
    float theoBytes = cells * MTERM * 4;
    fid << "level : " << maxGlobLevel+lev << " M2L comm (theoretical) : " << std::setw(8) << theoBytes << ", (actual) : " << std::setw(8) << commBytes << " Bytes" << std::endl;
#endif
    // complete the 26 send requests; receives are waited in M2LRecv()
    MPI_Waitall(26,requests,stats);
  }
void M2LRecv(int lev) const {
    // Completes the 26 halo multipole receives posted in M2LSend() (in
    // whatever order they arrive) and scatters each payload into the
    // neighbour-direction block of Multipole.
    MPI_Status stats[26];
    int nxmin[3] = {(1 << lev) - 2, 0, 0};
    int nxmax[3] = {1 << lev, 1 << lev, 2};
    for( int iforward=0; iforward<26; iforward++ ) {
      int irequest;
      MPI_Waitany(26,&requests[26],&irequest,stats);
      // map the request slot back to a 3-D neighbour direction (the self
      // slot 13 is skipped in the 0..25 numbering)
      int rankIndex = irequest < 13 ? irequest : irequest+1;
      int ix[3] = {rankIndex % 3, rankIndex / 3 % 3, rankIndex / 9};
      for_3d ix[d]--;
      int i = multipoleDispl[lev][irequest];
      int rankOffset = rankIndex * numCells;
      int jx[3];
      for( jx[2]=nxmin[ix[2]+1]; jx[2]<nxmax[ix[2]+1]; jx[2]++ ) {
        for( jx[1]=nxmin[ix[1]+1]; jx[1]<nxmax[ix[1]+1]; jx[1]++ ) {
          for( jx[0]=nxmin[ix[0]+1]; jx[0]<nxmax[ix[0]+1]; jx[0]++, i++ ) {
            int jxp[3] = {jx[0], jx[1], jx[2]};
            int j = getKey(jxp,lev) + rankOffset;
            for_m Multipole[j][m] = recvMultipole[i][m];
          }
        }
      }
    }
  }
void rootGather() {
    // Zeroes the global multipole and local expansion buffers before the
    // global upward/downward passes.
#pragma omp parallel for
    for(int i=0;i<numGlobCells;i++){
      for_m globMultipole[i][m] = 0;
    }
#pragma omp parallel for
    for( int lev=0; lev<=maxGlobLevel; lev++ ) {
      for_l globLocal[lev][l] = 0;
    }
  }
void globM2MSend(int level) {
int numChild[3];
for_3d numChild[d] = numPartition[level][d] / numPartition[level-1][d];
int numStride[3];
for_3d numStride[d] = numPartition[maxGlobLevel][d] / numPartition[level][d];
int ix[3];
for_3d ix[d] = IX[level][d];
int ixoff[3];
for_3d ixoff[d] = IX[maxGlobLevel][d] % numStride[d];
int jxoff[3];
for_3d jxoff[d] = (IX[level][d] / numChild[d]) * numChild[d];
int i = getGlobKey(ix,level) + globLevelOffset[level];
for_m sendMultipole[0][m] = globMultipole[i][m];
int iforward = 0;
int numComm = numChild[0] * numChild[1] * numChild[2] - 1;
MPI_Status *stats = new MPI_Status[numComm];
float commBytes = 0;
int jx[3];
for( jx[2]=jxoff[2]; jx[2]<jxoff[2]+numChild[2]; jx[2]++ ) {
for( jx[1]=jxoff[1]; jx[1]<jxoff[1]+numChild[1]; jx[1]++ ) {
for( jx[0]=jxoff[0]; jx[0]<jxoff[0]+numChild[0]; jx[0]++ ) {
if( ix[0] != jx[0] || ix[1] != jx[1] || ix[2] != jx[2] ) {
int jxp[3];
for_3d jxp[d] = ixoff[d] + jx[d] * numStride[d];
int commRank = getGlobKey(jxp,maxGlobLevel);
commBytes += MTERM * 4;
MPI_Isend(sendMultipole[0],MTERM,MPI_FLOAT,
commRank,0,MPI_COMM_WORLD,&requests[iforward]);
MPI_Irecv(recvMultipole[iforward],MTERM,MPI_FLOAT,
commRank,0,MPI_COMM_WORLD,&requests[iforward+numComm]);
iforward++;
}
}
}
}
#if PRINT_COMM
float theoBytes = numComm * MTERM * 4;
fid << "level : " << level << " M2M comm (theoretical) : " << std::setw(8) << theoBytes << ", (actual) : " << std::setw(8) << commBytes << " Bytes" << std::endl;
#endif
MPI_Waitall(numComm,requests,stats);
}
void globM2MRecv(int level) const {
int numChild[3];
for_3d numChild[d] = numPartition[level][d] / numPartition[level-1][d];
int ix[3];
for_3d ix[d] = IX[level][d];
int jxoff[3];
for_3d jxoff[d] = (ix[d] / numChild[d]) * numChild[d];
int iforward = 0;
int numComm = numChild[0] * numChild[1] * numChild[2] - 1;
MPI_Status *stats = new MPI_Status[numComm];
MPI_Waitall(numComm,&requests[numComm],stats);
int jx[3];
for( jx[2]=jxoff[2]; jx[2]<jxoff[2]+numChild[2]; jx[2]++ ) {
for( jx[1]=jxoff[1]; jx[1]<jxoff[1]+numChild[1]; jx[1]++ ) {
for( jx[0]=jxoff[0]; jx[0]<jxoff[0]+numChild[0]; jx[0]++ ) {
if( ix[0] != jx[0] || ix[1] != jx[1] || ix[2] != jx[2] ) {
int j = getGlobKey(jx,level) + globLevelOffset[level];
for_m globMultipole[j][m] = recvMultipole[iforward][m];
iforward++;
}
}
}
}
}
void globM2M() {
    // Global upward pass: seeds this rank's root multipole into the global
    // tree, then per level exchanges sibling expansions and translates them
    // to the parent (M2M).  Below gatherLevel all ranks hold the whole
    // level (via gatherMultipoles) and translate redundantly.
    int rankOffset = 13 * numCells;  // local (self) block in the 27-way array
    int i = MPIRANK + globLevelOffset[maxGlobLevel];
    for_m globMultipole[i][m] = Multipole[rankOffset][m];
    for( int lev=maxGlobLevel; lev>gatherLevel; lev-- ) {
      logger::startTimer("Comm LET cells");
      double tic = getTime();
      globM2MSend(lev);
      globM2MRecv(lev);
      double toc = getTime();
      if( printNow ) printf("M2M Comm: %lf @ lev: %d\n",toc-tic,lev);
      logger::stopTimer("Comm LET cells");
      logger::startTimer("Upward pass");
      tic = getTime();
      int numChild[3];
      for_3d numChild[d] = numPartition[lev][d] / numPartition[lev-1][d];
      int jxoff[3];
      for_3d jxoff[d] = (IX[lev][d] / numChild[d]) * numChild[d];
      int childOffset = globLevelOffset[lev];
      int parentOffset = globLevelOffset[lev-1];
      real diameter[3];
      for_3d diameter[d] = 2 * RGlob[d] / numPartition[lev][d];
      int jx[3];
      // translate every child under this rank's parent cell
      for( jx[2]=jxoff[2]; jx[2]<jxoff[2]+numChild[2]; jx[2]++ ) {
        for( jx[1]=jxoff[1]; jx[1]<jxoff[1]+numChild[1]; jx[1]++ ) {
          for( jx[0]=jxoff[0]; jx[0]<jxoff[0]+numChild[0]; jx[0]++ ) {
            int ix[3];
            for_3d ix[d] = jx[d] / numChild[d];
            int c = getGlobKey(jx,lev) + childOffset;
            int p = getGlobKey(ix,lev-1) + parentOffset;
            real dist[3];
            // child-to-parent center offset
            for_3d dist[d] = (ix[d] + .5) * numChild[d] * diameter[d] - (jx[d] + .5) * diameter[d];
            real M[MTERM];
            real C[LTERM];
            C[0] = 1;
            powerM(C,dist);
            for_m M[m] = globMultipole[c][m];
            for_m globMultipole[p][m] += C[m] * M[0];
            M2MSum(globMultipole[p],C,M);
          }
        }
      }
      toc = getTime();
      if( printNow ) printf("M2M Glob: %lf @ lev: %d\n",toc-tic,lev);
      logger::stopTimer("Upward pass");
    }
    logger::startTimer("Comm LET cells");
    double tic = getTime();
    gatherMultipoles();
    double toc = getTime();
    if( printNow ) printf("M2M Comm: %lf @ lev: %d\n",toc-tic,gatherLevel);
    logger::stopTimer("Comm LET cells");
    logger::startTimer("Upward pass");
    // below gatherLevel every rank owns the full level: translate all cells
    for( int lev=gatherLevel; lev>0; lev-- ) {
      tic = getTime();
      int numChild[3];
      for_3d numChild[d] = numPartition[lev][d] / numPartition[lev-1][d];
      int childOffset = globLevelOffset[lev];
      int parentOffset = globLevelOffset[lev-1];
      real diameter[3];
      for_3d diameter[d] = 2 * RGlob[d] / numPartition[lev][d];
      int jx[3];
      for( jx[2]=0; jx[2]<numPartition[lev][2]; jx[2]++ ) {
        for( jx[1]=0; jx[1]<numPartition[lev][1]; jx[1]++ ) {
          for( jx[0]=0; jx[0]<numPartition[lev][0]; jx[0]++ ) {
            int ix[3];
            for_3d ix[d] = jx[d] / numChild[d];
            int c = getGlobKey(jx,lev) + childOffset;
            int p = getGlobKey(ix,lev-1) + parentOffset;
            real dist[3];
            for_3d dist[d] = (ix[d] + .5) * numChild[d] * diameter[d] - (jx[d] + .5) * diameter[d];
            real M[MTERM];
            real C[LTERM];
            C[0] = 1;
            powerM(C,dist);
            for_m M[m] = globMultipole[c][m];
            for_m globMultipole[p][m] += C[m] * M[0];
            M2MSum(globMultipole[p],C,M);
          }
        }
      }
      toc = getTime();
      if( printNow ) printf("M2M Glob: %lf @ lev: %d\n",toc-tic,lev);
    }
    logger::stopTimer("Upward pass");
  }
void globM2LSend(int level) {
    // Sends this rank's group of child multipoles at `level` to the 26
    // neighbour parents of its own parent cell, and posts the matching
    // receives (completed in globM2LRecv).
    MPI_Status stats[26];
    int numChild[3];
    for_3d numChild[d] = numPartition[level][d] / numPartition[level-1][d];
    int numStride[3];
    for_3d numStride[d] = numPartition[maxGlobLevel][d] / numPartition[level-1][d];
    int ixc[3];
    for_3d ixc[d] = IX[level-1][d];  // this rank's parent cell index
    int ixoff[3];
    for_3d ixoff[d] = IX[maxGlobLevel][d] % numStride[d];
    int numGroup = numChild[0] * numChild[1] * numChild[2];
    float commBytes = 0;
    int i = 0;
    int iforward = 0;
    int ix[3];
    for( ix[2]=-1; ix[2]<=1; ix[2]++ ) {
      for( ix[1]=-1; ix[1]<=1; ix[1]++ ) {
        for( ix[0]=-1; ix[0]<=1; ix[0]++ ) {
          if( ix[0] != 0 || ix[1] != 0 || ix[2] != 0 ) {
            // pack all children of this rank's parent cell
            int jx[3];
            for( jx[2]=ixc[2]*numChild[2]; jx[2]<(ixc[2]+1)*numChild[2]; jx[2]++ ) {
              for( jx[1]=ixc[1]*numChild[1]; jx[1]<(ixc[1]+1)*numChild[1]; jx[1]++ ) {
                for( jx[0]=ixc[0]*numChild[0]; jx[0]<(ixc[0]+1)*numChild[0]; jx[0]++, i++ ) {
                  int j = getGlobKey(jx,level) + globLevelOffset[level];
                  for_m sendMultipole[i][m] = globMultipole[j][m];
                }
              }
            }
            // representative rank inside the periodic neighbour parent cell
            int ixp[3];
            for_3d ixp[d] = (ixc[d] + ix[d] + numPartition[level-1][d]) % numPartition[level-1][d];
            for_3d ixp[d] = ixoff[d] + ixp[d] * numStride[d];
            int sendRank = getGlobKey(ixp,maxGlobLevel);
            commBytes += numGroup * MTERM * 4;
            MPI_Isend(sendMultipole[iforward*numGroup],numGroup*MTERM,MPI_FLOAT,
                      sendRank,iforward,MPI_COMM_WORLD,&requests[iforward]);
            for_3d ixp[d] = (ixc[d] - ix[d] + numPartition[level-1][d]) % numPartition[level-1][d];
            for_3d ixp[d] = ixoff[d] + ixp[d] * numStride[d];
            int recvRank = getGlobKey(ixp,maxGlobLevel);
            MPI_Irecv(recvMultipole[iforward*numGroup],numGroup*MTERM,MPI_FLOAT,
                      recvRank,iforward,MPI_COMM_WORLD,&requests[iforward+26]);
            iforward++;
          }
        }
      }
    }
#if PRINT_COMM
    float theoBytes = 26 * numGroup * MTERM * 4;
    fid << "level : " << level << " M2L comm (theoretical) : " << std::setw(8) << theoBytes << ", (actual) : " << std::setw(8) << commBytes << " Bytes" << std::endl;
#endif
    // complete the 26 sends; receives are waited in globM2LRecv()
    MPI_Waitall(26,requests,stats);
  }
// Complete the 26 receives posted by globM2LSend(level) and scatter the
// received neighbor multipoles into globMultipole at their global cell keys.
// (Declared const: it mutates data reached through pointer members only.)
void globM2LRecv(int level) const {
MPI_Status stats[26];
// wait on the receive requests, which were posted on requests[26..51]
MPI_Waitall(26,&requests[26],stats);
int numChild[3];
for_3d numChild[d] = numPartition[level][d] / numPartition[level-1][d];
int ixc[3];
for_3d ixc[d] = IX[level-1][d];
int i = 0;
int iforward = 0;
int ix[3];
// iterate the 26 neighbor directions in the same order as the sender, so
// the contiguous index `i` matches the recvMultipole packing per message
for( ix[2]=-1; ix[2]<=1; ix[2]++ ) {
for( ix[1]=-1; ix[1]<=1; ix[1]++ ) {
for( ix[0]=-1; ix[0]<=1; ix[0]++ ) {
if( ix[0] != 0 || ix[1] != 0 || ix[2] != 0 ) {
// the message tagged iforward came from the neighbor in direction -ix;
// ixp is that neighbor's level-1 cell (with periodic wrap)
int ixp[3];
for_3d ixp[d] = (ixc[d] - ix[d] + numPartition[level-1][d]) % numPartition[level-1][d];
// unpack its children's multipoles in the same jx order used to pack
int jx[3];
for( jx[2]=ixp[2]*numChild[2]; jx[2]<(ixp[2]+1)*numChild[2]; jx[2]++ ) {
for( jx[1]=ixp[1]*numChild[1]; jx[1]<(ixp[1]+1)*numChild[1]; jx[1]++ ) {
for( jx[0]=ixp[0]*numChild[0]; jx[0]<(ixp[0]+1)*numChild[0]; jx[0]++, i++ ) {
int j = getGlobKey(jx,level) + globLevelOffset[level];
for_m globMultipole[j][m] = recvMultipole[i][m];
}
}
}
iforward++;
}
}
}
}
}
// Global M2L phase: for each global level (finest to coarsest), exchange the
// required neighbor multipoles between ranks, then translate the multipoles of
// all well-separated cells into this rank's local expansion globLocal[lev].
void globM2L(std::ofstream &fid2) {
for( int lev=maxGlobLevel; lev>0; lev-- ) {
MPI_Barrier(MPI_COMM_WORLD);
logger::startTimer("Comm LET cells");
double tic = getTime();
// only exchange above gatherLevel; levels <= gatherLevel are presumably
// populated by a collective gather elsewhere — TODO confirm against caller
if( lev > gatherLevel ) {
globM2LSend(lev);
globM2LRecv(lev);
}
double toc = getTime();
if( lev > 1 ) fid2 << toc-tic << std::endl;
if( printNow ) printf("M2L Comm: %lf @ lev: %d\n",toc-tic,lev);
logger::stopTimer("Comm LET cells");
logger::startTimer("Traverse");
tic = getTime();
// traversal bounds in parent cells; widened when periodic images are used
int nxmin[3] = {0, 0, 0};
int nxmax[3] = {numPartition[lev-1][0]-1,numPartition[lev-1][1]-1,numPartition[lev-1][2]-1};
int nunit[3] = {numPartition[lev][0],numPartition[lev][1],numPartition[lev][2]};
real diameter[3];
for_3d diameter[d] = 2 * RGlob[d] / numPartition[lev][d];
if( numImages != 0 ) {
for_3d nxmin[d] = -nxmax[d] - 1;
for_3d nxmax[d] = 2 * nxmax[d] + 1;
}
// L accumulates this level's local-expansion contribution
real L[LTERM];
for_l L[l] = 0;
int ix[3];
for_3d ix[d] = IX[lev][d];
int ixp[3];
for_3d ixp[d] = IX[lev-1][d];
// candidate cells: children of the parent's neighbor cells (clamped to the
// domain bounds), i.e. the classic FMM interaction-list construction
int jxmin[3];
for_3d jxmin[d] = FMMMAX(nxmin[d], ixp[d] - 1) * numPartition[lev][d] / numPartition[lev-1][d];
int jxmax[3];
for_3d jxmax[d] = (FMMMIN(nxmax[d], ixp[d] + 1) + 1) * numPartition[lev][d] / numPartition[lev-1][d];
int jx[3];
for( jx[2]=jxmin[2]; jx[2]<jxmax[2]; jx[2]++ ) {
for( jx[1]=jxmin[1]; jx[1]<jxmax[1]; jx[1]++ ) {
for( jx[0]=jxmin[0]; jx[0]<jxmax[0]; jx[0]++ ) {
// skip the near field (the 3x3x3 block around ix); only well-separated
// cells contribute through M2L
if(jx[0] < ix[0]-1 || ix[0]+1 < jx[0] ||
jx[1] < ix[1]-1 || ix[1]+1 < jx[1] ||
jx[2] < ix[2]-1 || ix[2]+1 < jx[2]) {
// wrap jx into the periodic unit cell to look up the source multipole
int jxp[3];
for_3d jxp[d] = (jx[d] + nunit[d]) % nunit[d];
int j = getGlobKey(jxp,lev) + globLevelOffset[lev];
real M[MTERM];
for_m M[m] = globMultipole[j][m];
// the M2L kernel uses the unwrapped distance (includes image offset)
real dist[3];
for_3d dist[d] = (ix[d] - jx[d]) * diameter[d];
real invR2 = 1. / (dist[0] * dist[0] + dist[1] * dist[1] + dist[2] * dist[2]);
real invR = sqrt(invR2);
real C[LTERM];
getCoef(C,dist,invR2,invR);
M2LSum(L,C,M);
}
}
}
}
for_l globLocal[lev][l] += L[l];
toc = getTime();
if( printNow ) printf("M2L Glob: %lf @ lev: %d\n",toc-tic,lev);
logger::stopTimer("Traverse");
}
}
// Global L2L phase: walk down the global tree, shifting each parent's local
// expansion to its child cell's center and accumulating into globLocal[lev];
// finally fold the finest global expansion into the local tree root Local[0].
// (Declared const: it mutates data reached through pointer members only.)
void globL2L() const {
for( int lev=1; lev<=maxGlobLevel; lev++ ) {
real diameter[3];
for_3d diameter[d] = 2 * RGlob[d] / numPartition[lev][d];
// shift vector from the parent cell center to this cell's center.
// NOTE(review): uses 2*diameter as the parent cell size, which assumes a
// branching factor of 2 per dimension at every global level — confirm.
real dist[3];
for_3d dist[d] = (IX[lev][d] + .5) * diameter[d] - (IX[lev-1][d] + .5) * 2 * diameter[d];
real C[LTERM];
C[0] = 1;
powerL(C,dist);
// copy the parent expansion, then apply the translation operator terms
for_l globLocal[lev][l] += globLocal[lev-1][l];
for( int l=1; l<LTERM; l++ ) globLocal[lev][0] += C[l] * globLocal[lev-1][l];
L2LSum(globLocal[lev],C,globLocal[lev-1]);
}
for_l Local[0][l] += globLocal[maxGlobLevel][l];
}
// Direct-sum validation: compute the exact potential and force on the first
// numTarget local bodies by cycling every rank's bodies around a ring, then
// print the relative L2 error of the FMM result (Ibodies) against it.
// NOTE(review): reassigns the numBodies member each ring step; after MPISIZE
// steps the ring returns each rank's own bodies, restoring it — confirm.
void globDirect() {
const int numTarget = 100;
MPI_Status stats[2];
// Ibodies2: direct-sum accumulators; Jbodies2: the currently-held body set
real (*Ibodies2)[4] = new real [numTarget][4];
real (*Jbodies2)[4] = new real [numBodies][4];
for( int i=0; i<numTarget; i++ ) {
for_4d Ibodies2[i][d] = 0;
}
for( int i=0; i<numBodies; i++ ) {
for_4d Jbodies2[i][d] = Jbodies[i][d];
}
// ring topology: pass bodies to the next rank, receive from the previous
const int sendRank = (MPIRANK + 1 ) % MPISIZE;
const int recvRank = (MPIRANK - 1 + MPISIZE) % MPISIZE;
for( int irank=0; irank<MPISIZE; irank++ ) {
for( int i=0; i<numBodies; i++ ) {
for_4d sendJbodies[i][d] = Jbodies2[i][d];
}
// first exchange the body count, then the body data itself
int newBodies = 0;
MPI_Isend(&numBodies,1,MPI_INT,sendRank,
1,MPI_COMM_WORLD,&requests[0]);
MPI_Irecv(&newBodies,1,MPI_INT,recvRank,
1,MPI_COMM_WORLD,&requests[1]);
MPI_Waitall(2,requests,stats);
MPI_Isend(sendJbodies[0],numBodies*4,MPI_FLOAT,sendRank,
1,MPI_COMM_WORLD,&requests[0]);
MPI_Irecv(recvJbodies[0],newBodies*4,MPI_FLOAT,recvRank,
1,MPI_COMM_WORLD,&requests[1]);
// prange: periodic image range (pow returns double, truncated to int)
int prange = numImages == 0 ? 0 : pow(3,numImages - 1);
// overlap computation with the body transfer: interact the local targets
// (Jbodies) with the currently-held source set (Jbodies2)
#pragma omp parallel for
for( int i=0; i<numTarget; i++ ) {
real bodies[4] = {0, 0, 0, 0};
int jx[3];
for( jx[2]=-prange; jx[2]<=prange; jx[2]++ ) {
for( jx[1]=-prange; jx[1]<=prange; jx[1]++ ) {
for( jx[0]=-prange; jx[0]<=prange; jx[0]++ ) {
for( int j=0; j<numBodies; j++ ) {
real dist[3];
for_3d dist[d] = Jbodies[i][d] - Jbodies2[j][d] - jx[d] * 2 * RGlob[d];
real R2 = dist[0] * dist[0] + dist[1] * dist[1] + dist[2] * dist[2];
real invR2 = 1.0 / R2;
// self-interaction: discard the divide-by-zero result (IEEE inf)
if( R2 == 0 ) invR2 = 0;
real invR = Jbodies2[j][3] * sqrt(invR2);
real invR3 = invR2 * invR;
bodies[0] += invR;
for_3d bodies[d+1] -= dist[d] * invR3;
}
}
}
}
for_4d Ibodies2[i][d] += bodies[d];
}
// finish the body transfer and adopt the received set for the next step
MPI_Waitall(2,requests,stats);
numBodies = newBodies;
for( int i=0; i<numBodies; i++ ) {
for_4d Jbodies2[i][d] = recvJbodies[i][d];
}
}
// relative L2 error: potential (diff1/norm1) and force (diff2/norm2)
real diff1 = 0, norm1 = 0, diff2 = 0, norm2 = 0;
for( int i=0; i<numTarget; i++ ) {
diff1 += (Ibodies[i][0] - Ibodies2[i][0]) * (Ibodies[i][0] - Ibodies2[i][0]);
norm1 += Ibodies2[i][0] * Ibodies2[i][0];
for_3d diff2 += (Ibodies[i][d+1] - Ibodies2[i][d+1]) * (Ibodies[i][d+1] - Ibodies2[i][d+1]);
for_3d norm2 += Ibodies2[i][d+1] * Ibodies2[i][d+1];
}
real diff3 = 0, norm3 = 0, diff4 = 0, norm4 = 0;
// NOTE(review): MPI_DOUBLE assumes `real` is typedef'd to double — confirm
MPI_Reduce(&diff1, &diff3, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&norm1, &norm3, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&diff2, &diff4, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
MPI_Reduce(&norm2, &norm4, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
if( MPIRANK == 0 ) printf("Err Pot : %lf\n",sqrt(diff3/norm3));
if( MPIRANK == 0 ) printf("Err Forc: %lf\n",sqrt(diff4/norm4));
delete[] Ibodies2;
delete[] Jbodies2;
}
};
|
GB_binop__times_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_int16)
// A.*B function (eWiseMult): GB (_AemultB_01__times_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__times_int16)
// A.*B function (eWiseMult): GB (_AemultB_03__times_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_int16)
// A*D function (colscale): GB (_AxD__times_int16)
// D*A function (rowscale): GB (_DxB__times_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__times_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__times_int16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_int16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_int16)
// C=scalar+B GB (_bind1st__times_int16)
// C=scalar+B' GB (_bind1st_tran__times_int16)
// C=A+scalar GB (_bind2nd__times_int16)
// C=A'+scalar GB (_bind2nd_tran__times_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x * y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_INT16 || GxB_NO_TIMES_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where "+" is the TIMES operator (C(i,j) += A(i,j) * B(i,j)),
// with C, A, and B all dense; the loop body comes from the shared template.
void GB (_Cdense_ewise3_accum__times_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where "+" is the TIMES operator, with all three matrices dense;
// returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_ewise3_noaccum__times_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C with the TIMES
// operator, using the precomputed task slicing of B for parallelism.
GrB_Info GB (_Cdense_accumB__times_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into the dense matrix C with the TIMES
// operator (C(i,j) = C(i,j) * b), via the subassign-22 template.
// Fix: the original had a second `return (GrB_SUCCESS)` inside the braced
// scope, making the outer return unreachable; a single exit point is kept
// here, matching the structure of GB (_Cdense_accumB__times_int16).
GrB_Info GB (_Cdense_accumb__times_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D with the TIMES
// operator; Cx is the typed output array filled by the colscale template.
GrB_Info GB (_AxD__times_int16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D with the TIMES
// operator; Cx is the typed output array filled by the rowscale template.
GrB_Info GB (_DxB__times_int16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B with the TIMES operator applied where both
// entries are present; the full algorithm comes from GB_add_template.c.
GrB_Info GB (_AaddB__times_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B; freed by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B with the TIMES operator,
// delegating entirely to the emult-01 meta template.
GrB_Info GB (_AemultB_01__times_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full.  GB_BINOP_FLIP is 0 for TIMES (commutative), so only the
// unflipped template branch is compiled here.
GrB_Info GB (_AemultB_02__times_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full, using the precomputed slicing of M for parallelism.
GrB_Info GB (_AemultB_03__times_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with bitmap output: C=A.*B, C<M>=A.*B, or C<!M>=A.*B with the
// TIMES operator, where C is held in bitmap form.
GrB_Info GB (_AemultB_bitmap__times_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): bind the scalar x as the first operand of TIMES and apply
// it to every entry of B, i.e. Cx [k] = x * Bx [k] for each present entry.
GrB_Info GB (_bind1st__times_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the output, the bound scalar, and the input values
int16_t *Bx = (int16_t *) Bx_input ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// skip entries not present in the bitmap of B
if (!GBB (Bb, k)) continue ;
int16_t bkj = GBX (Bx, k, false) ;
Cx [k] = (x * bkj) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): bind the scalar y as the second operand of TIMES and apply
// it to every entry of A, i.e. Cx [k] = Ax [k] * y for each present entry.
GrB_Info GB (_bind2nd__times_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the output, the input values, and the bound scalar
int16_t *Ax = (int16_t *) Ax_input ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t y = (*((int16_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// skip entries not present in the bitmap of A
if (!GBB (Ab, k)) continue ;
int16_t akj = GBX (Ax, k, false) ;
Cx [k] = (akj * y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// (this macro is consumed by GB_unop_transpose.c below)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x * aij) ; \
}
// C = op (x, A'): transpose A and apply z = x * aij to each entry, with the
// scalar x bound as the first operand of TIMES.
GrB_Info GB (_bind1st_tran__times_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// (this macro is consumed by GB_unop_transpose.c below)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij * y) ; \
}
// C = op (A', y): transpose A and apply z = aij * y to each entry, with the
// scalar y bound as the second operand of TIMES.
GrB_Info GB (_bind2nd_tran__times_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__signum_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__signum_fc32_fc32)
// op(A') function: GB (_unop_tran__signum_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_csignumf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_csignumf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_csignumf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SIGNUM || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply the complex signum operator entrywise,
// Cx [k] = GB_csignumf (Ax [k]) for each entry present in A.
GrB_Info GB (_unop_apply__signum_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The full and bitmap cases are folded into one loop: Ab == NULL means
// every entry is present; in the bitmap case A->b has already been
// memcpy'd into C->b by the caller, so absent entries are simply skipped.
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
if (Ab != NULL && !Ab [k]) continue ;
GxB_FC32_t z = Ax [k] ;
Cx [k] = GB_csignumf (z) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A and apply the complex signum operator to
// each entry; the transpose machinery comes from GB_unop_transpose.c.
GrB_Info GB (_unop_tran__signum_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
depthwise_convolution_3x3.c | /*
* Copyright (C) 2016-2022 T-Head Semiconductor Co., Ltd. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the License); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* CSI-NN2 version 1.12.x */
#include "csi_thead_rvv.h"
/*************************************************************
note: VLEN = 128/256
*************************************************************/
// Depthwise 3x3 convolution, stride 1, fp32, using RISC-V Vector intrinsics.
// Layout is NCHW with group == in_channel (one 3x3 kernel per channel).
// The input is first copied into a zero-padded buffer, then each channel is
// convolved independently: two output rows at a time (sharing three of the
// four loaded input rows), with vector widths m2 (2*VLEN lanes), m1, and a
// scalar-per-output reduction tail.  Returns CSINN_TRUE.
// NOTE(review): `batch` and `out_c` are read but unused — presumably batch
// is assumed to be 1 here; confirm against the callers.
int csi_nn_rvv_dwconv3x3s1_fp32(struct csi_tensor *input, struct csi_tensor *output,
struct csi_tensor *kernel, struct csi_tensor *bias,
struct conv2d_params *params)
{
float *input_data = (float *)input->data;
float *output_data = (float *)output->data;
float *kernel_data = (float *)kernel->data;
float *bias_data = (float *)bias->data;
int32_t batch = input->dim[0];
int32_t in_c = input->dim[1]; // group = in_channel
int32_t in_h = input->dim[2];
int32_t in_w = input->dim[3];
int32_t out_c = output->dim[1];
int32_t out_h = output->dim[2];
int32_t out_w = output->dim[3];
// zero-padded working copy of the input (all channels)
float *input_padd_buf =
(float *)csi_mem_alloc(in_c * (in_h + params->pad_top + params->pad_down) *
(in_w + params->pad_left + params->pad_right) * sizeof(float));
csi_nn_rvv_pad_input_fp32(
input_data, input_padd_buf, in_c, in_h, in_w, in_h + params->pad_top + params->pad_down,
in_w + params->pad_left + params->pad_right, params->pad_top, params->pad_left);
// from here on in_h/in_w refer to the padded dimensions
in_h = in_h + params->pad_top + params->pad_down;
in_w = in_w + params->pad_left + params->pad_right;
// pinned to one thread — presumably threading is handled at a higher
// level; confirm before raising num_threads
#pragma omp parallel for num_threads(1)
for (int c = 0; c < in_c; c++) {
float *out = output_data + c * out_h * out_w;
// outptr0/outptr1: the two output rows produced per h-iteration
float *outptr0 = out;
float *outptr1 = outptr0 + out_w;
const float bias0 = bias_data ? bias_data[c] : 0.0f;
float *img0 = input_padd_buf + c * in_h * in_w;
// r0..r3: four consecutive padded input rows feeding two output rows
float *r0 = img0;
float *r1 = r0 + in_w;
float *r2 = r1 + in_w;
float *r3 = r2 + in_w;
// per-channel 3x3 kernel, broadcast as scalars into vfmacc
const float *kernel0 = kernel_data + c * 9;
float k00 = kernel0[0];
float k01 = kernel0[1];
float k02 = kernel0[2];
float k10 = kernel0[3];
float k11 = kernel0[4];
float k12 = kernel0[5];
float k20 = kernel0[6];
float k21 = kernel0[7];
float k22 = kernel0[8];
int vl;
int w_loop = csrr_vlenb() / sizeof(float); // VLEN128=4 VLEN256=8
int w2_loop = w_loop * 2;
// TODO: optimize the instruction sequence and tune the intrinsics to
// match the instruction ordering of the hand-written assembly version
int h = 0;
// h2 loop
for (; h + 1 < out_h; h += 2) {
vl = vsetvl_e32m2(w2_loop);
int w = 0;
// h2w8 loop
for (; w + w2_loop - 1 < out_w; w += w2_loop) {
// acc0/acc1: output rows h and h+1, seeded with the bias
vfloat32m2_t _acc0 = vfmv_v_f_f32m2(bias0, vl);
vfloat32m2_t _acc1 = vfmv_v_f_f32m2(bias0, vl);
// three shifted loads per row give the 3 horizontal kernel taps
vfloat32m2_t _r0_0_7 = vle32_v_f32m2(r0, vl);
vfloat32m2_t _r0_1_8 = vle32_v_f32m2(r0 + 1, vl);
vfloat32m2_t _r0_2_9 = vle32_v_f32m2(r0 + 2, vl);
vfloat32m2_t _r1_0_7 = vle32_v_f32m2(r1, vl);
vfloat32m2_t _r1_1_8 = vle32_v_f32m2(r1 + 1, vl);
vfloat32m2_t _r1_2_9 = vle32_v_f32m2(r1 + 2, vl);
vfloat32m2_t _r2_0_7 = vle32_v_f32m2(r2, vl);
vfloat32m2_t _r2_1_8 = vle32_v_f32m2(r2 + 1, vl);
vfloat32m2_t _r2_2_9 = vle32_v_f32m2(r2 + 2, vl);
vfloat32m2_t _r3_0_7 = vle32_v_f32m2(r3, vl);
vfloat32m2_t _r3_1_8 = vle32_v_f32m2(r3 + 1, vl);
vfloat32m2_t _r3_2_9 = vle32_v_f32m2(r3 + 2, vl);
// output row h uses input rows r0..r2
_acc0 = vfmacc_vf_f32m2(_acc0, k00, _r0_0_7, vl);
_acc0 = vfmacc_vf_f32m2(_acc0, k01, _r0_1_8, vl);
_acc0 = vfmacc_vf_f32m2(_acc0, k02, _r0_2_9, vl);
_acc0 = vfmacc_vf_f32m2(_acc0, k10, _r1_0_7, vl);
_acc0 = vfmacc_vf_f32m2(_acc0, k11, _r1_1_8, vl);
_acc0 = vfmacc_vf_f32m2(_acc0, k12, _r1_2_9, vl);
_acc0 = vfmacc_vf_f32m2(_acc0, k20, _r2_0_7, vl);
_acc0 = vfmacc_vf_f32m2(_acc0, k21, _r2_1_8, vl);
_acc0 = vfmacc_vf_f32m2(_acc0, k22, _r2_2_9, vl);
// output row h+1 reuses r1/r2 and adds r3
_acc1 = vfmacc_vf_f32m2(_acc1, k00, _r1_0_7, vl);
_acc1 = vfmacc_vf_f32m2(_acc1, k01, _r1_1_8, vl);
_acc1 = vfmacc_vf_f32m2(_acc1, k02, _r1_2_9, vl);
_acc1 = vfmacc_vf_f32m2(_acc1, k10, _r2_0_7, vl);
_acc1 = vfmacc_vf_f32m2(_acc1, k11, _r2_1_8, vl);
_acc1 = vfmacc_vf_f32m2(_acc1, k12, _r2_2_9, vl);
_acc1 = vfmacc_vf_f32m2(_acc1, k20, _r3_0_7, vl);
_acc1 = vfmacc_vf_f32m2(_acc1, k21, _r3_1_8, vl);
_acc1 = vfmacc_vf_f32m2(_acc1, k22, _r3_2_9, vl);
vse32_v_f32m2(outptr0, _acc0, vl);
vse32_v_f32m2(outptr1, _acc1, vl);
r0 += vl;
r1 += vl;
r2 += vl;
r3 += vl;
outptr0 += vl;
outptr1 += vl;
}
// h2w4
for (; w + w_loop - 1 < out_w; w += w_loop) {
// same scheme as h2w8 but with m1 vectors (half the lanes)
vl = vsetvl_e32m1(w_loop);
vfloat32m1_t _acc0 = vfmv_v_f_f32m1(bias0, vl);
vfloat32m1_t _acc1 = vfmv_v_f_f32m1(bias0, vl);
vfloat32m1_t _r0_0_3 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _r0_1_4 = vle32_v_f32m1(r0 + 1, vl);
vfloat32m1_t _r0_2_5 = vle32_v_f32m1(r0 + 2, vl);
vfloat32m1_t _r1_0_3 = vle32_v_f32m1(r1, vl);
vfloat32m1_t _r1_1_4 = vle32_v_f32m1(r1 + 1, vl);
vfloat32m1_t _r1_2_5 = vle32_v_f32m1(r1 + 2, vl);
vfloat32m1_t _r2_0_3 = vle32_v_f32m1(r2, vl);
vfloat32m1_t _r2_1_4 = vle32_v_f32m1(r2 + 1, vl);
vfloat32m1_t _r2_2_5 = vle32_v_f32m1(r2 + 2, vl);
vfloat32m1_t _r3_0_3 = vle32_v_f32m1(r3, vl);
vfloat32m1_t _r3_1_4 = vle32_v_f32m1(r3 + 1, vl);
vfloat32m1_t _r3_2_5 = vle32_v_f32m1(r3 + 2, vl);
_acc0 = vfmacc_vf_f32m1(_acc0, k00, _r0_0_3, vl);
_acc0 = vfmacc_vf_f32m1(_acc0, k01, _r0_1_4, vl);
_acc0 = vfmacc_vf_f32m1(_acc0, k02, _r0_2_5, vl);
_acc0 = vfmacc_vf_f32m1(_acc0, k10, _r1_0_3, vl);
_acc0 = vfmacc_vf_f32m1(_acc0, k11, _r1_1_4, vl);
_acc0 = vfmacc_vf_f32m1(_acc0, k12, _r1_2_5, vl);
_acc0 = vfmacc_vf_f32m1(_acc0, k20, _r2_0_3, vl);
_acc0 = vfmacc_vf_f32m1(_acc0, k21, _r2_1_4, vl);
_acc0 = vfmacc_vf_f32m1(_acc0, k22, _r2_2_5, vl);
_acc1 = vfmacc_vf_f32m1(_acc1, k00, _r1_0_3, vl);
_acc1 = vfmacc_vf_f32m1(_acc1, k01, _r1_1_4, vl);
_acc1 = vfmacc_vf_f32m1(_acc1, k02, _r1_2_5, vl);
_acc1 = vfmacc_vf_f32m1(_acc1, k10, _r2_0_3, vl);
_acc1 = vfmacc_vf_f32m1(_acc1, k11, _r2_1_4, vl);
_acc1 = vfmacc_vf_f32m1(_acc1, k12, _r2_2_5, vl);
_acc1 = vfmacc_vf_f32m1(_acc1, k20, _r3_0_3, vl);
_acc1 = vfmacc_vf_f32m1(_acc1, k21, _r3_1_4, vl);
_acc1 = vfmacc_vf_f32m1(_acc1, k22, _r3_2_5, vl);
vse32_v_f32m1(outptr0, _acc0, vl);
vse32_v_f32m1(outptr1, _acc1, vl);
r0 += vl;
r1 += vl;
r2 += vl;
r3 += vl;
outptr0 += vl;
outptr1 += vl;
}
// tail: one output per iteration via a 3-lane dot product against each
// kernel row, reduced with vfredusum; _tmp seeds the sum with the bias
vl = vsetvl_e32m1(3);
vfloat32m1_t _k0 = vle32_v_f32m1(kernel0, vl);
vfloat32m1_t _k1 = vle32_v_f32m1(kernel0 + 3, vl);
vfloat32m1_t _k2 = vle32_v_f32m1(kernel0 + 6, vl);
vfloat32m1_t _tmp = vfmv_v_f_f32m1(bias0, vl);
// h2w_tail
for (; w < out_w; w++) {
vfloat32m1_t _r0 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _r1 = vle32_v_f32m1(r1, vl);
vfloat32m1_t _r2 = vle32_v_f32m1(r2, vl);
vfloat32m1_t _r3 = vle32_v_f32m1(r3, vl);
vfloat32m1_t _acc0 = vfmul_vv_f32m1(_k0, _r0, vl);
_acc0 = vfmacc_vv_f32m1(_acc0, _k1, _r1, vl);
_acc0 = vfmacc_vv_f32m1(_acc0, _k2, _r2, vl);
vfloat32m1_t _acc0_tmp =
vfredusum_vs_f32m1_f32m1(vundefined_f32m1(), _acc0, _tmp, vl);
float res0 = vfmv_f_s_f32m1_f32(_acc0_tmp);
vfloat32m1_t _acc1 = vfmul_vv_f32m1(_k0, _r1, vl);
_acc1 = vfmacc_vv_f32m1(_acc1, _k1, _r2, vl);
_acc1 = vfmacc_vv_f32m1(_acc1, _k2, _r3, vl);
vfloat32m1_t _acc1_tmp =
vfredusum_vs_f32m1_f32m1(vundefined_f32m1(), _acc1, _tmp, vl);
float res1 = vfmv_f_s_f32m1_f32(_acc1_tmp);
r0++;
r1++;
r2++;
r3++;
*outptr0++ = res0;
*outptr1++ = res1;
}
// advance to the next pair of output rows: the pointers moved out_w
// within the loops, and out_w + 2 == in_w, so += 2 + in_w lands each
// row pointer exactly two padded input rows further down
r0 += 2 + in_w;
r1 += 2 + in_w;
r2 += 2 + in_w;
r3 += 2 + in_w;
// skip the row written through outptr1 (and vice versa)
outptr0 += out_w;
outptr1 += out_w;
}
// h1: remaining single output row when out_h is odd (same tap scheme,
// using only r0..r2 and outptr0)
for (; h < out_h; h++) {
vl = vsetvl_e32m2(w2_loop);
int w = 0;
// h1w8 loop
for (; w + w2_loop - 1 < out_w; w += w2_loop) {
vfloat32m2_t _acc0 = vfmv_v_f_f32m2(bias0, vl);
vfloat32m2_t _r0_0_7 = vle32_v_f32m2(r0, vl);
vfloat32m2_t _r0_1_8 = vle32_v_f32m2(r0 + 1, vl);
vfloat32m2_t _r0_2_9 = vle32_v_f32m2(r0 + 2, vl);
vfloat32m2_t _r1_0_7 = vle32_v_f32m2(r1, vl);
vfloat32m2_t _r1_1_8 = vle32_v_f32m2(r1 + 1, vl);
vfloat32m2_t _r1_2_9 = vle32_v_f32m2(r1 + 2, vl);
vfloat32m2_t _r2_0_7 = vle32_v_f32m2(r2, vl);
vfloat32m2_t _r2_1_8 = vle32_v_f32m2(r2 + 1, vl);
vfloat32m2_t _r2_2_9 = vle32_v_f32m2(r2 + 2, vl);
_acc0 = vfmacc_vf_f32m2(_acc0, k00, _r0_0_7, vl);
_acc0 = vfmacc_vf_f32m2(_acc0, k01, _r0_1_8, vl);
_acc0 = vfmacc_vf_f32m2(_acc0, k02, _r0_2_9, vl);
_acc0 = vfmacc_vf_f32m2(_acc0, k10, _r1_0_7, vl);
_acc0 = vfmacc_vf_f32m2(_acc0, k11, _r1_1_8, vl);
_acc0 = vfmacc_vf_f32m2(_acc0, k12, _r1_2_9, vl);
_acc0 = vfmacc_vf_f32m2(_acc0, k20, _r2_0_7, vl);
_acc0 = vfmacc_vf_f32m2(_acc0, k21, _r2_1_8, vl);
_acc0 = vfmacc_vf_f32m2(_acc0, k22, _r2_2_9, vl);
vse32_v_f32m2(outptr0, _acc0, vl);
r0 += vl;
r1 += vl;
r2 += vl;
outptr0 += vl;
}
// h1w4
for (; w + w_loop - 1 < out_w; w += w_loop) {
vl = vsetvl_e32m1(w_loop);
vfloat32m1_t _acc0 = vfmv_v_f_f32m1(bias0, vl);
vfloat32m1_t _r0_0_3 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _r0_1_4 = vle32_v_f32m1(r0 + 1, vl);
vfloat32m1_t _r0_2_5 = vle32_v_f32m1(r0 + 2, vl);
vfloat32m1_t _r1_0_3 = vle32_v_f32m1(r1, vl);
vfloat32m1_t _r1_1_4 = vle32_v_f32m1(r1 + 1, vl);
vfloat32m1_t _r1_2_5 = vle32_v_f32m1(r1 + 2, vl);
vfloat32m1_t _r2_0_3 = vle32_v_f32m1(r2, vl);
vfloat32m1_t _r2_1_4 = vle32_v_f32m1(r2 + 1, vl);
vfloat32m1_t _r2_2_5 = vle32_v_f32m1(r2 + 2, vl);
_acc0 = vfmacc_vf_f32m1(_acc0, k00, _r0_0_3, vl);
_acc0 = vfmacc_vf_f32m1(_acc0, k01, _r0_1_4, vl);
_acc0 = vfmacc_vf_f32m1(_acc0, k02, _r0_2_5, vl);
_acc0 = vfmacc_vf_f32m1(_acc0, k10, _r1_0_3, vl);
_acc0 = vfmacc_vf_f32m1(_acc0, k11, _r1_1_4, vl);
_acc0 = vfmacc_vf_f32m1(_acc0, k12, _r1_2_5, vl);
_acc0 = vfmacc_vf_f32m1(_acc0, k20, _r2_0_3, vl);
_acc0 = vfmacc_vf_f32m1(_acc0, k21, _r2_1_4, vl);
_acc0 = vfmacc_vf_f32m1(_acc0, k22, _r2_2_5, vl);
vse32_v_f32m1(outptr0, _acc0, vl);
r0 += vl;
r1 += vl;
r2 += vl;
outptr0 += vl;
}
vl = vsetvl_e32m1(3);
vfloat32m1_t _k0 = vle32_v_f32m1(kernel0, vl);
vfloat32m1_t _k1 = vle32_v_f32m1(kernel0 + 3, vl);
vfloat32m1_t _k2 = vle32_v_f32m1(kernel0 + 6, vl);
vfloat32m1_t _tmp = vfmv_v_f_f32m1(bias0, vl);
// h1w_tail
for (; w < out_w; w++) {
vfloat32m1_t _r0 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _r1 = vle32_v_f32m1(r1, vl);
vfloat32m1_t _r2 = vle32_v_f32m1(r2, vl);
vfloat32m1_t _acc0 = vfmul_vv_f32m1(_k0, _r0, vl);
_acc0 = vfmacc_vv_f32m1(_acc0, _k1, _r1, vl);
_acc0 = vfmacc_vv_f32m1(_acc0, _k2, _r2, vl);
vfloat32m1_t _acc0_tmp =
vfredusum_vs_f32m1_f32m1(vundefined_f32m1(), _acc0, _tmp, vl);
float res0 = vfmv_f_s_f32m1_f32(_acc0_tmp);
r0++;
r1++;
r2++;
*outptr0++ = res0;
}
}
}
csi_mem_free(input_padd_buf);
return CSINN_TRUE;
}
int csi_nn_rvv_dwconv3x3s2_fp32(struct csi_tensor *input, struct csi_tensor *output,
struct csi_tensor *kernel, struct csi_tensor *bias,
struct conv2d_params *params)
{
float *input_data = (float *)input->data;
float *output_data = (float *)output->data;
float *kernel_data = (float *)kernel->data;
float *bias_data = (float *)bias->data;
int32_t batch = input->dim[0];
int32_t in_c = input->dim[1]; // group = in_channel
int32_t in_h = input->dim[2];
int32_t in_w = input->dim[3];
int32_t out_c = output->dim[1];
int32_t out_h = output->dim[2];
int32_t out_w = output->dim[3];
float *input_padd_buf =
(float *)csi_mem_alloc(in_c * (in_h + params->pad_top + params->pad_down) *
(in_w + params->pad_left + params->pad_right) * sizeof(float));
csi_nn_rvv_pad_input_fp32(
input_data, input_padd_buf, in_c, in_h, in_w, in_h + params->pad_top + params->pad_down,
in_w + params->pad_left + params->pad_right, params->pad_top, params->pad_left);
in_h = in_h + params->pad_top + params->pad_down;
in_w = in_w + params->pad_left + params->pad_right;
int tailstep = in_w - 2 * out_w + in_w;
#pragma omp parallel for num_threads(1)
for (int c = 0; c < in_c; c++) {
float *out = output_data + c * out_h * out_w;
float *outptr0 = out;
const float bias0 = bias_data ? bias_data[c] : 0.0f;
float *img0 = input_padd_buf + c * in_h * in_w;
float *r0 = img0;
float *r1 = r0 + in_w;
float *r2 = r1 + in_w;
const float *kernel0 = kernel_data + c * 9;
float k00 = kernel0[0];
float k01 = kernel0[1];
float k02 = kernel0[2];
float k10 = kernel0[3];
float k11 = kernel0[4];
float k12 = kernel0[5];
float k20 = kernel0[6];
float k21 = kernel0[7];
float k22 = kernel0[8];
int vl;
int w_loop = csrr_vlenb() / sizeof(float); // VLEN128=4 VLEN256=8
for (int h = 0; h < out_h; h++) {
vl = vsetvl_e32m1(w_loop);
int w = 0;
// h1w4 loop
for (; w + w_loop - 1 < out_w; w += w_loop) {
vfloat32m1_t _acc = vfmv_v_f_f32m1(bias0, vl);
vfloat32m1_t _r0_0_6, _r0_1_7;
vfloat32m1_t _r1_0_6, _r1_1_7;
vfloat32m1_t _r2_0_6, _r2_1_7;
vlseg2e32_v_f32m1(&_r0_0_6, &_r0_1_7, r0, vl);
r0 += 2;
vfloat32m1_t _r0_2_8 = vlse32_v_f32m1(r0, 2 * sizeof(float), vl);
r0 += (w_loop - 1) * 2;
vlseg2e32_v_f32m1(&_r1_0_6, &_r1_1_7, r1, vl);
r1 += 2;
vfloat32m1_t _r1_2_8 = vlse32_v_f32m1(r1, 2 * sizeof(float), vl);
r1 += (w_loop - 1) * 2;
vlseg2e32_v_f32m1(&_r2_0_6, &_r2_1_7, r2, vl);
r2 += 2;
vfloat32m1_t _r2_2_8 = vlse32_v_f32m1(r2, 2 * sizeof(float), vl);
r2 += (w_loop - 1) * 2;
_acc = vfmacc_vf_f32m1(_acc, k00, _r0_0_6, vl);
_acc = vfmacc_vf_f32m1(_acc, k01, _r0_1_7, vl);
_acc = vfmacc_vf_f32m1(_acc, k02, _r0_2_8, vl);
_acc = vfmacc_vf_f32m1(_acc, k10, _r1_0_6, vl);
_acc = vfmacc_vf_f32m1(_acc, k11, _r1_1_7, vl);
_acc = vfmacc_vf_f32m1(_acc, k12, _r1_2_8, vl);
_acc = vfmacc_vf_f32m1(_acc, k20, _r2_0_6, vl);
_acc = vfmacc_vf_f32m1(_acc, k21, _r2_1_7, vl);
_acc = vfmacc_vf_f32m1(_acc, k22, _r2_2_8, vl);
vse32_v_f32m1(outptr0, _acc, vl);
outptr0 += vl;
}
vl = vsetvl_e32m1(3);
vfloat32m1_t _k0 = vle32_v_f32m1(kernel0, vl);
vfloat32m1_t _k1 = vle32_v_f32m1(kernel0 + 3, vl);
vfloat32m1_t _k2 = vle32_v_f32m1(kernel0 + 6, vl);
vfloat32m1_t _tmp = vfmv_v_f_f32m1(bias0, vl);
// h1w_tail
for (; w < out_w; w++) {
vfloat32m1_t _r0 = vle32_v_f32m1(r0, vl);
vfloat32m1_t _r1 = vle32_v_f32m1(r1, vl);
vfloat32m1_t _r2 = vle32_v_f32m1(r2, vl);
vfloat32m1_t _acc0 = vfmul_vv_f32m1(_k0, _r0, vl);
_acc0 = vfmacc_vv_f32m1(_acc0, _k1, _r1, vl);
_acc0 = vfmacc_vv_f32m1(_acc0, _k2, _r2, vl);
vfloat32m1_t _acc0_tmp =
vfredusum_vs_f32m1_f32m1(vundefined_f32m1(), _acc0, _tmp, vl);
float res0 = vfmv_f_s_f32m1_f32(_acc0_tmp);
r0 += 2;
r1 += 2;
r2 += 2;
*outptr0++ = res0;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
csi_mem_free(input_padd_buf);
return CSINN_TRUE;
}
|
GB_binop__land_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__land_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__land_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__land_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_int16)
// A*D function (colscale): GB (_AxD__land_int16)
// D*A function (rowscale): GB (_DxB__land_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__land_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__land_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_int16)
// C=scalar+B GB (_bind1st__land_int16)
// C=scalar+B' GB (_bind1st_tran__land_int16)
// C=A+scalar GB (_bind2nd__land_int16)
// C=A'+scalar GB (_bind2nd_tran__land_int16)
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) && (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_INT16 || GxB_NO_LAND_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__land_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__land_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__land_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__land_int16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__land_int16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *restrict Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__land_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__land_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__land_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__land_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__land_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__land_int16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t *Cx = (int16_t *) Cx_output ;
int16_t x = (*((int16_t *) x_input)) ;
int16_t *Bx = (int16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int16_t bij = GBX (Bx, p, false) ;
Cx [p] = ((x != 0) && (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__land_int16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
int16_t y = (*((int16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int16_t aij = GBX (Ax, p, false) ;
Cx [p] = ((aij != 0) && (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__land_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__land_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
rose_v1_scalar_privatization.c | /* a local variable to transfer temp value
* It introduces "fake" data dependence since the variable is local to each iteration
* */
#include <omp.h>
int a[100];
int b[100];
void foo()
{
int i;
#pragma omp parallel for private (i)
for (i = 0; i <= 99; i += 1) {
int tmp;
tmp = a[i] + i;
b[i] = tmp;
}
}
/*
*-------------Dump the dependence graph for the first loop in a function body!------------
// Output dependence
// Loop-carried ,why CarryLevel =1????
dep SgExprStatement:tmp =((a[i]) + i); SgExprStatement:tmp =((a[i]) + i); 1*1 SCALAR_DEP; commonlevel = 1 CarryLevel = 1 SgVarRefExp:tmp@11:9->SgVarRefExp:tmp@11:9 == 0;||::
//True dependence for both
//a) loop independent (within an iteration) and
//b) loop carried (across iterations) : This is sure thing if a) holds
dep SgExprStatement:tmp =((a[i]) + i); SgExprStatement:b[i] = tmp; 1*1 SCALAR_DEP; commonlevel = 1 CarryLevel = 1 SgVarRefExp:tmp@11:9->SgVarRefExp:tmp@12:12 == 0;||::
//Anti dependence
//Loop carried(BACK_DEP) scalar
dep SgExprStatement:b[i] = tmp; SgExprStatement:tmp =((a[i]) + i); 1*1 SCALAR_BACK_DEP; commonlevel = 1 CarryLevel = 0 SgVarRefExp:tmp@12:12->SgVarRefExp:tmp@11:9 <= -1;||::
*/
|
GB_binop__isge_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__isge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__isge_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_uint32)
// A*D function (colscale): GB (_AxD__isge_uint32)
// D*A function (rowscale): GB (_DxB__isge_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_uint32)
// C=scalar+B GB (_bind1st__isge_uint32)
// C=scalar+B' GB (_bind1st_tran__isge_uint32)
// C=A+scalar GB (_bind2nd__isge_uint32)
// C=A'+scalar GB (_bind2nd_tran__isge_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_UINT32 || GxB_NO_ISGE_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__isge_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isge_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isge_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isge_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isge_uint32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isge_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__isge_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isge_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__isge_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isge_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isge_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isge_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__isge_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c: it writes
// Cx [pC] = (Ax [pA] >= y) while C = op (A', y) is being transposed.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint32_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij >= y) ; \
}

// C = op (A', y): transpose A and apply ISGE with the scalar bound to the
// 2nd argument.  Workspaces/A_slice partition A's entries among the
// nthreads threads (see GB_unop_transpose.c).
GrB_Info GB (_bind2nd_tran__isge_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
depend-5.c | /* { dg-do compile } */
/* { dg-options "-fopenmp" } */
/* Compile-only test: exercises the full range of lvalue forms accepted in
   an OpenMP depend() clause -- whole arrays, array elements, array
   sections, struct members, and expressions reached through pointer
   dereference.  The tasks' bodies are deliberately empty. */
struct T { int c[3]; };
struct S { int a; struct T *b; struct T g; };
struct S d[10];
struct S *e[10];
struct S *f;
struct S h;

void
foo (void)
{
  /* whole array object */
  #pragma omp task depend(inout: d)
  ;
  /* single array element */
  #pragma omp task depend(out: d[2])
  ;
  /* array sections: full, [low:len], [:len] */
  #pragma omp task depend(in: d[:])
  ;
  #pragma omp task depend(in: d[2:2])
  ;
  #pragma omp task depend(in: d[:2])
  ;
  /* members reached through element + pointer chains */
  #pragma omp task depend(inout: d[1].b->c[2])
  ;
  #pragma omp task depend(out: d[0].a)
  ;
  #pragma omp task depend(in: e[3]->a)
  ;
  #pragma omp task depend(inout: e[2]->b->c)
  ;
  #pragma omp task depend(in: e[1]->b->c[2])
  ;
  /* explicit dereference vs. arrow vs. indexing on the same pointer */
  #pragma omp task depend(out: (*f).a)
  ;
  #pragma omp task depend(inout: f->b->c[0])
  ;
  /* the pointer itself, the pointee, and element-0 forms */
  #pragma omp task depend(in: f)
  ;
  #pragma omp task depend(out: *f)
  ;
  #pragma omp task depend(inout: f[0])
  ;
  #pragma omp task depend(in: f[0].a)
  ;
  /* nested struct member */
  #pragma omp task depend(inout: h.g.c[2])
  ;
}
|
convolution_sgemm_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Convolution as im2col + int8 GEMM (reference x86 path).
// bottom_blob: int8 input (w x h x inch).  top_blob: int32 output
// (outw x outh x outch).  _kernel: int8 weights, laid out row-major as
// [outch][inch * kernel_h * kernel_w].
// NOTE(review): no padding is applied here -- the caller is assumed to
// have padded bottom_blob so that every (row, col) access is in bounds.
static void conv_im2col_sgemm_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel,
                                       const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char* kernel = _kernel;

    // im2row: one row per output pixel, each row holding the receptive
    // field (inch * kernel_h * kernel_w int8 values) for that pixel
    Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator);
    {
        signed char* ret = (signed char*)bottom_im2row;
        int retID = 0;
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                for (int p = 0; p < inch; p++)
                {
                    const signed char* input = bottom_blob.channel(p);
                    for (int u = 0; u < kernel_h; u++)
                    {
                        for (int v = 0; v < kernel_w; v++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;
                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;

    // GEMM dimensions (C[M x N] = A[M x K] * B[K x N])
    // int M = outch;                     // outch
    int N = outw * outh;                  // outsize or out stride
    int K = kernel_w * kernel_h * inch;   // ksize * inch

    // bottom_im2row memory packed 4 x 4: groups of 4 output pixels are
    // interleaved 2-at-a-time along K; channel i/4 + i%4 holds the
    // leftover (< 4) rows unpacked
    Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = out_size >> 2;
        int remain_size_start = nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = ii * 4;

            const signed char* img0 = bottom_im2row.row<signed char>(i);
            const signed char* img1 = bottom_im2row.row<signed char>(i + 1);
            const signed char* img2 = bottom_im2row.row<signed char>(i + 2);
            const signed char* img3 = bottom_im2row.row<signed char>(i + 3);

            signed char* tmpptr = bottom_tm.channel(i / 4);

            int q = 0;
            // interleave pairs of K values from 4 rows: [r0k0 r0k1 r1k0 r1k1 ...]
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img1[0];
                tmpptr[3] = img1[1];
                tmpptr[4] = img2[0];
                tmpptr[5] = img2[1];
                tmpptr[6] = img3[0];
                tmpptr[7] = img3[1];

                tmpptr += 8;
                img0 += 2;
                img1 += 2;
                img2 += 2;
                img3 += 2;
            }
            // odd K tail: one value from each of the 4 rows
            for (; q < inch * kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img2[0];
                tmpptr[3] = img3[0];

                tmpptr += 4;
                img0 += 1;
                img1 += 1;
                img2 += 1;
                img3 += 1;
            }
        }

        // leftover output pixels (out_size % 4): copied contiguously
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < out_size; i++)
        {
            const signed char* img0 = bottom_im2row.row<signed char>(i);

            signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];

                tmpptr += 2;
                img0 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr += 1;
                img0 += 1;
            }
        }
    }

    // kernel memory packed 4 x 4: same 4-row / 2-along-K interleave for
    // groups of 4 output channels, leftovers unpacked
    Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int p = pp * 4;

            const signed char* k0 = kernel + (p + 0) * inch * kernel_size;
            const signed char* k1 = kernel + (p + 1) * inch * kernel_size;
            const signed char* k2 = kernel + (p + 2) * inch * kernel_size;
            const signed char* k3 = kernel + (p + 3) * inch * kernel_size;

            signed char* ktmp = kernel_tm.channel(p / 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q += 2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp[2] = k1[0];
                ktmp[3] = k1[1];
                ktmp[4] = k2[0];
                ktmp[5] = k2[1];
                ktmp[6] = k3[0];
                ktmp[7] = k3[1];

                ktmp += 8;
                k0 += 2;
                k1 += 2;
                k2 += 2;
                k3 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k1[0];
                ktmp[2] = k2[0];
                ktmp[3] = k3[0];

                ktmp += 4;
                k0 += 1;
                k1 += 1;
                k2 += 1;
                k3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = remain_outch_start; p < outch; p++)
        {
            const signed char* k0 = kernel + (p + 0) * inch * kernel_size;

            signed char* ktmp = kernel_tm.channel(p / 4 + p % 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp += 2;
                k0 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp++;
                k0++;
            }
        }
    }

    // 4x4 blocked int8 GEMM: 4 output channels x 4 output pixels per
    // inner tile, accumulating in int32
    // sgemm(int M, int N, int K, float* A, float* B, float* C)
    {
        // int M = outch;                    // outch
        // int N = outw * outh;              // outsize or out stride
        // int L = kernel_w * kernel_h * inch; // ksize * inch

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int i = pp * 4;

            int* output0 = top_blob.channel(i);
            int* output1 = top_blob.channel(i + 1);
            int* output2 = top_blob.channel(i + 2);
            int* output3 = top_blob.channel(i + 3);

            int j = 0;
            // 4 channels x 4 pixels tile
            for (; j + 3 < N; j = j + 4)
            {
                signed char* vb = bottom_tm.channel(j / 4);
                signed char* va = kernel_tm.channel(i / 4);

                int sum0[4] = {0};
                int sum1[4] = {0};
                int sum2[4] = {0};
                int sum3[4] = {0};

                int k = 0;
                // consume K two at a time, matching the packed layouts
                for (; k + 1 < K; k = k + 2)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[2 * n];     // k0
                        sum0[n] += (int)va[1] * vb[2 * n + 1];

                        sum1[n] += (int)va[2] * vb[2 * n];     // k1
                        sum1[n] += (int)va[3] * vb[2 * n + 1];

                        sum2[n] += (int)va[4] * vb[2 * n];     // k2
                        sum2[n] += (int)va[5] * vb[2 * n + 1];

                        sum3[n] += (int)va[6] * vb[2 * n];     // k3
                        sum3[n] += (int)va[7] * vb[2 * n + 1];
                    }
                    va += 8;
                    vb += 8;
                }
                // odd K tail
                for (; k < K; k++)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[n];
                        sum1[n] += (int)va[1] * vb[n];
                        sum2[n] += (int)va[2] * vb[n];
                        sum3[n] += (int)va[3] * vb[n];
                    }
                    va += 4;
                    vb += 4;
                }

                for (int n = 0; n < 4; n++)
                {
                    output0[n] = sum0[n];
                    output1[n] = sum1[n];
                    output2[n] = sum2[n];
                    output3[n] = sum3[n];
                }
                output0 += 4;
                output1 += 4;
                output2 += 4;
                output3 += 4;
            }

            // leftover pixels: 4 channels x 1 pixel
            for (; j < N; j++)
            {
                int sum0 = 0;
                int sum1 = 0;
                int sum2 = 0;
                int sum3 = 0;

                signed char* vb = bottom_tm.channel(j / 4 + j % 4);
                signed char* va = kernel_tm.channel(i / 4);

                int k = 0;
                for (; k + 1 < K; k = k + 2)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum0 += (int)va[1] * vb[1];

                    sum1 += (int)va[2] * vb[0];
                    sum1 += (int)va[3] * vb[1];

                    sum2 += (int)va[4] * vb[0];
                    sum2 += (int)va[5] * vb[1];

                    sum3 += (int)va[6] * vb[0];
                    sum3 += (int)va[7] * vb[1];

                    va += 8;
                    vb += 2;
                }
                for (; k < K; k++)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum1 += (int)va[1] * vb[0];
                    sum2 += (int)va[2] * vb[0];
                    sum3 += (int)va[3] * vb[0];

                    va += 4;
                    vb += 1;
                }

                output0[0] = sum0;
                output1[0] = sum1;
                output2[0] = sum2;
                output3[0] = sum3;

                output0++;
                output1++;
                output2++;
                output3++;
            }
        }

        // leftover output channels: 1 channel x (4 | 1) pixels
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_outch_start; i < outch; i++)
        {
            int* output = top_blob.channel(i);

            int j = 0;
            for (; j + 3 < N; j = j + 4)
            {
                signed char* vb = bottom_tm.channel(j / 4);
                signed char* va = kernel_tm.channel(i / 4 + i % 4);
                int sum[4] = {0};

                int k = 0;
                for (; k + 1 < K; k = k + 2)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum[n] += (int)va[0] * vb[2 * n];
                        sum[n] += (int)va[1] * vb[2 * n + 1];
                    }
                    va += 2;
                    vb += 8;
                }
                for (; k < K; k++)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum[n] += (int)va[0] * vb[n];
                    }
                    va += 1;
                    vb += 4;
                }

                for (int n = 0; n < 4; n++)
                {
                    output[n] = sum[n];
                }
                output += 4;
            }

            for (; j < N; j++)
            {
                int sum = 0;

                signed char* vb = bottom_tm.channel(j / 4 + j % 4);
                signed char* va = kernel_tm.channel(i / 4 + i % 4);

                for (int k = 0; k < K; k++)
                {
                    sum += (int)va[0] * vb[0];

                    va += 1;
                    vb += 1;
                }
                output[0] = sum;

                output++;
            }
        }
    }

    // reference (unpacked) implementation kept for documentation:
    // // sgemm(int M, int N, int K, float* A, float* B, float* C)
    // {
    //     for (int i=0; i<M; i++)
    //     {
    //         int* output = top_blob.channel(i);
    //         for (int j=0; j<N; j++)
    //         {
    //             int sum = 0;
    //             signed char* vb = (signed char*)bottom_im2row + K * j;
    //             const signed char* va = kernel + K * i;
    //             for (int k=0; k<K; k++)
    //             {
    //                 sum += (int)va[0] * vb[0];
    //                 va += 1;
    //                 vb += 1;
    //             }
    //             output[0] = sum;
    //             output++;
    //         }
    //     }
    // }
}
// Same im2col + int8 GEMM as conv_im2col_sgemm_int8_sse, but the output
// stage dequantizes: top_blob is float and each int32 accumulator is
// written as  out = (float)sum * scale_dequant[channel] + bias[channel].
// _bias may be empty (bias pointer NULL -> 0.f is used).
static void conv_im2col_sgemm_int8_dequant_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel,
                                               const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Mat& _bias, std::vector<float> scale_dequant, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char* kernel = _kernel;
    const float* bias = _bias;

    // im2row: one row per output pixel (receptive field flattened)
    Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator);
    {
        signed char* ret = (signed char*)bottom_im2row;
        int retID = 0;
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                for (int p = 0; p < inch; p++)
                {
                    const signed char* input = bottom_blob.channel(p);
                    for (int u = 0; u < kernel_h; u++)
                    {
                        for (int v = 0; v < kernel_w; v++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;
                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;

    // int M = outch;                     // outch
    int N = outw * outh;                  // outsize or out stride
    int K = kernel_w * kernel_h * inch;   // ksize * inch

    // bottom_im2row memory packed 4 x 4 (see non-dequant variant)
    Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = out_size >> 2;
        int remain_size_start = nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = ii * 4;

            const signed char* img0 = bottom_im2row.row<signed char>(i);
            const signed char* img1 = bottom_im2row.row<signed char>(i + 1);
            const signed char* img2 = bottom_im2row.row<signed char>(i + 2);
            const signed char* img3 = bottom_im2row.row<signed char>(i + 3);

            signed char* tmpptr = bottom_tm.channel(i / 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img1[0];
                tmpptr[3] = img1[1];
                tmpptr[4] = img2[0];
                tmpptr[5] = img2[1];
                tmpptr[6] = img3[0];
                tmpptr[7] = img3[1];

                tmpptr += 8;
                img0 += 2;
                img1 += 2;
                img2 += 2;
                img3 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img2[0];
                tmpptr[3] = img3[0];

                tmpptr += 4;
                img0 += 1;
                img1 += 1;
                img2 += 1;
                img3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < out_size; i++)
        {
            const signed char* img0 = bottom_im2row.row<signed char>(i);

            signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];

                tmpptr += 2;
                img0 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr += 1;
                img0 += 1;
            }
        }
    }

    // kernel memory packed 4 x 4 (see non-dequant variant)
    Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int p = pp * 4;

            const signed char* k0 = kernel + (p + 0) * inch * kernel_size;
            const signed char* k1 = kernel + (p + 1) * inch * kernel_size;
            const signed char* k2 = kernel + (p + 2) * inch * kernel_size;
            const signed char* k3 = kernel + (p + 3) * inch * kernel_size;

            signed char* ktmp = kernel_tm.channel(p / 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q += 2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp[2] = k1[0];
                ktmp[3] = k1[1];
                ktmp[4] = k2[0];
                ktmp[5] = k2[1];
                ktmp[6] = k3[0];
                ktmp[7] = k3[1];

                ktmp += 8;
                k0 += 2;
                k1 += 2;
                k2 += 2;
                k3 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k1[0];
                ktmp[2] = k2[0];
                ktmp[3] = k3[0];

                ktmp += 4;
                k0 += 1;
                k1 += 1;
                k2 += 1;
                k3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = remain_outch_start; p < outch; p++)
        {
            const signed char* k0 = kernel + (p + 0) * inch * kernel_size;

            signed char* ktmp = kernel_tm.channel(p / 4 + p % 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp += 2;
                k0 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp++;
                k0++;
            }
        }
    }

    // 4x4 blocked GEMM with per-channel dequantize on store
    // sgemm(int M, int N, int K, float* A, float* B, float* C)
    {
        // int M = outch;                    // outch
        // int N = outw * outh;              // outsize or out stride
        // int L = kernel_w * kernel_h * inch; // ksize * inch

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int i = pp * 4;

            // per-output-channel dequant scale and bias
            const float bias0 = bias ? bias[i] : 0.f;
            const float bias1 = bias ? bias[i + 1] : 0.f;
            const float bias2 = bias ? bias[i + 2] : 0.f;
            const float bias3 = bias ? bias[i + 3] : 0.f;

            const float scale_dequant0 = scale_dequant[i];
            const float scale_dequant1 = scale_dequant[i + 1];
            const float scale_dequant2 = scale_dequant[i + 2];
            const float scale_dequant3 = scale_dequant[i + 3];

            float* output0 = top_blob.channel(i);
            float* output1 = top_blob.channel(i + 1);
            float* output2 = top_blob.channel(i + 2);
            float* output3 = top_blob.channel(i + 3);

            int j = 0;
            for (; j + 3 < N; j = j + 4)
            {
                signed char* vb = bottom_tm.channel(j / 4);
                signed char* va = kernel_tm.channel(i / 4);

                int sum0[4] = {0};
                int sum1[4] = {0};
                int sum2[4] = {0};
                int sum3[4] = {0};

                int k = 0;
                for (; k + 1 < K; k = k + 2)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[2 * n];     // k0
                        sum0[n] += (int)va[1] * vb[2 * n + 1];

                        sum1[n] += (int)va[2] * vb[2 * n];     // k1
                        sum1[n] += (int)va[3] * vb[2 * n + 1];

                        sum2[n] += (int)va[4] * vb[2 * n];     // k2
                        sum2[n] += (int)va[5] * vb[2 * n + 1];

                        sum3[n] += (int)va[6] * vb[2 * n];     // k3
                        sum3[n] += (int)va[7] * vb[2 * n + 1];
                    }
                    va += 8;
                    vb += 8;
                }
                for (; k < K; k++)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[n];
                        sum1[n] += (int)va[1] * vb[n];
                        sum2[n] += (int)va[2] * vb[n];
                        sum3[n] += (int)va[3] * vb[n];
                    }
                    va += 4;
                    vb += 4;
                }

                // dequantize: float out = (float)sum * scale + bias
                for (int n = 0; n < 4; n++)
                {
                    output0[n] = (float)sum0[n] * scale_dequant0 + bias0;
                    output1[n] = (float)sum1[n] * scale_dequant1 + bias1;
                    output2[n] = (float)sum2[n] * scale_dequant2 + bias2;
                    output3[n] = (float)sum3[n] * scale_dequant3 + bias3;
                }
                output0 += 4;
                output1 += 4;
                output2 += 4;
                output3 += 4;
            }

            for (; j < N; j++)
            {
                int sum0 = 0;
                int sum1 = 0;
                int sum2 = 0;
                int sum3 = 0;

                signed char* vb = bottom_tm.channel(j / 4 + j % 4);
                signed char* va = kernel_tm.channel(i / 4);

                int k = 0;
                for (; k + 1 < K; k = k + 2)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum0 += (int)va[1] * vb[1];

                    sum1 += (int)va[2] * vb[0];
                    sum1 += (int)va[3] * vb[1];

                    sum2 += (int)va[4] * vb[0];
                    sum2 += (int)va[5] * vb[1];

                    sum3 += (int)va[6] * vb[0];
                    sum3 += (int)va[7] * vb[1];

                    va += 8;
                    vb += 2;
                }
                for (; k < K; k++)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum1 += (int)va[1] * vb[0];
                    sum2 += (int)va[2] * vb[0];
                    sum3 += (int)va[3] * vb[0];

                    va += 4;
                    vb += 1;
                }

                output0[0] = (float)sum0 * scale_dequant0 + bias0;
                output1[0] = (float)sum1 * scale_dequant1 + bias1;
                output2[0] = (float)sum2 * scale_dequant2 + bias2;
                output3[0] = (float)sum3 * scale_dequant3 + bias3;

                output0++;
                output1++;
                output2++;
                output3++;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_outch_start; i < outch; i++)
        {
            float* output = top_blob.channel(i);

            const float bias0 = bias ? bias[i] : 0.f;
            const float scale_dequant0 = scale_dequant[i];

            int j = 0;
            for (; j + 3 < N; j = j + 4)
            {
                signed char* vb = bottom_tm.channel(j / 4);
                signed char* va = kernel_tm.channel(i / 4 + i % 4);
                int sum[4] = {0};

                int k = 0;
                for (; k + 1 < K; k = k + 2)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum[n] += (int)va[0] * vb[2 * n];
                        sum[n] += (int)va[1] * vb[2 * n + 1];
                    }
                    va += 2;
                    vb += 8;
                }
                for (; k < K; k++)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum[n] += (int)va[0] * vb[n];
                    }
                    va += 1;
                    vb += 4;
                }

                for (int n = 0; n < 4; n++)
                {
                    output[n] = (float)sum[n] * scale_dequant0 + bias0;
                }
                output += 4;
            }

            for (; j < N; j++)
            {
                int sum = 0;

                signed char* vb = bottom_tm.channel(j / 4 + j % 4);
                signed char* va = kernel_tm.channel(i / 4 + i % 4);

                for (int k = 0; k < K; k++)
                {
                    sum += (int)va[0] * vb[0];

                    va += 1;
                    vb += 1;
                }
                output[0] = (float)sum * scale_dequant0 + bias0;

                output++;
            }
        }
    }

    // reference (unpacked) implementation kept for documentation:
    // // sgemm(int M, int N, int K, float* A, float* B, float* C)
    // {
    //     for (int i=0; i<M; i++)
    //     {
    //         int* output = top_blob.channel(i);
    //         for (int j=0; j<N; j++)
    //         {
    //             int sum = 0;
    //             signed char* vb = (signed char*)bottom_im2row + K * j;
    //             const signed char* va = kernel + K * i;
    //             for (int k=0; k<K; k++)
    //             {
    //                 sum += (int)va[0] * vb[0];
    //                 va += 1;
    //                 vb += 1;
    //             }
    //             output[0] = sum;
    //             output++;
    //         }
    //     }
    // }
}
// Same im2col + int8 GEMM, but the output stage requantizes back to int8:
// out = float2int8(((float)sum * scale_requant_in + bias) * scale_requant_out).
// scale_requant holds two floats per output channel: [2*c] = input scale,
// [2*c + 1] = output scale.  _bias may be empty (NULL -> 0.f).
static void conv_im2col_sgemm_int8_requant_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel,
                                               const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Mat& _bias, std::vector<float> scale_requant, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;
    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char* kernel = _kernel;
    const float* bias = _bias;

    // im2row: one row per output pixel (receptive field flattened)
    Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator);
    {
        signed char* ret = (signed char*)bottom_im2row;
        int retID = 0;
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                for (int p = 0; p < inch; p++)
                {
                    const signed char* input = bottom_blob.channel(p);
                    for (int u = 0; u < kernel_h; u++)
                    {
                        for (int v = 0; v < kernel_w; v++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;
                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;

    // int M = outch;                     // outch
    int N = outw * outh;                  // outsize or out stride
    int K = kernel_w * kernel_h * inch;   // ksize * inch

    // bottom_im2row memory packed 4 x 4 (see base variant)
    Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = out_size >> 2;
        int remain_size_start = nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = ii * 4;

            const signed char* img0 = bottom_im2row.row<signed char>(i);
            const signed char* img1 = bottom_im2row.row<signed char>(i + 1);
            const signed char* img2 = bottom_im2row.row<signed char>(i + 2);
            const signed char* img3 = bottom_im2row.row<signed char>(i + 3);

            signed char* tmpptr = bottom_tm.channel(i / 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img1[0];
                tmpptr[3] = img1[1];
                tmpptr[4] = img2[0];
                tmpptr[5] = img2[1];
                tmpptr[6] = img3[0];
                tmpptr[7] = img3[1];

                tmpptr += 8;
                img0 += 2;
                img1 += 2;
                img2 += 2;
                img3 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img2[0];
                tmpptr[3] = img3[0];

                tmpptr += 4;
                img0 += 1;
                img1 += 1;
                img2 += 1;
                img3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < out_size; i++)
        {
            const signed char* img0 = bottom_im2row.row<signed char>(i);

            signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];

                tmpptr += 2;
                img0 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr += 1;
                img0 += 1;
            }
        }
    }

    // kernel memory packed 4 x 4 (see base variant)
    Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int p = pp * 4;

            const signed char* k0 = kernel + (p + 0) * inch * kernel_size;
            const signed char* k1 = kernel + (p + 1) * inch * kernel_size;
            const signed char* k2 = kernel + (p + 2) * inch * kernel_size;
            const signed char* k3 = kernel + (p + 3) * inch * kernel_size;

            signed char* ktmp = kernel_tm.channel(p / 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q += 2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp[2] = k1[0];
                ktmp[3] = k1[1];
                ktmp[4] = k2[0];
                ktmp[5] = k2[1];
                ktmp[6] = k3[0];
                ktmp[7] = k3[1];

                ktmp += 8;
                k0 += 2;
                k1 += 2;
                k2 += 2;
                k3 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k1[0];
                ktmp[2] = k2[0];
                ktmp[3] = k3[0];

                ktmp += 4;
                k0 += 1;
                k1 += 1;
                k2 += 1;
                k3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = remain_outch_start; p < outch; p++)
        {
            const signed char* k0 = kernel + (p + 0) * inch * kernel_size;

            signed char* ktmp = kernel_tm.channel(p / 4 + p % 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp += 2;
                k0 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp++;
                k0++;
            }
        }
    }

    // 4x4 blocked GEMM with per-channel requantize (int8 out) on store
    // sgemm(int M, int N, int K, float* A, float* B, float* C)
    {
        // int M = outch;                    // outch
        // int N = outw * outh;              // outsize or out stride
        // int L = kernel_w * kernel_h * inch; // ksize * inch

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int i = pp * 4;

            signed char* output0 = top_blob.channel(i);
            signed char* output1 = top_blob.channel(i + 1);
            signed char* output2 = top_blob.channel(i + 2);
            signed char* output3 = top_blob.channel(i + 3);

            const float bias0 = bias ? bias[i] : 0.f;
            const float bias1 = bias ? bias[i + 1] : 0.f;
            const float bias2 = bias ? bias[i + 2] : 0.f;
            const float bias3 = bias ? bias[i + 3] : 0.f;

            // two scales per channel: [2*c] = in scale, [2*c+1] = out scale
            const float scale_requant_in0 = scale_requant[2 * i];
            const float scale_requant_out0 = scale_requant[2 * i + 1];
            const float scale_requant_in1 = scale_requant[2 * (i + 1)];
            const float scale_requant_out1 = scale_requant[2 * (i + 1) + 1];
            const float scale_requant_in2 = scale_requant[2 * (i + 2)];
            const float scale_requant_out2 = scale_requant[2 * (i + 2) + 1];
            const float scale_requant_in3 = scale_requant[2 * (i + 3)];
            const float scale_requant_out3 = scale_requant[2 * (i + 3) + 1];

            int j = 0;
            for (; j + 3 < N; j = j + 4)
            {
                signed char* vb = bottom_tm.channel(j / 4);
                signed char* va = kernel_tm.channel(i / 4);

                int sum0[4] = {0};
                int sum1[4] = {0};
                int sum2[4] = {0};
                int sum3[4] = {0};

                int k = 0;
                for (; k + 1 < K; k = k + 2)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[2 * n];     // k0
                        sum0[n] += (int)va[1] * vb[2 * n + 1];

                        sum1[n] += (int)va[2] * vb[2 * n];     // k1
                        sum1[n] += (int)va[3] * vb[2 * n + 1];

                        sum2[n] += (int)va[4] * vb[2 * n];     // k2
                        sum2[n] += (int)va[5] * vb[2 * n + 1];

                        sum3[n] += (int)va[6] * vb[2 * n];     // k3
                        sum3[n] += (int)va[7] * vb[2 * n + 1];
                    }
                    va += 8;
                    vb += 8;
                }
                for (; k < K; k++)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[n];
                        sum1[n] += (int)va[1] * vb[n];
                        sum2[n] += (int)va[2] * vb[n];
                        sum3[n] += (int)va[3] * vb[n];
                    }
                    va += 4;
                    vb += 4;
                }

                // requantize int32 accumulators back to int8
                for (int n = 0; n < 4; n++)
                {
                    output0[n] = float2int8(((float)sum0[n] * scale_requant_in0 + bias0) * scale_requant_out0);
                    output1[n] = float2int8(((float)sum1[n] * scale_requant_in1 + bias1) * scale_requant_out1);
                    output2[n] = float2int8(((float)sum2[n] * scale_requant_in2 + bias2) * scale_requant_out2);
                    output3[n] = float2int8(((float)sum3[n] * scale_requant_in3 + bias3) * scale_requant_out3);
                }
                output0 += 4;
                output1 += 4;
                output2 += 4;
                output3 += 4;
            }

            for (; j < N; j++)
            {
                int sum0 = 0;
                int sum1 = 0;
                int sum2 = 0;
                int sum3 = 0;

                signed char* vb = bottom_tm.channel(j / 4 + j % 4);
                signed char* va = kernel_tm.channel(i / 4);

                int k = 0;
                for (; k + 1 < K; k = k + 2)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum0 += (int)va[1] * vb[1];

                    sum1 += (int)va[2] * vb[0];
                    sum1 += (int)va[3] * vb[1];

                    sum2 += (int)va[4] * vb[0];
                    sum2 += (int)va[5] * vb[1];

                    sum3 += (int)va[6] * vb[0];
                    sum3 += (int)va[7] * vb[1];

                    va += 8;
                    vb += 2;
                }
                for (; k < K; k++)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum1 += (int)va[1] * vb[0];
                    sum2 += (int)va[2] * vb[0];
                    sum3 += (int)va[3] * vb[0];

                    va += 4;
                    vb += 1;
                }

                output0[0] = float2int8(((float)sum0 * scale_requant_in0 + bias0) * scale_requant_out0);
                output1[0] = float2int8(((float)sum1 * scale_requant_in1 + bias1) * scale_requant_out1);
                output2[0] = float2int8(((float)sum2 * scale_requant_in2 + bias2) * scale_requant_out2);
                output3[0] = float2int8(((float)sum3 * scale_requant_in3 + bias3) * scale_requant_out3);

                output0++;
                output1++;
                output2++;
                output3++;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_outch_start; i < outch; i++)
        {
            signed char* output = top_blob.channel(i);

            const float bias0 = bias ? bias[i] : 0.f;
            const float scale_requant_in0 = scale_requant[2 * i];
            const float scale_requant_out0 = scale_requant[2 * i + 1];

            int j = 0;
            for (; j + 3 < N; j = j + 4)
            {
                signed char* vb = bottom_tm.channel(j / 4);
                signed char* va = kernel_tm.channel(i / 4 + i % 4);
                int sum[4] = {0};

                int k = 0;
                for (; k + 1 < K; k = k + 2)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum[n] += (int)va[0] * vb[2 * n];
                        sum[n] += (int)va[1] * vb[2 * n + 1];
                    }
                    va += 2;
                    vb += 8;
                }
                for (; k < K; k++)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum[n] += (int)va[0] * vb[n];
                    }
                    va += 1;
                    vb += 4;
                }

                for (int n = 0; n < 4; n++)
                {
                    output[n] = float2int8(((float)sum[n] * scale_requant_in0 + bias0) * scale_requant_out0);
                }
                output += 4;
            }

            for (; j < N; j++)
            {
                int sum = 0;

                signed char* vb = bottom_tm.channel(j / 4 + j % 4);
                signed char* va = kernel_tm.channel(i / 4 + i % 4);

                for (int k = 0; k < K; k++)
                {
                    sum += (int)va[0] * vb[0];

                    va += 1;
                    vb += 1;
                }
                output[0] = float2int8(((float)sum * scale_requant_in0 + bias0) * scale_requant_out0);

                output++;
            }
        }
    }

    // reference (unpacked) implementation kept for documentation:
    // // sgemm(int M, int N, int K, float* A, float* B, float* C)
    // {
    //     for (int i=0; i<M; i++)
    //     {
    //         int* output = top_blob.channel(i);
    //         for (int j=0; j<N; j++)
    //         {
    //             int sum = 0;
    //             signed char* vb = (signed char*)bottom_im2row + K * j;
    //             const signed char* va = kernel + K * i;
    //             for (int k=0; k<K; k++)
    //             {
    //                 sum += (int)va[0] * vb[0];
    //                 va += 1;
    //                 vb += 1;
    //             }
    //             output[0] = sum;
    //             output++;
    //         }
    //     }
    // }
}
|
pi-v1.c | /*
* Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
* between 0 and 1.
*
* parallel version using OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <omp.h> /* OpenMP */
#if _DEBUG_
#define _DEBUG_ 1
#else
#define _DEBUG_ 0
#endif
/*
 * Approximate pi via the midpoint rule on f(x) = 4 / (1 + x*x) over [0,1].
 * Usage: pi <num_steps>.  Returns EXIT_SUCCESS; exits with status 1 on
 * missing argument.
 *
 * Fix: the original ran the *whole* loop in every thread with x, sum and
 * i shared -- a data race (and no worksharing).  Each iteration is
 * independent, so a worksharing loop with x private and sum reduced is
 * both correct and parallel.
 */
int main(int argc, char *argv[]) {
    double x, sum=0.0, pi=0.0;
#if !_DEBUG_
    double start,end;
#endif
    int i;
    const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n";
    if (argc < 2) {
        /* fputs avoids passing a non-literal as a format string */
        fputs(Usage, stderr);
        exit(1);
    }
    int num_steps = atoi(argv[1]);
    double step = 1.0/(double) num_steps;

#if !_DEBUG_
    start= omp_get_wtime();
#endif

    /* do computation -- using all available threads */
    #pragma omp parallel for private(x) reduction(+:sum)
    for (i=0; i < num_steps; ++i) {
        x = (i+0.5)*step;             /* midpoint of subinterval i */
        sum += 4.0/(1.0+x*x);
#if _DEBUG_
        printf("thread id:%d it:%d\n", omp_get_thread_num(), i);
#endif
    }
    pi = step * sum;

#if !_DEBUG_
    end = omp_get_wtime();
    printf("Wall clock execution time = %.9f seconds\n", end-start);
#endif

    /* print results */
    printf("Value of pi = %12.10f\n", pi);
    return EXIT_SUCCESS;
}
|
DRB050-functionparameter-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Arrays passed as function parameters
*/
#include <omp.h>
#include <stdio.h>
/* Scale each of the first len entries of c by 0.5 and store the result
   in o1.  o1 and c are caller-owned arrays of at least len elements. */
void foo1(double o1[],double c[],int len)
{
  int i;
#pragma omp parallel for private (i) firstprivate (len)
  for (i = 0; i < len; i++) {
    /* each iteration touches only its own index: race-free */
    o1[i] = 0.5 * c[i];
  }
}
double o1[100];
double c[100];
/* Fill the global arrays, run foo1 over them, and print the scaled
   results (one value per line). */
int main()
{
  int i;
#pragma omp parallel for private (i)
  for (i = 0; i < 100; i++) {
    c[i] = 1.01 + i;
    o1[i] = 1.01 + i;
  }
  foo1(o1,c,100);
  for (i = 0; i < 100; i++) {
    printf("%lf\n",o1[i]);
  }
  return 0;
}
|
int main (void)
{
    /* Minimal OpenMP smoke test: report the thread count once, then exit. */
    int mits=5000;
#ifdef _OPENMP
    /* NOTE(review): printf and omp_get_num_threads() are called without
       #include <stdio.h> / <omp.h> in this file -- implicit declarations;
       the includes should be added at the top of the file. */
#pragma omp parallel
    {
        /* only one thread prints */
#pragma omp single
        printf("Running using %d threads...\n",omp_get_num_threads());
    }
#endif
    mits ++;
    return 0;
}
|
3214.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "atax.h"
/* Array initialization. */
/* Array initialization: x[i] = i*pi, A[i][j] = i*(j+1)/nx. */
static
void init_array (int nx, int ny,
                 DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
                 DATA_TYPE POLYBENCH_1D(x,NY,ny))
{
  int row, col;

  for (row = 0; row < ny; row++)
    x[row] = row * M_PI;
  for (row = 0; row < nx; row++)
    for (col = 0; col < ny; col++)
      A[row][col] = ((DATA_TYPE) row*(col+1)) / nx;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* Print the live-out vector y to stderr (20 values per line) so dead-code
   elimination cannot remove the kernel; also usable for output checking. */
static
void print_array(int nx,
DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
int i;
for (i = 0; i < nx; i++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]);
if (i % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* ATAX kernel: computes y = A^T * (A * x) with tmp = A*x as intermediate,
   offloaded via OpenMP target directives.  The whole function is timed. */
static
void kernel_atax(int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny),
DATA_TYPE POLYBENCH_1D(y,NY,ny),
DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
int i, j;
#pragma scop
{
/* Zero the accumulator vector y on the device. */
#pragma omp target teams distribute schedule(static, 8)
for (i = 0; i < _PB_NY; i++)
{
y[i] = 0;
}
/* NOTE(review): 'j' is declared outside the offloaded loop and is not
   privatized per 'i' iteration, and y[j] is read-modify-written by many
   concurrent 'i' iterations -- this looks like a data race; confirm
   whether the kernel is intentionally racy (benchmark input) before
   reuse.  Also 'schedule' is not a clause accepted by 'distribute'
   (dist_schedule is) -- TODO confirm the intended directive. */
#pragma omp target teams distribute schedule(static, 8)
for (i = 0; i < _PB_NX; i++)
{
tmp[i] = 0;
for (j = 0; j < _PB_NY; j++)
tmp[i] = tmp[i] + A[i][j] * x[j];
for (j = 0; j < _PB_NY; j++)
y[j] = y[j] + A[i][j] * tmp[i];
}
}
#pragma endscop
}
/* Standard Polybench driver: allocate, initialize, time the kernel, print
   live-out data (to defeat DCE), and free. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int nx = NX;
int ny = NY;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx);
/* Initialize array(s). */
init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_atax (nx, ny,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(x),
POLYBENCH_ARRAY(y),
POLYBENCH_ARRAY(tmp));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(x);
POLYBENCH_FREE_ARRAY(y);
POLYBENCH_FREE_ARRAY(tmp);
return 0;
}
|
sbml_integration.h | // -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef SBML_INTEGRATION_H_
#define SBML_INTEGRATION_H_
#include "biodynamo.h"
#include "core/util/io.h"
#include "core/util/timing.h"
#include <TAxis.h>
#include <TCanvas.h>
#include <TFrame.h>
#include <TGraph.h>
#include <TMultiGraph.h>
#include <TPad.h>
#include <TLegend.h>
#include <TLegendEntry.h>
#include "rrException.h"
#include "rrExecutableModel.h"
#include "rrLogger.h"
#include "rrLogger.h"
#include "rrRoadRunner.h"
#include "rrUtils.h"
#include <math.h>
#include <fstream>
#include <stdlib.h> /*rand()*/
namespace bdm {
// Define my custom cell, which extends Cell by adding an extra
// data member compartment_, L_.
// MyCell extends Cell with the state of the intracellular SBML model:
// compartment volume, species counts (L, A_0, B_0, C), rate p, and a flag
// marking cells created by a division event.
// Fix: GetL() and GetIsBornAfterDivision() are now const, consistent with
// every other getter of this class (source-compatible for all callers).
class MyCell : public Cell {
  BDM_SIM_OBJECT_HEADER(MyCell, Cell, 1, compartment_, L_,A_0_ ,B_0_, p_ ,C_ , isBornAfterDivision_);

 public:
  MyCell() {}
  explicit MyCell(const Double3& position) : Base(position) {}

  /// Division event constructor: the daughter inherits the mother's species
  /// values only when the mother is flagged as born-after-division.
  MyCell(const Event& event, SimObject* other, uint64_t new_oid = 0)
      : Base(event, other, new_oid) {
    if (auto* mother = dynamic_cast<MyCell*>(other)) {
      if (mother->GetIsBornAfterDivision()) {
        SetL(mother->GetL());
        SetA(mother->GetA());
        SetB(mother->GetB());
        SetC(mother->GetC());
        SetP(mother->GetP());
      }
    }
  }

  // Compartment volume used by the SBML integrator.
  void SetCompartment(double volume) { compartment_ = volume; }
  double GetCompartment() const { return compartment_; }
  // Lipid count L.
  void SetL(int l) { L_ = l; }
  int GetL() const { return L_; }
  // Species A_0.
  void SetA(int a) { A_0_ = a; }
  int GetA() const { return A_0_; }
  // Species B_0.
  void SetB(int b) { B_0_ = b; }
  int GetB() const { return B_0_; }
  // Species C.
  void SetC(int c) { C_ = c; }
  int GetC() const { return C_; }
  // Rate parameter p.
  void SetP(double p) { p_ = p; }
  double GetP() const { return p_; }
  // True for a cell freshly created by Divide(); cleared after its first
  // randomized re-initialization in SbmlModule::Run().
  void SetIsBornAfterDivision(bool flag) { isBornAfterDivision_ = flag; }
  bool GetIsBornAfterDivision() const { return isBornAfterDivision_; }

  // Debug helper: dump all species values to stdout.
  void PrintValues() {
    std::cout << "L: " << GetL() << "\n"
              << "A: " << GetA() << "\n"
              << "B: " << GetB() << "\n"
              << "C: " << GetC() << "\n"
              << "p: " << GetP() << "\n"
              << "is born after division: " << GetIsBornAfterDivision() << std::endl;
  }

 private:
  double compartment_ = 0;
  int L_ = 0;
  int A_0_ = 0;
  int B_0_ = 0;
  int C_ = 0;
  double p_ = 0;
  bool isBornAfterDivision_ = false;
};
// Define SbmlModule to simulate intracellular chemical reaction network.
// Biology module that runs a libRoadRunner Gillespie integration of an SBML
// reaction network inside each cell, and triggers division when the lipid
// count L exceeds a threshold.
struct SbmlModule : public BaseBiologyModule {
// Load the SBML model and configure a fixed-step Gillespie integrator with
// step dt_ = duration / steps; result_ stores one row per step (8 columns).
SbmlModule(const std::string& sbml_file, const rr::SimulateOptions& opt)
: BaseBiologyModule(gNullEventId, gNullEventId) {
rr_ = new rr::RoadRunner(sbml_file);
rr_->getSimulateOptions() = opt;
// setup integrator
rr_->setIntegrator("gillespie");
dt_ = opt.duration / opt.steps;
auto* integrator = rr_->getIntegrator();
integrator->setValue("variable_step_size", false);
integrator->setValue("initial_time_step", dt_);
integrator->setValue("maximum_time_step", dt_);
result_.resize(opt.steps, 8);
}
virtual ~SbmlModule() { delete rr_; }
// NOTE(review): this event constructor never initializes rr_ or dt_, so a
// module created this way holds an indeterminate pointer that the
// destructor deletes -- confirm the intended lifecycle.
SbmlModule(const Event& event, BaseBiologyModule* other, uint64_t new_oid = 0)
: BaseBiologyModule(event, other, new_oid) {}
/// Create a new instance of this object using the default constructor.
BaseBiologyModule* GetInstance(const Event& event, BaseBiologyModule* other,
uint64_t new_oid = 0) const override {
return new SbmlModule(event, other, new_oid);
}
/// Create a copy of this biology module.
/// NOTE(review): the implicit copy constructor copies the raw rr_ pointer,
/// so copy and original both delete it -- potential double free; confirm
/// ownership semantics.
BaseBiologyModule* GetCopy() const override { return new SbmlModule(*this); }
/// Default event handler (existing biology module won't be modified on
/// any event)
void EventHandler(const Event& event, BaseBiologyModule* other1,
BaseBiologyModule* other2 = nullptr) override {
BaseBiologyModule::EventHandler(event, other1, other2);
}
//Multiply all species by a value, excepts lipids "L" and "p"
void MultiplyAllSpecies(float value){
rr_ -> setValue("A_0", static_cast<int>(rr_ -> getValue("A_0")*value));
rr_ -> setValue("B_0", static_cast<int>(rr_ -> getValue("B_0")*value));
rr_ -> setValue("C", static_cast<int>(rr_ -> getValue("C")*value));
}
//Correct the value of all species
//FIXME: Errors are raised in the simulation
void UpdateSpecies(){
float A = rr_ -> getValue("A_0");
float B = rr_ -> getValue("B_0");
float C = rr_ -> getValue("C");
float p = rr_ -> getValue("p");
float v = rr_ -> getValue("compartment");
// std::cout << "A_0 " << -v*1e-19*A*B+v*1e+17
// << "\nB_0 " << -v*1e-19*A*B+v*1e+17
// << "\nC " << +v*1e-19*A*B
// << "L " << +v*1e-17*p*C << std::endl;
rr_ -> setValue("A_0",static_cast<int>(-v*1e-19*A*B+v*1e+17));
rr_ -> setValue("B_0",static_cast<int>(-v*1e-19*A*B+v*1e+17));
rr_ -> setValue("C",static_cast<int>(+v*1e-19*A*B));
rr_ -> setValue("L",static_cast<int>(+v*1e-17*p*C));
}
//Append volume value to text file
// Serialized with a critical section because cells run in parallel.
void SaveVolume(int t, float v){
#pragma omp critical
{
std::ofstream outfile;
outfile.open("volume.csv", std::ios_base::app); // append instead of overwrite
outfile << t<<";" << v << std::endl;
}
}
//update volume
// Derive the new compartment volume from the current lipid count L using a
// fixed density ro and membrane thickness delta.
void UpdateVolume(){
//float ro = 7.87870;
float ro =7.87870e+22;
//float r = 1e-6;
float delta = 1e-8;
float delta3 = pow(delta,3);
float L = rr_ -> getValue("L");
double newVolume = (1.0/6.0)*M_PI*delta3*pow(sqrt((L/(ro*M_PI*delta3)) -1.0/3.0)-1 ,3 );
//std::cout <<newVolume<<std::endl;
rr_ -> setValue("compartment",newVolume*1000);
}
// Per-timestep update: randomized (re)initialization on the first step and
// after a division, one integrator step, result logging, and division when
// L exceeds 20000.
void Run(SimObject* so) override {
if (auto* cell = static_cast<MyCell*>(so)) {
auto i = Simulation::GetActive()->GetScheduler()->GetSimulatedSteps();
/*First step random initialization*/
if(i == 1){
//rand 90-110
int randomSpeciesChange = rand() % 21 + 90;
std::cout << "First step, random init: "<< randomSpeciesChange << std::endl;
rr_ -> setValue("A_0", cell -> GetA() * randomSpeciesChange / 100);
rr_ -> setValue("B_0", cell -> GetB() * randomSpeciesChange / 100);
rr_ -> setValue("C", cell -> GetC() * randomSpeciesChange / 100);
rr_ -> setValue("L", cell -> GetL() * randomSpeciesChange / 100);
}
if(cell -> GetIsBornAfterDivision()){
// rand 90-110
int randomSpeciesChange = rand() % 21 + 90;
std::cout << "Random init: "<< randomSpeciesChange << std::endl;
cell -> SetIsBornAfterDivision(false);
rr_ -> setValue("A_0", cell -> GetA() * randomSpeciesChange / 100);
rr_ -> setValue("B_0", cell -> GetB() * randomSpeciesChange / 100);
rr_ -> setValue("C", cell -> GetC() * randomSpeciesChange / 100);
rr_ -> setValue("L", cell -> GetL() * randomSpeciesChange / 100);
}
rr_->getIntegrator()->integrate(0 * dt_, dt_);
cell -> SetCompartment(rr_ -> getValue("compartment"));
cell -> SetL(rr_ -> getValue("L"));
UpdateVolume();
// NOTE(review): result_ has opt.steps rows but is indexed by the global
// simulated-step counter i -- confirm i cannot exceed opt.steps.
const auto& partial_result = rr_->getFloatingSpeciesAmountsNamedArray();
result_(i, 0) = i * dt_;
for (unsigned j = 0; j < partial_result.numCols(); j++) {
result_(i, j + 1) = partial_result(0, j);
}
//UpdateSpecies();
// Division: halve lipids and volume, scale the other species, and mark
// the cell so its daughter re-randomizes on its next Run().
if (cell -> GetL() > 20000 && active_){
//multiply lipids by 0.5
rr_ -> setValue("L", rr_ -> getValue("L")/2);
cell -> SetL(rr_ -> getValue("L"));
//UpdateSpecies();
// active_ = false; <- cells keep replicating
MultiplyAllSpecies(0.353553391);
//update Cell Values
cell -> SetA(rr_ -> getValue("A_0"));
cell -> SetB(rr_ -> getValue("B_0"));
cell -> SetC(rr_ -> getValue("C"));
cell -> SetP(rr_ -> getValue("p"));
//update volume of the cell and of the integrator
cell -> SetCompartment( cell -> GetCompartment()/2);
rr_ -> setValue("compartment", cell -> GetCompartment());
cell -> SetIsBornAfterDivision(true);
cell -> Divide();
}
}
}
const ls::DoubleMatrix& GetResult() const { return result_; }
private:
ls::DoubleMatrix result_;   // one row per step: time + species amounts
bool active_ = true;        // division enabled
rr::RoadRunner* rr_;        // owned; deleted in the destructor
double dt_;                 // integrator step size
BDM_CLASS_DEF_OVERRIDE(SbmlModule, 1);
};
// Add one cell's time series (columns of 'result': time, A, B, C, L, ...)
// as semi-transparent TGraphs to the shared multigraph, plus a legend.
inline void AddToPlot(TMultiGraph* mg, const ls::Matrix<double>* result) {
ls::Matrix<double> foo1(*result);
// Transpose so each species becomes one contiguous row for TGraph.
ls::Matrix<double> foo(*foo1.getTranspose());
int rows;
int cols;
auto** twod = foo.get2DMatrix(rows, cols);
TGraph* gr = new TGraph(cols, twod[0], twod[1]);
gr->SetLineColorAlpha(2, 0.1);
gr->SetLineWidth(1);
TGraph* gr1 = new TGraph(cols, twod[0], twod[2]);
gr1->SetLineColorAlpha(3, 0.1);
gr1->SetLineWidth(1);
TGraph* gr2 = new TGraph(cols, twod[0], twod[3]);
gr2->SetLineColorAlpha(4, 0.1);
gr2->SetLineWidth(1);
TGraph* gr3 = new TGraph(cols, twod[0], twod[4]);
gr3->SetLineColorAlpha(6, 0.1);
gr3->SetLineWidth(1);
// NOTE(review): gr4/gr5/gr6 are created but never added to mg below (their
// Add calls are commented out); ROOT will not own them, so they appear to
// leak -- confirm whether the extra curves are still wanted.
TGraph* gr4 = new TGraph(cols, twod[0], twod[5]);
gr4->SetLineColorAlpha(8, 0.1);
gr4->SetLineWidth(1);
TGraph* gr5 = new TGraph(cols, twod[0], twod[6]);
gr5->SetLineColorAlpha(8, 0.1);
gr5->SetLineWidth(1);
TGraph* gr6 = new TGraph(cols, twod[0], twod[7]);
gr6->SetLineColorAlpha(9, 0.1);
gr6->SetLineWidth(1);
mg->Add(gr); //A
mg->Add(gr1); //B
mg->Add(gr2); //C
mg->Add(gr3); //L
// mg->Add(gr5); //Aext
// mg->Add(gr6); //Bext
//mg->Add(gr4);
mg->Draw("AL C C");
auto* legend = new TLegend(0.8,0.7,0.90,0.9);
// legend->SetHeader("The Legend Title","C");
TLegendEntry *le = legend->AddEntry(gr,"A","l");
le->SetTextColor(2);
TLegendEntry *le1 = legend->AddEntry(gr1,"B","l");
le1->SetTextColor(3);
TLegendEntry *le2 = legend->AddEntry(gr2,"C","l");
le2->SetTextColor(4);
TLegendEntry *le3 = legend->AddEntry(gr3,"L","l");
le3->SetTextColor(6);
legend -> Draw();
}
// Render the recorded time series of every cell's SbmlModule into a single
// canvas and save it under 'filename' (format chosen by the extension).
inline void PlotSbmlModules(const char* filename) {
// setup plot
TCanvas c;
c.SetGrid();
TMultiGraph* mg = new TMultiGraph();
mg->SetTitle("Elementare 6;Timestep;Concentration");
// Collect one curve set per simulation object that carries exactly one
// biology module (assumed to be the SbmlModule).
Simulation::GetActive()->GetResourceManager()->ApplyOnAllElements(
[&](SimObject* so) {
auto* cell = static_cast<MyCell*>(so);
const auto& bms = cell->GetAllBiologyModules();
if (bms.size() == 1) {
AddToPlot(mg, &static_cast<SbmlModule*>(bms[0])->GetResult());
}
});
// finalize plot
// TCanvas::Update() draws the frame, after which one can change it
c.Update();
c.GetFrame()->SetBorderSize(12);
gPad->Modified();
gPad->Update();
c.Modified();
c.cd(0);
// c.BuildLegend(); // TODO position of legend
c.SaveAs(filename);
}
// Entry point: set up the BioDynaMo simulation, seed 'num-cells' random
// cells each carrying an SbmlModule, run for opt.steps timesteps, and plot
// the aggregated results.  Returns 0 on success.
inline int Simulate(int argc, const char** argv) {
auto opts = CommandLineOptions(argc, argv);
opts.AddOption<uint64_t>("n, num-cells", "10", "The total number of cells");
uint64_t num_cells = opts.Get<uint64_t>("num-cells");
// roadrunner options
rr::SimulateOptions opt;
opt.start = 0;
opt.duration = 500;
opt.steps = 2000;
// Keep the BioDynaMo timestep in sync with the integrator step size.
auto set_param = [&](Param* param) {
param->simulation_time_step_ = opt.duration / opt.steps;
};
Simulation simulation(&opts, set_param);
// Locate the SBML model relative to either the build or source tree.
std::string sbml_file = "../src/sbml_model.xml";
if (!FileExists(sbml_file)) {
sbml_file = "src/sbml_model.xml";
if (!FileExists(sbml_file)) {
Log::Error("Could not find sbml_model.xml file.");
}
}
// Define initial model
auto construct = [&](const Double3& position) {
auto* cell = new MyCell();
cell->SetPosition(position);
cell->SetDiameter(10);
cell->AddBiologyModule(new SbmlModule(sbml_file, opt));
return cell;
};
// Place cells uniformly at random in the cube [0, 200]^3.
ModelInitializer::CreateCellsRandom(0, 200, num_cells, construct);
// Run simulation
auto start = Timing::Timestamp();
simulation.GetScheduler()->Simulate(opt.steps);
auto stop = Timing::Timestamp();
std::cout << "RUNTIME " << (stop - start) << std::endl;
PlotSbmlModules("sbml-modules.svg");
std::cout << "Simulation completed successfully!" << std::endl;
return 0;
}
} // namespace bdm
#endif // SBML_INTEGRATION_H_
|
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate.h"
#include "magick/animate.h"
#include "magick/animate.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/list.h"
#include "magick/image-private.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resource_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/timer.h"
#include "magick/utility.h"
#include "magick/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to the image with an arithmetic, relational,
% or logical operator to an image. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the EvaluateImageChannel method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImageChannel(Image *image,
% const ChannelType channel,const MagickEvaluateOperator op,
% const double value,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o op: A channel op.
%
% o value: A value value.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Release the per-thread pixel scratch buffers allocated by
  AcquirePixelThreadSet() and the table itself; always returns NULL.
*/
static MagickPixelPacket **DestroyPixelThreadSet(MagickPixelPacket **pixels)
{
  register ssize_t
    n;

  assert(pixels != (MagickPixelPacket **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
  {
    if (pixels[n] == (MagickPixelPacket *) NULL)
      continue;
    pixels[n]=(MagickPixelPacket *) RelinquishMagickMemory(pixels[n]);
  }
  return((MagickPixelPacket **) RelinquishMagickMemory(pixels));
}
/*
  Allocate one scratch row of MagickPixelPackets per worker thread, each
  sized max(image->columns, number_images) and initialized from the image.
  Returns NULL on allocation failure (partially built sets are destroyed;
  the table is zeroed first so cleanup can safely skip NULL entries).
*/
static MagickPixelPacket **AcquirePixelThreadSet(const Image *image,
const size_t number_images)
{
register ssize_t
i,
j;
MagickPixelPacket
**pixels;
size_t
length,
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(MagickPixelPacket **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (MagickPixelPacket **) NULL)
return((MagickPixelPacket **) NULL);
(void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
/* Row must hold either one image row or one entry per input image
   (the median path indexes by image, the others by column). */
length=image->columns;
if (length < number_images)
length=number_images;
pixels[i]=(MagickPixelPacket *) AcquireQuantumMemory(length,
sizeof(**pixels));
if (pixels[i] == (MagickPixelPacket *) NULL)
return(DestroyPixelThreadSet(pixels));
for (j=0; j < (ssize_t) length; j++)
GetMagickPixelPacket(image,&pixels[i][j]);
}
return(pixels);
}
/* Return the larger of two doubles (y when equal). */
static inline double EvaluateMax(const double x,const double y)
{
  return(x > y ? x : y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
/*
  qsort() comparator: orders MagickPixelPackets by decreasing intensity
  (second argument's intensity minus the first's, truncated to int).
*/
static int IntensityCompare(const void *x,const void *y)
{
  const MagickPixelPacket
    *p,
    *q;

  p=(const MagickPixelPacket *) x;
  q=(const MagickPixelPacket *) y;
  return((int) MagickPixelIntensity(q)-(int) MagickPixelIntensity(p));
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/* Return the smaller of two doubles (y when equal). */
static inline double MagickMin(const double x,const double y)
{
  return(x < y ? x : y);
}
/*
  ApplyEvaluateOperator() applies evaluate operator 'op' with operand
  'value' to a single quantum 'pixel' and returns the unclamped result
  (callers clamp with ClampToQuantum).  UndefinedEvaluateOperator yields 0.
  Bitwise operators (And/Or/Xor/shifts) round 'value' to the nearest
  integer via +0.5 before converting to size_t.  Mean/Sum/Median/
  RootMeanSquare only accumulate here; the caller (see EvaluateImages)
  divides, sorts, or takes the square root afterwards.
*/
static MagickRealType ApplyEvaluateOperator(RandomInfo *random_info,
const Quantum pixel,const MagickEvaluateOperator op,
const MagickRealType value)
{
MagickRealType
result;
result=0.0;
switch (op)
{
case UndefinedEvaluateOperator:
break;
case AbsEvaluateOperator:
{
result=(MagickRealType) fabs((double) (pixel+value));
break;
}
case AddEvaluateOperator:
{
result=(MagickRealType) (pixel+value);
break;
}
case AddModulusEvaluateOperator:
{
/*
This returns a 'floored modulus' of the addition which is a
positive result. It differs from % or fmod() which returns a
'truncated modulus' result, where floor() is replaced by trunc()
and could return a negative result (which is clipped).
*/
result=pixel+value;
result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
break;
}
case AndEvaluateOperator:
{
result=(MagickRealType) ((size_t) pixel & (size_t) (value+0.5));
break;
}
case CosineEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case DivideEvaluateOperator:
{
/* Guard against division by zero: treat a zero operand as 1. */
result=pixel/(value == 0.0 ? 1.0 : value);
break;
}
case ExponentialEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*exp((double) (value*QuantumScale*
pixel)));
break;
}
case GaussianNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
GaussianNoise,value);
break;
}
case ImpulseNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
ImpulseNoise,value);
break;
}
case LaplacianNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
LaplacianNoise,value);
break;
}
case LeftShiftEvaluateOperator:
{
result=(MagickRealType) ((size_t) pixel << (size_t) (value+0.5));
break;
}
case LogEvaluateOperator:
{
/* Result stays 0 for pixels below epsilon (log undefined at 0). */
if ((QuantumScale*pixel) >= MagickEpsilon)
result=(MagickRealType) (QuantumRange*log((double) (QuantumScale*value*
pixel+1.0))/log((double) (value+1.0)));
break;
}
case MaxEvaluateOperator:
{
result=(MagickRealType) EvaluateMax((double) pixel,value);
break;
}
case MeanEvaluateOperator:
{
/* Accumulate only; the caller divides by the image count. */
result=(MagickRealType) (pixel+value);
break;
}
case MedianEvaluateOperator:
{
/* Accumulate only; the caller sorts and picks the middle entry. */
result=(MagickRealType) (pixel+value);
break;
}
case MinEvaluateOperator:
{
result=(MagickRealType) MagickMin((double) pixel,value);
break;
}
case MultiplicativeNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
MultiplicativeGaussianNoise,value);
break;
}
case MultiplyEvaluateOperator:
{
result=(MagickRealType) (value*pixel);
break;
}
case OrEvaluateOperator:
{
result=(MagickRealType) ((size_t) pixel | (size_t) (value+0.5));
break;
}
case PoissonNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
PoissonNoise,value);
break;
}
case PowEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*pow((double) (QuantumScale*pixel),
(double) value));
break;
}
case RightShiftEvaluateOperator:
{
result=(MagickRealType) ((size_t) pixel >> (size_t) (value+0.5));
break;
}
case RootMeanSquareEvaluateOperator:
{
/* Accumulate squares; the caller applies sqrt(sum/count). */
result=(MagickRealType) (pixel*pixel+value);
break;
}
case SetEvaluateOperator:
{
result=value;
break;
}
case SineEvaluateOperator:
{
result=(MagickRealType) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case SubtractEvaluateOperator:
{
result=(MagickRealType) (pixel-value);
break;
}
case SumEvaluateOperator:
{
result=(MagickRealType) (pixel+value);
break;
}
case ThresholdEvaluateOperator:
{
result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 :
QuantumRange);
break;
}
case ThresholdBlackEvaluateOperator:
{
result=(MagickRealType) (((MagickRealType) pixel <= value) ? 0 : pixel);
break;
}
case ThresholdWhiteEvaluateOperator:
{
result=(MagickRealType) (((MagickRealType) pixel > value) ? QuantumRange :
pixel);
break;
}
case UniformNoiseEvaluateOperator:
{
result=(MagickRealType) GenerateDifferentialNoise(random_info,pixel,
UniformNoise,value);
break;
}
case XorEvaluateOperator:
{
result=(MagickRealType) ((size_t) pixel ^ (size_t) (value+0.5));
break;
}
}
return(result);
}
/*
  EvaluateImage() applies 'op' with operand 'value' to every channel of the
  image: a thin convenience wrapper over EvaluateImageChannel() with
  CompositeChannels.  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType EvaluateImage(Image *image,
  const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
  return(EvaluateImageChannel(image,CompositeChannels,op,value,exception));
}
/*
  EvaluateImages() reduces an image sequence to a single image by applying
  'op' pixel-wise across the sequence: for the median operator each pixel's
  samples are sorted by intensity and the middle entry is taken; for all
  other operators the samples are folded left-to-right (the first image is
  always folded with AddEvaluateOperator to seed the accumulator), with a
  post-pass for Mean, RootMeanSquare and Multiply.  Returns a newly
  allocated image, or NULL on error (reported through 'exception').

  Fix: in the median branch the CMYK index channel was stored through
  'evaluate_indexes+i', where 'i' holds number_images after the inner loop,
  i.e. a wrong (potentially out-of-bounds) column; it now uses the current
  column 'x' like the non-median branch does.
*/
MagickExport Image *EvaluateImages(const Image *images,
  const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag "Evaluate/Image"

  CacheView
    *evaluate_view;

  Image
    *image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    **restrict evaluate_pixels,
    zero;

  RandomInfo
    **restrict random_info;

  size_t
    number_images;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    The result image inherits geometry and metadata from the first image.
  */
  image=CloneImage(images,images->columns,images->rows,MagickTrue,exception);
  if (image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      image=DestroyImage(image);
      return((Image *) NULL);
    }
  number_images=GetImageListLength(images);
  evaluate_pixels=AcquirePixelThreadSet(images,number_images);
  if (evaluate_pixels == (MagickPixelPacket **) NULL)
    {
      image=DestroyImage(image);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
      return((Image *) NULL);
    }
  /*
    Evaluate image pixels.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(images,&zero);
  random_info=AcquireRandomInfoThreadSet();
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
#endif
  evaluate_view=AcquireAuthenticCacheView(image,exception);
  if (op == MedianEvaluateOperator)
    {
      /*
        Median: collect one sample per input image at each (x,y), sort by
        decreasing intensity, and take the middle entry.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(progress,status) \
        magick_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CacheView
          *image_view;

        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        register IndexPacket
          *restrict evaluate_indexes;

        register MagickPixelPacket
          *evaluate_pixel;

        register PixelPacket
          *restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,
          image->columns,1,exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
        evaluate_pixel=evaluate_pixels[id];
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) number_images; i++)
            evaluate_pixel[i]=zero;
          next=images;
          for (i=0; i < (ssize_t) number_images; i++)
          {
            register const IndexPacket
              *indexes;

            register const PixelPacket
              *p;

            image_view=AcquireVirtualCacheView(next,exception);
            p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception);
            if (p == (const PixelPacket *) NULL)
              {
                image_view=DestroyCacheView(image_view);
                break;
              }
            indexes=GetCacheViewVirtualIndexQueue(image_view);
            evaluate_pixel[i].red=ApplyEvaluateOperator(random_info[id],
              GetPixelRed(p),op,evaluate_pixel[i].red);
            evaluate_pixel[i].green=ApplyEvaluateOperator(random_info[id],
              GetPixelGreen(p),op,evaluate_pixel[i].green);
            evaluate_pixel[i].blue=ApplyEvaluateOperator(random_info[id],
              GetPixelBlue(p),op,evaluate_pixel[i].blue);
            evaluate_pixel[i].opacity=ApplyEvaluateOperator(random_info[id],
              GetPixelAlpha(p),op,evaluate_pixel[i].opacity);
            if (image->colorspace == CMYKColorspace)
              evaluate_pixel[i].index=ApplyEvaluateOperator(random_info[id],
                *indexes,op,evaluate_pixel[i].index);
            image_view=DestroyCacheView(image_view);
            next=GetNextImageInList(next);
          }
          /*
            'i' holds number_images here (loop-exit value), so i/2 selects
            the median of the sorted samples.
          */
          qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
            IntensityCompare);
          SetPixelRed(q,ClampToQuantum(evaluate_pixel[i/2].red));
          SetPixelGreen(q,ClampToQuantum(evaluate_pixel[i/2].green));
          SetPixelBlue(q,ClampToQuantum(evaluate_pixel[i/2].blue));
          SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[i/2].opacity));
          if (image->colorspace == CMYKColorspace)
            SetPixelIndex(evaluate_indexes+x,ClampToQuantum(
              evaluate_pixel[i/2].index));  /* fix: was evaluate_indexes+i */
          q++;
        }
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_EvaluateImages)
#endif
            proceed=SetImageProgress(images,EvaluateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  else
    {
      /*
        General fold: accumulate one full row at a time across the sequence;
        the first image is added (not op'ed) to seed the accumulator.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(progress,status) \
        magick_threads(image,images,image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        CacheView
          *image_view;

        const Image
          *next;

        const int
          id = GetOpenMPThreadId();

        register IndexPacket
          *restrict evaluate_indexes;

        register ssize_t
          i,
          x;

        register MagickPixelPacket
          *evaluate_pixel;

        register PixelPacket
          *restrict q;

        if (status == MagickFalse)
          continue;
        q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,
          image->columns,1,exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        evaluate_indexes=GetCacheViewAuthenticIndexQueue(evaluate_view);
        evaluate_pixel=evaluate_pixels[id];
        for (x=0; x < (ssize_t) image->columns; x++)
          evaluate_pixel[x]=zero;
        next=images;
        for (i=0; i < (ssize_t) number_images; i++)
        {
          register const IndexPacket
            *indexes;

          register const PixelPacket
            *p;

          image_view=AcquireVirtualCacheView(next,exception);
          p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
          if (p == (const PixelPacket *) NULL)
            {
              image_view=DestroyCacheView(image_view);
              break;
            }
          indexes=GetCacheViewVirtualIndexQueue(image_view);
          for (x=0; x < (ssize_t) next->columns; x++)
          {
            evaluate_pixel[x].red=ApplyEvaluateOperator(random_info[id],
              GetPixelRed(p),i == 0 ? AddEvaluateOperator : op,
              evaluate_pixel[x].red);
            evaluate_pixel[x].green=ApplyEvaluateOperator(random_info[id],
              GetPixelGreen(p),i == 0 ? AddEvaluateOperator : op,
              evaluate_pixel[x].green);
            evaluate_pixel[x].blue=ApplyEvaluateOperator(random_info[id],
              GetPixelBlue(p),i == 0 ? AddEvaluateOperator : op,
              evaluate_pixel[x].blue);
            evaluate_pixel[x].opacity=ApplyEvaluateOperator(random_info[id],
              GetPixelAlpha(p),i == 0 ? AddEvaluateOperator : op,
              evaluate_pixel[x].opacity);
            if (image->colorspace == CMYKColorspace)
              evaluate_pixel[x].index=ApplyEvaluateOperator(random_info[id],
                GetPixelIndex(indexes+x),i == 0 ? AddEvaluateOperator : op,
                evaluate_pixel[x].index);
            p++;
          }
          image_view=DestroyCacheView(image_view);
          next=GetNextImageInList(next);
        }
        /*
          Post-passes: Mean divides, RootMeanSquare takes sqrt(sum/count),
          Multiply rescales the accumulated product by QuantumScale once per
          extra image.
        */
        if (op == MeanEvaluateOperator)
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            evaluate_pixel[x].red/=number_images;
            evaluate_pixel[x].green/=number_images;
            evaluate_pixel[x].blue/=number_images;
            evaluate_pixel[x].opacity/=number_images;
            evaluate_pixel[x].index/=number_images;
          }
        if (op == RootMeanSquareEvaluateOperator)
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            evaluate_pixel[x].red=sqrt(evaluate_pixel[x].red/number_images);
            evaluate_pixel[x].green=sqrt(evaluate_pixel[x].green/number_images);
            evaluate_pixel[x].blue=sqrt(evaluate_pixel[x].blue/number_images);
            evaluate_pixel[x].opacity=sqrt(evaluate_pixel[x].opacity/
              number_images);
            evaluate_pixel[x].index=sqrt(evaluate_pixel[x].index/number_images);
          }
        if (op == MultiplyEvaluateOperator)
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            register ssize_t
              j;

            for (j=0; j < (ssize_t) (number_images-1); j++)
            {
              evaluate_pixel[x].red*=(MagickRealType) QuantumScale;
              evaluate_pixel[x].green*=(MagickRealType) QuantumScale;
              evaluate_pixel[x].blue*=(MagickRealType) QuantumScale;
              evaluate_pixel[x].opacity*=(MagickRealType) QuantumScale;
              evaluate_pixel[x].index*=(MagickRealType) QuantumScale;
            }
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRed(q,ClampToQuantum(evaluate_pixel[x].red));
          SetPixelGreen(q,ClampToQuantum(evaluate_pixel[x].green));
          SetPixelBlue(q,ClampToQuantum(evaluate_pixel[x].blue));
          SetPixelAlpha(q,ClampToQuantum(evaluate_pixel[x].opacity));
          if (image->colorspace == CMYKColorspace)
            SetPixelIndex(evaluate_indexes+x,ClampToQuantum(
              evaluate_pixel[x].index));
          q++;
        }
        if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
          status=MagickFalse;
        if (images->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_EvaluateImages)
#endif
            proceed=SetImageProgress(images,EvaluateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
    }
  evaluate_view=DestroyCacheView(evaluate_view);
  evaluate_pixels=DestroyPixelThreadSet(evaluate_pixels);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
  EvaluateImageChannel() applies the arithmetic, relational, or logical
  operator 'op', together with the constant operand 'value', to the selected
  channels of every pixel in the image.  Returns MagickTrue on success,
  MagickFalse if the pixel cache could not be read or written.
*/
MagickExport MagickBooleanType EvaluateImageChannel(Image *image,
  const ChannelType channel,const MagickEvaluateOperator op,const double value,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **restrict random_info;  /* one generator per thread */

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Pixels are modified in place, so the image must be DirectClass.
  */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    key == ~0UL means the generator is unseeded; only then is it safe to
    process rows in nondeterministic (parallel) order -- see the
    magick_threads() condition below.
  */
  key=GetRandomSecretKey(random_info[0]);
#endif
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another iteration already failed; skip remaining rows */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        result;

      if ((channel & RedChannel) != 0)
        {
          result=ApplyEvaluateOperator(random_info[id],GetPixelRed(q),op,value);
          /*
            NOTE(review): the halving below presumably completes a mean of
            pixel and value (with the operator contributing the sum) --
            confirm against ApplyEvaluateOperator().
          */
          if (op == MeanEvaluateOperator)
            result/=2.0;
          SetPixelRed(q,ClampToQuantum(result));
        }
      if ((channel & GreenChannel) != 0)
        {
          result=ApplyEvaluateOperator(random_info[id],GetPixelGreen(q),op,
            value);
          if (op == MeanEvaluateOperator)
            result/=2.0;
          SetPixelGreen(q,ClampToQuantum(result));
        }
      if ((channel & BlueChannel) != 0)
        {
          result=ApplyEvaluateOperator(random_info[id],GetPixelBlue(q),op,
            value);
          if (op == MeanEvaluateOperator)
            result/=2.0;
          SetPixelBlue(q,ClampToQuantum(result));
        }
      if ((channel & OpacityChannel) != 0)
        {
          /*
            Matte images operate on the alpha value; otherwise on raw opacity.
          */
          if (image->matte == MagickFalse)
            {
              result=ApplyEvaluateOperator(random_info[id],GetPixelOpacity(q),
                op,value);
              if (op == MeanEvaluateOperator)
                result/=2.0;
              SetPixelOpacity(q,ClampToQuantum(result));
            }
          else
            {
              result=ApplyEvaluateOperator(random_info[id],GetPixelAlpha(q),
                op,value);
              if (op == MeanEvaluateOperator)
                result/=2.0;
              SetPixelAlpha(q,ClampToQuantum(result));
            }
        }
      if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
        {
          result=ApplyEvaluateOperator(random_info[id],GetPixelIndex(indexes+x),
            op,value);
          if (op == MeanEvaluateOperator)
            result/=2.0;
          SetPixelIndex(indexes+x,ClampToQuantum(result));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_EvaluateImageChannel)
#endif
        proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies a value to the image with an arithmetic, relational,
% or logical operator.  Use these operations to lighten or darken an image,
% to increase or decrease contrast in an image, or to produce the "negative"
% of an image.
%
% The format of the FunctionImageChannel method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const ssize_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
% MagickBooleanType FunctionImageChannel(Image *image,
% const ChannelType channel,const MagickFunction function,
% const ssize_t number_parameters,const double *argument,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ApplyFunction() maps a single quantum through the requested function and
  returns the clamped result.  Unused or missing parameters fall back to the
  documented defaults for each function.
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
  MagickRealType
    value;

  (void) exception;
  value=0.0;
  switch (function)
  {
    case PolynomialFunction:
    {
      /*
        Polynomial, evaluated by Horner's rule.  Parameters are the
        coefficients, highest order first: c0*x^3 + c1*x^2 + c2*x + c3.
      */
      register ssize_t
        n;

      for (n=0; n < (ssize_t) number_parameters; n++)
        value=value*QuantumScale*pixel+parameters[n];
      value*=QuantumRange;
      break;
    }
    case SinusoidFunction:
    {
      /*
        Sinusoid.  Parameters: frequency, phase (degrees), amplitude, bias.
      */
      double
        amplitude,
        bias,
        frequency,
        phase;

      frequency=(number_parameters > 0) ? parameters[0] : 1.0;
      phase=(number_parameters > 1) ? parameters[1] : 0.0;
      amplitude=(number_parameters > 2) ? parameters[2] : 0.5;
      bias=(number_parameters > 3) ? parameters[3] : 0.5;
      value=(MagickRealType) (QuantumRange*(amplitude*sin((double) (2.0*
        MagickPI*(frequency*QuantumScale*pixel+phase/360.0)))+bias));
      break;
    }
    case ArcsinFunction:
    {
      /*
        Arcsin, pegged at the range limits for out-of-domain inputs.
        Parameters: width, center, range, bias.
      */
      double
        bias,
        center,
        range,
        width;

      width=(number_parameters > 0) ? parameters[0] : 1.0;
      center=(number_parameters > 1) ? parameters[1] : 0.5;
      range=(number_parameters > 2) ? parameters[2] : 1.0;
      bias=(number_parameters > 3) ? parameters[3] : 0.5;
      value=2.0/width*(QuantumScale*pixel-center);
      if (value <= -1.0)
        value=bias-range/2.0;
      else
        if (value >= 1.0)
          value=bias+range/2.0;
        else
          value=(MagickRealType) (range/MagickPI*asin((double) value)+bias);
      value*=QuantumRange;
      break;
    }
    case ArctanFunction:
    {
      /*
        Arctan.  Parameters: slope, center, range, bias.
      */
      double
        bias,
        center,
        gradient,
        range;

      gradient=(number_parameters > 0) ? parameters[0] : 1.0;
      center=(number_parameters > 1) ? parameters[1] : 0.5;
      range=(number_parameters > 2) ? parameters[2] : 1.0;
      bias=(number_parameters > 3) ? parameters[3] : 0.5;
      value=(MagickRealType) (MagickPI*gradient*(QuantumScale*pixel-center));
      value=(MagickRealType) (QuantumRange*(range/MagickPI*atan((double)
        value)+bias));
      break;
    }
    case UndefinedFunction:
      break;
  }
  return(ClampToQuantum(value));
}
/*
  FunctionImage() is a convenience wrapper that applies the function to all
  channels of the image.
*/
MagickExport MagickBooleanType FunctionImage(Image *image,
  const MagickFunction function,const size_t number_parameters,
  const double *parameters,ExceptionInfo *exception)
{
  return(FunctionImageChannel(image,CompositeChannels,function,
    number_parameters,parameters,exception));
}
/*
  FunctionImageChannel() applies the given function, parameterized by the
  'parameters' array, to the selected channels of every pixel in the image.
  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType FunctionImageChannel(Image *image,
  const ChannelType channel,const MagickFunction function,
  const size_t number_parameters,const double *parameters,
  ExceptionInfo *exception)
{
#define FunctionImageTag "Function/Image "

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Pixels are modified in place, so the image must be DirectClass.
  */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&image->exception);
      return(MagickFalse);
    }
  /*
    Try the accelerated implementation first (presumably OpenCL -- confirm);
    fall through to the scalar path only if it declines.
  */
  status=AccelerateFunctionImage(image,channel,function,number_parameters,
    parameters,exception);
  if (status != MagickFalse)
    return(status);
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict q;

    if (status == MagickFalse)
      continue;  /* another iteration already failed; skip remaining rows */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ApplyFunction(GetPixelRed(q),function,
          number_parameters,parameters,exception));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ApplyFunction(GetPixelGreen(q),function,
          number_parameters,parameters,exception));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ApplyFunction(GetPixelBlue(q),function,
          number_parameters,parameters,exception));
      if ((channel & OpacityChannel) != 0)
        {
          /*
            Matte images operate on the alpha value; otherwise raw opacity.
          */
          if (image->matte == MagickFalse)
            SetPixelOpacity(q,ApplyFunction(GetPixelOpacity(q),function,
              number_parameters,parameters,exception));
          else
            SetPixelAlpha(q,ApplyFunction((Quantum) GetPixelAlpha(q),function,
              number_parameters,parameters,exception));
        }
      if (((channel & IndexChannel) != 0) && (indexes != (IndexPacket *) NULL))
        SetPixelIndex(indexes+x,ApplyFunction(GetPixelIndex(indexes+x),function,
          number_parameters,parameters,exception));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FunctionImageChannel)
#endif
        proceed=SetImageProgress(image,FunctionImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e C h a n n e l E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageChannelExtrema method is:
%
% MagickBooleanType GetImageChannelExtrema(const Image *image,
% const ChannelType channel,size_t *minima,size_t *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageExtrema() is a convenience wrapper that returns the extrema across
  all channels of the image.
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
  size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=GetImageChannelExtrema(image,CompositeChannels,minima,maxima,
    exception);
  return(status);
}
/*
  GetImageChannelExtrema() returns the extrema (rounded to the nearest
  integral quantum values) of one or more image channels in 'minima' and
  'maxima'.  Returns MagickFalse, leaving the outputs untouched, if the
  channel range could not be determined.
*/
MagickExport MagickBooleanType GetImageChannelExtrema(const Image *image,
  const ChannelType channel,size_t *minima,size_t *maxima,
  ExceptionInfo *exception)
{
  double
    max,
    min;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Initialize the locals so a failing range query can never feed
    indeterminate values into the rounding below.
  */
  min=0.0;
  max=0.0;
  status=GetImageChannelRange(image,channel,&min,&max,exception);
  if (status == MagickFalse)
    return(MagickFalse);  /* don't publish extrema from a failed scan */
  *minima=(size_t) ceil(min-0.5);   /* round the minimum to nearest */
  *maxima=(size_t) floor(max+0.5);  /* round the maximum to nearest */
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelKurtosis() returns the kurtosis and skewness of one or more
% image channels.
%
% The format of the GetImageChannelKurtosis method is:
%
% MagickBooleanType GetImageChannelKurtosis(const Image *image,
% const ChannelType channel,double *kurtosis,double *skewness,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageKurtosis() is a convenience wrapper that returns the kurtosis and
  skewness across all channels of the image.
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
  double *kurtosis,double *skewness,ExceptionInfo *exception)
{
  return(GetImageChannelKurtosis(image,CompositeChannels,kurtosis,skewness,
    exception));
}
/*
  GetImageChannelKurtosis() returns the excess kurtosis and the skewness of
  the selected channels, pooled over every selected sample.  On an empty
  selection or a zero-variance distribution the outputs remain 0.0.
  Returns MagickFalse if a pixel row could not be read.
*/
MagickExport MagickBooleanType GetImageChannelKurtosis(const Image *image,
  const ChannelType channel,double *kurtosis,double *skewness,
  ExceptionInfo *exception)
{
  double
    area,
    mean,
    standard_deviation,
    sum_squares,
    sum_cubes,
    sum_fourth_power,
    variance;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *kurtosis=0.0;
  *skewness=0.0;
  area=0.0;
  mean=0.0;
  standard_deviation=0.0;
  sum_squares=0.0;
  sum_cubes=0.0;
  sum_fourth_power=0.0;
  /*
    Accumulate the raw power sums (x, x^2, x^3, x^4) over every selected
    channel sample; 'area' counts the samples.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((channel & RedChannel) != 0)
        {
          mean+=GetPixelRed(p);
          sum_squares+=(double) GetPixelRed(p)*GetPixelRed(p);
          sum_cubes+=(double) GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
          sum_fourth_power+=(double) GetPixelRed(p)*GetPixelRed(p)*
            GetPixelRed(p)*GetPixelRed(p);
          area++;
        }
      if ((channel & GreenChannel) != 0)
        {
          mean+=GetPixelGreen(p);
          sum_squares+=(double) GetPixelGreen(p)*GetPixelGreen(p);
          sum_cubes+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
            GetPixelGreen(p);
          sum_fourth_power+=(double) GetPixelGreen(p)*GetPixelGreen(p)*
            GetPixelGreen(p)*GetPixelGreen(p);
          area++;
        }
      if ((channel & BlueChannel) != 0)
        {
          mean+=GetPixelBlue(p);
          sum_squares+=(double) GetPixelBlue(p)*GetPixelBlue(p);
          sum_cubes+=(double) GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
          sum_fourth_power+=(double) GetPixelBlue(p)*GetPixelBlue(p)*
            GetPixelBlue(p)*GetPixelBlue(p);
          area++;
        }
      if ((channel & OpacityChannel) != 0)
        {
          mean+=GetPixelOpacity(p);
          sum_squares+=(double) GetPixelOpacity(p)*GetPixelOpacity(p);
          sum_cubes+=(double) GetPixelOpacity(p)*GetPixelOpacity(p)*
            GetPixelOpacity(p);
          sum_fourth_power+=(double) GetPixelOpacity(p)*GetPixelOpacity(p)*
            GetPixelOpacity(p)*GetPixelOpacity(p);
          area++;
        }
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          mean+=GetPixelIndex(indexes+x);
          sum_squares+=(double) GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x);
          sum_cubes+=(double) GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x);
          sum_fourth_power+=(double) GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
            GetPixelIndex(indexes+x);
          area++;
        }
      p++;
    }
  }
  if (y < (ssize_t) image->rows)
    return(MagickFalse);  /* a pixel row failed to read mid-image */
  if (area != 0.0)
    {
      mean/=area;
      sum_squares/=area;
      sum_cubes/=area;
      sum_fourth_power/=area;
    }
  /*
    Clamp a tiny negative variance caused by floating-point cancellation in
    E[x^2]-E[x]^2; otherwise sqrt() returns NaN, which passes the != 0.0
    gate below and poisons both statistics.
  */
  variance=sum_squares-(mean*mean);
  if (variance < 0.0)
    variance=0.0;
  standard_deviation=sqrt(variance);
  if (standard_deviation != 0.0)
    {
      /*
        Central-moment expansions about the mean: excess kurtosis is the
        fourth central moment over sigma^4 minus 3; skewness is the third
        central moment over sigma^3.
      */
      *kurtosis=sum_fourth_power-4.0*mean*sum_cubes+6.0*mean*mean*sum_squares-
        3.0*mean*mean*mean*mean;
      *kurtosis/=standard_deviation*standard_deviation*standard_deviation*
        standard_deviation;
      *kurtosis-=3.0;
      *skewness=sum_cubes-3.0*mean*sum_squares+2.0*mean*mean*mean;
      *skewness/=standard_deviation*standard_deviation*standard_deviation;
    }
  return(MagickTrue);  /* row-read failure already returned above */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelMean() returns the mean and standard deviation of one or more
% image channels.
%
% The format of the GetImageChannelMean method is:
%
% MagickBooleanType GetImageChannelMean(const Image *image,
% const ChannelType channel,double *mean,double *standard_deviation,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageMean() is a convenience wrapper that returns the mean and standard
  deviation across all channels of the image.
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
  double *standard_deviation,ExceptionInfo *exception)
{
  return(GetImageChannelMean(image,CompositeChannels,mean,standard_deviation,
    exception));
}
/*
  GetImageChannelMean() returns the mean and standard deviation of the
  selected channels, pooled from the per-channel statistics.  Returns
  MagickFalse only if the channel statistics could not be acquired.
*/
MagickExport MagickBooleanType GetImageChannelMean(const Image *image,
  const ChannelType channel,double *mean,double *standard_deviation,
  ExceptionInfo *exception)
{
  ChannelStatistics
    *channel_statistics;

  size_t
    channels;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  channel_statistics=GetImageChannelStatistics(image,exception);
  if (channel_statistics == (ChannelStatistics *) NULL)
    return(MagickFalse);
  channels=0;
  channel_statistics[CompositeChannels].mean=0.0;
  channel_statistics[CompositeChannels].standard_deviation=0.0;
  /*
    Pool each requested channel: sum the means, and accumulate
    variance-mean^2 per channel (NOTE(review): this implies the 'variance'
    field holds E[x^2] -- confirm against GetImageChannelStatistics()).
  */
  if ((channel & RedChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[RedChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[RedChannel].variance-
        channel_statistics[RedChannel].mean*
        channel_statistics[RedChannel].mean;
      channels++;
    }
  if ((channel & GreenChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[GreenChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[GreenChannel].variance-
        channel_statistics[GreenChannel].mean*
        channel_statistics[GreenChannel].mean;
      channels++;
    }
  if ((channel & BlueChannel) != 0)
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[BlueChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[BlueChannel].variance-
        channel_statistics[BlueChannel].mean*
        channel_statistics[BlueChannel].mean;
      channels++;
    }
  if (((channel & OpacityChannel) != 0) &&
      (image->matte != MagickFalse))
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[OpacityChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[OpacityChannel].variance-
        channel_statistics[OpacityChannel].mean*
        channel_statistics[OpacityChannel].mean;
      channels++;
    }
  if (((channel & IndexChannel) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      channel_statistics[CompositeChannels].mean+=
        channel_statistics[BlackChannel].mean;
      channel_statistics[CompositeChannels].standard_deviation+=
        channel_statistics[BlackChannel].variance-
        channel_statistics[BlackChannel].mean*
        channel_statistics[BlackChannel].mean;
      channels++;
    }
  /*
    Guard against an empty selection (e.g. OpacityChannel requested on an
    image without a matte): dividing by zero channels would yield NaN for
    both outputs.  Also clamp a tiny negative pooled variance from
    floating-point cancellation before the sqrt().
  */
  if (channels != 0)
    {
      double
        variance;

      channel_statistics[CompositeChannels].mean/=channels;
      variance=channel_statistics[CompositeChannels].standard_deviation/
        channels;
      if (variance < 0.0)
        variance=0.0;
      channel_statistics[CompositeChannels].standard_deviation=sqrt(variance);
    }
  *mean=channel_statistics[CompositeChannels].mean;
  *standard_deviation=channel_statistics[CompositeChannels].standard_deviation;
  channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    channel_statistics);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l M o m e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelMoments() returns the normalized moments of one or more image
% channels.
%
% The format of the GetImageChannelMoments method is:
%
% ChannelMoments *GetImageChannelMoments(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageChannelMoments() computes, for each channel and for the composite
  channel, the centroid, the equivalent ellipse (axes, angle, eccentricity,
  intensity), and the Hu invariant moments.  The caller owns (and must
  relinquish) the returned array of CompositeChannels+1 entries; NULL is
  returned on allocation failure or when a pixel row could not be read.
*/
MagickExport ChannelMoments *GetImageChannelMoments(const Image *image,
  ExceptionInfo *exception)
{
#define MaxNumberImageMoments 8

  ChannelMoments
    *channel_moments;

  double
    M00[CompositeChannels+1],  /* raw and central moments, indexed Mpq */
    M01[CompositeChannels+1],
    M02[CompositeChannels+1],
    M03[CompositeChannels+1],
    M10[CompositeChannels+1],
    M11[CompositeChannels+1],
    M12[CompositeChannels+1],
    M20[CompositeChannels+1],
    M21[CompositeChannels+1],
    M22[CompositeChannels+1],
    M30[CompositeChannels+1];

  MagickPixelPacket
    pixel;

  PointInfo
    centroid[CompositeChannels+1];

  ssize_t
    channel,
    channels,
    y;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  length=CompositeChannels+1UL;
  channel_moments=(ChannelMoments *) AcquireQuantumMemory(length,
    sizeof(*channel_moments));
  if (channel_moments == (ChannelMoments *) NULL)
    return(channel_moments);
  (void) ResetMagickMemory(channel_moments,0,length*sizeof(*channel_moments));
  (void) ResetMagickMemory(centroid,0,sizeof(centroid));
  (void) ResetMagickMemory(M00,0,sizeof(M00));
  (void) ResetMagickMemory(M01,0,sizeof(M01));
  (void) ResetMagickMemory(M02,0,sizeof(M02));
  (void) ResetMagickMemory(M03,0,sizeof(M03));
  (void) ResetMagickMemory(M10,0,sizeof(M10));
  (void) ResetMagickMemory(M11,0,sizeof(M11));
  (void) ResetMagickMemory(M12,0,sizeof(M12));
  (void) ResetMagickMemory(M20,0,sizeof(M20));
  (void) ResetMagickMemory(M21,0,sizeof(M21));
  (void) ResetMagickMemory(M22,0,sizeof(M22));
  (void) ResetMagickMemory(M30,0,sizeof(M30));
  GetMagickPixelPacket(image,&pixel);
  /*
    First pass: accumulate the raw moments M00, M10, M01 needed for the
    per-channel center of mass.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    /*
      Compute center of mass (centroid).
    */
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      M00[RedChannel]+=QuantumScale*pixel.red;
      M10[RedChannel]+=x*QuantumScale*pixel.red;
      M01[RedChannel]+=y*QuantumScale*pixel.red;
      M00[GreenChannel]+=QuantumScale*pixel.green;
      M10[GreenChannel]+=x*QuantumScale*pixel.green;
      M01[GreenChannel]+=y*QuantumScale*pixel.green;
      M00[BlueChannel]+=QuantumScale*pixel.blue;
      M10[BlueChannel]+=x*QuantumScale*pixel.blue;
      M01[BlueChannel]+=y*QuantumScale*pixel.blue;
      if (image->matte != MagickFalse)
        {
          M00[OpacityChannel]+=QuantumScale*pixel.opacity;
          M10[OpacityChannel]+=x*QuantumScale*pixel.opacity;
          M01[OpacityChannel]+=y*QuantumScale*pixel.opacity;
        }
      if (image->colorspace == CMYKColorspace)
        {
          M00[IndexChannel]+=QuantumScale*pixel.index;
          M10[IndexChannel]+=x*QuantumScale*pixel.index;
          M01[IndexChannel]+=y*QuantumScale*pixel.index;
        }
      p++;
    }
  }
  for (channel=0; channel <= CompositeChannels; channel++)
  {
    /*
      Compute center of mass (centroid).  A near-empty channel is given the
      geometric center of the image; MagickEpsilon keeps the later divisions
      by M00 finite.
    */
    if (M00[channel] < MagickEpsilon)
      {
        M00[channel]+=MagickEpsilon;
        centroid[channel].x=(double) image->columns/2.0;
        centroid[channel].y=(double) image->rows/2.0;
        continue;
      }
    M00[channel]+=MagickEpsilon;
    centroid[channel].x=M10[channel]/M00[channel];
    centroid[channel].y=M01[channel]/M00[channel];
  }
  /*
    Second pass: accumulate the central moments about each channel centroid.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register ssize_t
      x;

    /*
      Compute the image moments.
    */
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    indexes=GetVirtualIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      M11[RedChannel]+=(x-centroid[RedChannel].x)*(y-
        centroid[RedChannel].y)*QuantumScale*pixel.red;
      M20[RedChannel]+=(x-centroid[RedChannel].x)*(x-
        centroid[RedChannel].x)*QuantumScale*pixel.red;
      M02[RedChannel]+=(y-centroid[RedChannel].y)*(y-
        centroid[RedChannel].y)*QuantumScale*pixel.red;
      M21[RedChannel]+=(x-centroid[RedChannel].x)*(x-
        centroid[RedChannel].x)*(y-centroid[RedChannel].y)*QuantumScale*
        pixel.red;
      M12[RedChannel]+=(x-centroid[RedChannel].x)*(y-
        centroid[RedChannel].y)*(y-centroid[RedChannel].y)*QuantumScale*
        pixel.red;
      M22[RedChannel]+=(x-centroid[RedChannel].x)*(x-
        centroid[RedChannel].x)*(y-centroid[RedChannel].y)*(y-
        centroid[RedChannel].y)*QuantumScale*pixel.red;
      M30[RedChannel]+=(x-centroid[RedChannel].x)*(x-
        centroid[RedChannel].x)*(x-centroid[RedChannel].x)*QuantumScale*
        pixel.red;
      M03[RedChannel]+=(y-centroid[RedChannel].y)*(y-
        centroid[RedChannel].y)*(y-centroid[RedChannel].y)*QuantumScale*
        pixel.red;
      M11[GreenChannel]+=(x-centroid[GreenChannel].x)*(y-
        centroid[GreenChannel].y)*QuantumScale*pixel.green;
      M20[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
        centroid[GreenChannel].x)*QuantumScale*pixel.green;
      M02[GreenChannel]+=(y-centroid[GreenChannel].y)*(y-
        centroid[GreenChannel].y)*QuantumScale*pixel.green;
      M21[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
        centroid[GreenChannel].x)*(y-centroid[GreenChannel].y)*QuantumScale*
        pixel.green;
      M12[GreenChannel]+=(x-centroid[GreenChannel].x)*(y-
        centroid[GreenChannel].y)*(y-centroid[GreenChannel].y)*QuantumScale*
        pixel.green;
      M22[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
        centroid[GreenChannel].x)*(y-centroid[GreenChannel].y)*(y-
        centroid[GreenChannel].y)*QuantumScale*pixel.green;
      M30[GreenChannel]+=(x-centroid[GreenChannel].x)*(x-
        centroid[GreenChannel].x)*(x-centroid[GreenChannel].x)*QuantumScale*
        pixel.green;
      M03[GreenChannel]+=(y-centroid[GreenChannel].y)*(y-
        centroid[GreenChannel].y)*(y-centroid[GreenChannel].y)*QuantumScale*
        pixel.green;
      M11[BlueChannel]+=(x-centroid[BlueChannel].x)*(y-
        centroid[BlueChannel].y)*QuantumScale*pixel.blue;
      M20[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
        centroid[BlueChannel].x)*QuantumScale*pixel.blue;
      M02[BlueChannel]+=(y-centroid[BlueChannel].y)*(y-
        centroid[BlueChannel].y)*QuantumScale*pixel.blue;
      M21[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
        centroid[BlueChannel].x)*(y-centroid[BlueChannel].y)*QuantumScale*
        pixel.blue;
      M12[BlueChannel]+=(x-centroid[BlueChannel].x)*(y-
        centroid[BlueChannel].y)*(y-centroid[BlueChannel].y)*QuantumScale*
        pixel.blue;
      M22[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
        centroid[BlueChannel].x)*(y-centroid[BlueChannel].y)*(y-
        centroid[BlueChannel].y)*QuantumScale*pixel.blue;
      M30[BlueChannel]+=(x-centroid[BlueChannel].x)*(x-
        centroid[BlueChannel].x)*(x-centroid[BlueChannel].x)*QuantumScale*
        pixel.blue;
      M03[BlueChannel]+=(y-centroid[BlueChannel].y)*(y-
        centroid[BlueChannel].y)*(y-centroid[BlueChannel].y)*QuantumScale*
        pixel.blue;
      if (image->matte != MagickFalse)
        {
          M11[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(y-
            centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
          M20[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
            centroid[OpacityChannel].x)*QuantumScale*pixel.opacity;
          M02[OpacityChannel]+=(y-centroid[OpacityChannel].y)*(y-
            centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
          M21[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
            centroid[OpacityChannel].x)*(y-centroid[OpacityChannel].y)*
            QuantumScale*pixel.opacity;
          M12[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(y-
            centroid[OpacityChannel].y)*(y-centroid[OpacityChannel].y)*
            QuantumScale*pixel.opacity;
          M22[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
            centroid[OpacityChannel].x)*(y-centroid[OpacityChannel].y)*(y-
            centroid[OpacityChannel].y)*QuantumScale*pixel.opacity;
          M30[OpacityChannel]+=(x-centroid[OpacityChannel].x)*(x-
            centroid[OpacityChannel].x)*(x-centroid[OpacityChannel].x)*
            QuantumScale*pixel.opacity;
          M03[OpacityChannel]+=(y-centroid[OpacityChannel].y)*(y-
            centroid[OpacityChannel].y)*(y-centroid[OpacityChannel].y)*
            QuantumScale*pixel.opacity;
        }
      if (image->colorspace == CMYKColorspace)
        {
          M11[IndexChannel]+=(x-centroid[IndexChannel].x)*(y-
            centroid[IndexChannel].y)*QuantumScale*pixel.index;
          M20[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
            centroid[IndexChannel].x)*QuantumScale*pixel.index;
          M02[IndexChannel]+=(y-centroid[IndexChannel].y)*(y-
            centroid[IndexChannel].y)*QuantumScale*pixel.index;
          M21[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
            centroid[IndexChannel].x)*(y-centroid[IndexChannel].y)*
            QuantumScale*pixel.index;
          M12[IndexChannel]+=(x-centroid[IndexChannel].x)*(y-
            centroid[IndexChannel].y)*(y-centroid[IndexChannel].y)*
            QuantumScale*pixel.index;
          M22[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
            centroid[IndexChannel].x)*(y-centroid[IndexChannel].y)*(y-
            centroid[IndexChannel].y)*QuantumScale*pixel.index;
          M30[IndexChannel]+=(x-centroid[IndexChannel].x)*(x-
            centroid[IndexChannel].x)*(x-centroid[IndexChannel].x)*
            QuantumScale*pixel.index;
          M03[IndexChannel]+=(y-centroid[IndexChannel].y)*(y-
            centroid[IndexChannel].y)*(y-centroid[IndexChannel].y)*
            QuantumScale*pixel.index;
        }
      p++;
    }
  }
  /*
    Fold the per-channel moments into the composite channel, averaged over
    the number of contributing channels.
  */
  channels=3;
  M00[CompositeChannels]+=(M00[RedChannel]+M00[GreenChannel]+M00[BlueChannel]);
  M01[CompositeChannels]+=(M01[RedChannel]+M01[GreenChannel]+M01[BlueChannel]);
  M02[CompositeChannels]+=(M02[RedChannel]+M02[GreenChannel]+M02[BlueChannel]);
  M03[CompositeChannels]+=(M03[RedChannel]+M03[GreenChannel]+M03[BlueChannel]);
  M10[CompositeChannels]+=(M10[RedChannel]+M10[GreenChannel]+M10[BlueChannel]);
  M11[CompositeChannels]+=(M11[RedChannel]+M11[GreenChannel]+M11[BlueChannel]);
  M12[CompositeChannels]+=(M12[RedChannel]+M12[GreenChannel]+M12[BlueChannel]);
  M20[CompositeChannels]+=(M20[RedChannel]+M20[GreenChannel]+M20[BlueChannel]);
  M21[CompositeChannels]+=(M21[RedChannel]+M21[GreenChannel]+M21[BlueChannel]);
  M22[CompositeChannels]+=(M22[RedChannel]+M22[GreenChannel]+M22[BlueChannel]);
  M30[CompositeChannels]+=(M30[RedChannel]+M30[GreenChannel]+M30[BlueChannel]);
  if (image->matte != MagickFalse)
    {
      channels+=1;
      M00[CompositeChannels]+=M00[OpacityChannel];
      M01[CompositeChannels]+=M01[OpacityChannel];
      M02[CompositeChannels]+=M02[OpacityChannel];
      M03[CompositeChannels]+=M03[OpacityChannel];
      M10[CompositeChannels]+=M10[OpacityChannel];
      M11[CompositeChannels]+=M11[OpacityChannel];
      M12[CompositeChannels]+=M12[OpacityChannel];
      M20[CompositeChannels]+=M20[OpacityChannel];
      M21[CompositeChannels]+=M21[OpacityChannel];
      M22[CompositeChannels]+=M22[OpacityChannel];
      M30[CompositeChannels]+=M30[OpacityChannel];
    }
  if (image->colorspace == CMYKColorspace)
    {
      channels+=1;
      M00[CompositeChannels]+=M00[IndexChannel];
      M01[CompositeChannels]+=M01[IndexChannel];
      M02[CompositeChannels]+=M02[IndexChannel];
      M03[CompositeChannels]+=M03[IndexChannel];
      M10[CompositeChannels]+=M10[IndexChannel];
      M11[CompositeChannels]+=M11[IndexChannel];
      M12[CompositeChannels]+=M12[IndexChannel];
      M20[CompositeChannels]+=M20[IndexChannel];
      M21[CompositeChannels]+=M21[IndexChannel];
      M22[CompositeChannels]+=M22[IndexChannel];
      M30[CompositeChannels]+=M30[IndexChannel];
    }
  M00[CompositeChannels]/=(double) channels;
  M01[CompositeChannels]/=(double) channels;
  M02[CompositeChannels]/=(double) channels;
  M03[CompositeChannels]/=(double) channels;
  M10[CompositeChannels]/=(double) channels;
  M11[CompositeChannels]/=(double) channels;
  M12[CompositeChannels]/=(double) channels;
  M20[CompositeChannels]/=(double) channels;
  M21[CompositeChannels]/=(double) channels;
  M22[CompositeChannels]/=(double) channels;
  M30[CompositeChannels]/=(double) channels;
  for (channel=0; channel <= CompositeChannels; channel++)
  {
    /*
      Compute elliptical angle, major and minor axes, eccentricity, &
      intensity from the second-order central moments.
    */
    channel_moments[channel].centroid=centroid[channel];
    channel_moments[channel].ellipse_axis.x=sqrt((2.0/M00[channel])*
      ((M20[channel]+M02[channel])+sqrt(4.0*M11[channel]*M11[channel]+
      (M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_axis.y=sqrt((2.0/M00[channel])*
      ((M20[channel]+M02[channel])-sqrt(4.0*M11[channel]*M11[channel]+
      (M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
    channel_moments[channel].ellipse_angle=RadiansToDegrees(0.5*atan(2.0*
      M11[channel]/(M20[channel]-M02[channel]+MagickEpsilon)));
    channel_moments[channel].ellipse_eccentricity=sqrt(1.0-(
      channel_moments[channel].ellipse_axis.y/
      (channel_moments[channel].ellipse_axis.x+MagickEpsilon)));
    channel_moments[channel].ellipse_intensity=M00[channel]/
      (MagickPI*channel_moments[channel].ellipse_axis.x*
      channel_moments[channel].ellipse_axis.y+MagickEpsilon);
  }
  for (channel=0; channel <= CompositeChannels; channel++)
  {
    /*
      Normalize image moments: eta_pq = mu_pq / mu_00^(1+(p+q)/2), making
      the values scale invariant.
    */
    M10[channel]=0.0;
    M01[channel]=0.0;
    M11[channel]/=pow(M00[channel],1.0+(1.0+1.0)/2.0);
    M20[channel]/=pow(M00[channel],1.0+(2.0+0.0)/2.0);
    M02[channel]/=pow(M00[channel],1.0+(0.0+2.0)/2.0);
    M21[channel]/=pow(M00[channel],1.0+(2.0+1.0)/2.0);
    M12[channel]/=pow(M00[channel],1.0+(1.0+2.0)/2.0);
    M22[channel]/=pow(M00[channel],1.0+(2.0+2.0)/2.0);
    M30[channel]/=pow(M00[channel],1.0+(3.0+0.0)/2.0);
    M03[channel]/=pow(M00[channel],1.0+(0.0+3.0)/2.0);
    M00[channel]=1.0;
  }
  for (channel=0; channel <= CompositeChannels; channel++)
  {
    /*
      Compute Hu invariant moments I[0]..I[7] from the normalized moments.
    */
    channel_moments[channel].I[0]=M20[channel]+M02[channel];
    channel_moments[channel].I[1]=(M20[channel]-M02[channel])*
      (M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel];
    channel_moments[channel].I[2]=(M30[channel]-3.0*M12[channel])*
      (M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])*
      (3.0*M21[channel]-M03[channel]);
    channel_moments[channel].I[3]=(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])+(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]);
    channel_moments[channel].I[4]=(M30[channel]-3.0*M12[channel])*
      (M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])*
      (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]));
    channel_moments[channel].I[5]=(M20[channel]-M02[channel])*
      ((M30[channel]+M12[channel])*(M30[channel]+M12[channel])-
      (M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+
      4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]);
    channel_moments[channel].I[6]=(3.0*M21[channel]-M03[channel])*
      (M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])*
      (M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
      (M21[channel]+M03[channel]));
    channel_moments[channel].I[7]=M11[channel]*((M30[channel]+M12[channel])*
      (M30[channel]+M12[channel])-(M03[channel]+M21[channel])*
      (M03[channel]+M21[channel]))-(M20[channel]-M02[channel])*
      (M30[channel]+M12[channel])*(M03[channel]+M21[channel]);
  }
  /*
    If the second pass broke out early (a row failed to read), free the
    result and return NULL.
  */
  if (y < (ssize_t) image->rows)
    channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
  return(channel_moments);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l P e r c e p t u a l H a s h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelPerceptualHash() returns the perceptual hash of one or more
% image channels.
%
% The format of the GetImageChannelPerceptualHash method is:
%
% ChannelPerceptualHash *GetImageChannelPerceptualHash(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelPerceptualHash *GetImageChannelPerceptualHash(
  const Image *image,ExceptionInfo *exception)
{
  /*
    Compute the perceptual hash: -log10 of the image moments taken in two
    colorspaces (sRGB -> P[], HCLp -> Q[]) over a blurred copy of the image.
    Returns a CompositeChannels+1 array the caller must relinquish, or NULL
    on failure.

    Fixes: the original leaked hash_image when TransformImageColorspace()
    failed (both passes) and leaked moments when allocating perceptual_hash
    failed.
  */
  ChannelMoments
    *moments;

  ChannelPerceptualHash
    *perceptual_hash;

  Image
    *hash_image;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    channel;

  /*
    Blur then transform to sRGB colorspace.
  */
  hash_image=BlurImage(image,0.0,1.0,exception);
  if (hash_image == (Image *) NULL)
    return((ChannelPerceptualHash *) NULL);
  hash_image->depth=8;
  status=TransformImageColorspace(hash_image,sRGBColorspace);
  if (status == MagickFalse)
    {
      hash_image=DestroyImage(hash_image);  /* was leaked on failure */
      return((ChannelPerceptualHash *) NULL);
    }
  moments=GetImageChannelMoments(hash_image,exception);
  hash_image=DestroyImage(hash_image);
  if (moments == (ChannelMoments *) NULL)
    return((ChannelPerceptualHash *) NULL);
  perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
    CompositeChannels+1UL,sizeof(*perceptual_hash));
  if (perceptual_hash == (ChannelPerceptualHash *) NULL)
    {
      moments=(ChannelMoments *) RelinquishMagickMemory(moments);  /* was leaked */
      return((ChannelPerceptualHash *) NULL);
    }
  for (channel=0; channel <= CompositeChannels; channel++)
    for (i=0; i < MaximumNumberOfImageMoments; i++)
      perceptual_hash[channel].P[i]=(-MagickLog10(moments[channel].I[i]));
  moments=(ChannelMoments *) RelinquishMagickMemory(moments);
  /*
    Blur then transform to HCLp colorspace.
  */
  hash_image=BlurImage(image,0.0,1.0,exception);
  if (hash_image == (Image *) NULL)
    {
      perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        perceptual_hash);
      return((ChannelPerceptualHash *) NULL);
    }
  hash_image->depth=8;
  status=TransformImageColorspace(hash_image,HCLpColorspace);
  if (status == MagickFalse)
    {
      hash_image=DestroyImage(hash_image);  /* was leaked on failure */
      perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        perceptual_hash);
      return((ChannelPerceptualHash *) NULL);
    }
  moments=GetImageChannelMoments(hash_image,exception);
  hash_image=DestroyImage(hash_image);
  if (moments == (ChannelMoments *) NULL)
    {
      perceptual_hash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        perceptual_hash);
      return((ChannelPerceptualHash *) NULL);
    }
  for (channel=0; channel <= CompositeChannels; channel++)
    for (i=0; i < MaximumNumberOfImageMoments; i++)
      perceptual_hash[channel].Q[i]=(-MagickLog10(moments[channel].I[i]));
  moments=(ChannelMoments *) RelinquishMagickMemory(moments);
  return(perceptual_hash);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelRange() returns the range of one or more image channels.
%
% The format of the GetImageChannelRange method is:
%
% MagickBooleanType GetImageChannelRange(const Image *image,
% const ChannelType channel,double *minima,double *maxima,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,
  double *minima,double *maxima,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: range across all channels (CompositeChannels).
  */
  return(GetImageChannelRange(image,CompositeChannels,minima,maxima,
    exception));
}
MagickExport MagickBooleanType GetImageChannelRange(const Image *image,
  const ChannelType channel,double *minima,double *maxima,
  ExceptionInfo *exception)
{
  /*
    Scan every pixel and track the smallest and largest sample value seen
    in the selected channels.  *minima/*maxima start at the opposite
    extremes so the first sample updates both.  Returns MagickTrue only if
    every row was successfully read.
  */
  MagickPixelPacket
    sample;

  ssize_t
    row;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *maxima=(-MagickMaximumValue);
  *minima=MagickMaximumValue;
  GetMagickPixelPacket(image,&sample);
  for (row=0; row < (ssize_t) image->rows; row++)
  {
    register const IndexPacket
      *restrict index_queue;

    register const PixelPacket
      *restrict pixels;

    register ssize_t
      column;

    pixels=GetVirtualPixels(image,0,row,image->columns,1,exception);
    if (pixels == (const PixelPacket *) NULL)
      break;
    index_queue=GetVirtualIndexQueue(image);
    for (column=0; column < (ssize_t) image->columns; column++)
    {
      SetMagickPixelPacket(image,pixels,index_queue+column,&sample);
      if ((channel & RedChannel) != 0)
        {
          if (sample.red < *minima)
            *minima=(double) sample.red;
          if (sample.red > *maxima)
            *maxima=(double) sample.red;
        }
      if ((channel & GreenChannel) != 0)
        {
          if (sample.green < *minima)
            *minima=(double) sample.green;
          if (sample.green > *maxima)
            *maxima=(double) sample.green;
        }
      if ((channel & BlueChannel) != 0)
        {
          if (sample.blue < *minima)
            *minima=(double) sample.blue;
          if (sample.blue > *maxima)
            *maxima=(double) sample.blue;
        }
      /*
        Opacity participates only when the image has a matte channel.
      */
      if (((channel & OpacityChannel) != 0) &&
          (image->matte != MagickFalse))
        {
          if (sample.opacity < *minima)
            *minima=(double) sample.opacity;
          if (sample.opacity > *maxima)
            *maxima=(double) sample.opacity;
        }
      /*
        The index (black) channel is meaningful only for CMYK images.
      */
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        {
          if ((double) sample.index < *minima)
            *minima=(double) sample.index;
          if ((double) sample.index > *maxima)
            *maxima=(double) sample.index;
        }
      pixels++;
    }
  }
  return(row == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelStatistics() returns statistics for each channel in the
% image. The statistics include the channel depth, its minima, maxima, mean,
% standard deviation, kurtosis and skewness. You can access the red channel
% mean, for example, like this:
%
% channel_statistics=GetImageChannelStatistics(image,exception);
% red_mean=channel_statistics[RedChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageChannelStatistics method is:
%
% ChannelStatistics *GetImageChannelStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelStatistics *GetImageChannelStatistics(const Image *image,
ExceptionInfo *exception)
{
/*
Compute per-channel depth, minima/maxima, mean, standard deviation,
skewness and kurtosis in a single pass over the pixels, then aggregate a
composite entry at index CompositeChannels.  Caller owns the returned
array (RelinquishMagickMemory); NULL is returned on allocation failure or
if a row could not be read.
*/
ChannelStatistics
*channel_statistics;
double
area;
QuantumAny
range;
register ssize_t
i;
size_t
channels,
depth,
length;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
length=CompositeChannels+1UL;
channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(length,
sizeof(*channel_statistics));
if (channel_statistics == (ChannelStatistics *) NULL)
return(channel_statistics);
(void) ResetMagickMemory(channel_statistics,0,length*
sizeof(*channel_statistics));
/* Start each channel at depth 1 and extreme opposite min/max sentinels. */
for (i=0; i <= (ssize_t) CompositeChannels; i++)
{
channel_statistics[i].depth=1;
channel_statistics[i].maxima=(-MagickMaximumValue);
channel_statistics[i].minima=MagickMaximumValue;
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
/*
Note: the x loop deliberately has no increment in its header.  Each
depth probe below re-tests the SAME pixel via 'continue' after bumping
the candidate depth; x and p only advance at the bottom of the body
once every channel's depth test passes.  Depth is capped by the
QUANTUM_DEPTH guard, so this always terminates.
*/
for (x=0; x < (ssize_t) image->columns; )
{
if (channel_statistics[RedChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[RedChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelRed(p),range) == MagickFalse)
{
channel_statistics[RedChannel].depth++;
continue;
}
}
if (channel_statistics[GreenChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[GreenChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelGreen(p),range) == MagickFalse)
{
channel_statistics[GreenChannel].depth++;
continue;
}
}
if (channel_statistics[BlueChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[BlueChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelBlue(p),range) == MagickFalse)
{
channel_statistics[BlueChannel].depth++;
continue;
}
}
if (image->matte != MagickFalse)
{
if (channel_statistics[OpacityChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[OpacityChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelOpacity(p),range) == MagickFalse)
{
channel_statistics[OpacityChannel].depth++;
continue;
}
}
}
if (image->colorspace == CMYKColorspace)
{
if (channel_statistics[BlackChannel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[BlackChannel].depth;
range=GetQuantumRange(depth);
if (IsPixelAtDepth(GetPixelIndex(indexes+x),range) == MagickFalse)
{
channel_statistics[BlackChannel].depth++;
continue;
}
}
}
/*
Accumulate min/max and the first four raw power sums per channel;
means and central moments are derived from these after the scan.
*/
if ((double) GetPixelRed(p) < channel_statistics[RedChannel].minima)
channel_statistics[RedChannel].minima=(double) GetPixelRed(p);
if ((double) GetPixelRed(p) > channel_statistics[RedChannel].maxima)
channel_statistics[RedChannel].maxima=(double) GetPixelRed(p);
channel_statistics[RedChannel].sum+=GetPixelRed(p);
channel_statistics[RedChannel].sum_squared+=(double) GetPixelRed(p)*
GetPixelRed(p);
channel_statistics[RedChannel].sum_cubed+=(double)
GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
channel_statistics[RedChannel].sum_fourth_power+=(double)
GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p)*GetPixelRed(p);
if ((double) GetPixelGreen(p) < channel_statistics[GreenChannel].minima)
channel_statistics[GreenChannel].minima=(double) GetPixelGreen(p);
if ((double) GetPixelGreen(p) > channel_statistics[GreenChannel].maxima)
channel_statistics[GreenChannel].maxima=(double) GetPixelGreen(p);
channel_statistics[GreenChannel].sum+=GetPixelGreen(p);
channel_statistics[GreenChannel].sum_squared+=(double) GetPixelGreen(p)*
GetPixelGreen(p);
channel_statistics[GreenChannel].sum_cubed+=(double) GetPixelGreen(p)*
GetPixelGreen(p)*GetPixelGreen(p);
channel_statistics[GreenChannel].sum_fourth_power+=(double)
GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p)*GetPixelGreen(p);
if ((double) GetPixelBlue(p) < channel_statistics[BlueChannel].minima)
channel_statistics[BlueChannel].minima=(double) GetPixelBlue(p);
if ((double) GetPixelBlue(p) > channel_statistics[BlueChannel].maxima)
channel_statistics[BlueChannel].maxima=(double) GetPixelBlue(p);
channel_statistics[BlueChannel].sum+=GetPixelBlue(p);
channel_statistics[BlueChannel].sum_squared+=(double) GetPixelBlue(p)*
GetPixelBlue(p);
channel_statistics[BlueChannel].sum_cubed+=(double) GetPixelBlue(p)*
GetPixelBlue(p)*GetPixelBlue(p);
channel_statistics[BlueChannel].sum_fourth_power+=(double)
GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p)*GetPixelBlue(p);
if (image->matte != MagickFalse)
{
if ((double) GetPixelOpacity(p) < channel_statistics[OpacityChannel].minima)
channel_statistics[OpacityChannel].minima=(double)
GetPixelOpacity(p);
if ((double) GetPixelOpacity(p) > channel_statistics[OpacityChannel].maxima)
channel_statistics[OpacityChannel].maxima=(double)
GetPixelOpacity(p);
channel_statistics[OpacityChannel].sum+=GetPixelOpacity(p);
channel_statistics[OpacityChannel].sum_squared+=(double)
GetPixelOpacity(p)*GetPixelOpacity(p);
channel_statistics[OpacityChannel].sum_cubed+=(double)
GetPixelOpacity(p)*GetPixelOpacity(p)*GetPixelOpacity(p);
channel_statistics[OpacityChannel].sum_fourth_power+=(double)
GetPixelOpacity(p)*GetPixelOpacity(p)*GetPixelOpacity(p)*
GetPixelOpacity(p);
}
if (image->colorspace == CMYKColorspace)
{
if ((double) GetPixelIndex(indexes+x) < channel_statistics[BlackChannel].minima)
channel_statistics[BlackChannel].minima=(double)
GetPixelIndex(indexes+x);
if ((double) GetPixelIndex(indexes+x) > channel_statistics[BlackChannel].maxima)
channel_statistics[BlackChannel].maxima=(double)
GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum+=GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_squared+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_cubed+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
GetPixelIndex(indexes+x);
channel_statistics[BlackChannel].sum_fourth_power+=(double)
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x)*
GetPixelIndex(indexes+x)*GetPixelIndex(indexes+x);
}
x++;
p++;
}
}
/*
Normalize the raw power sums by pixel count: sum becomes the mean,
sum_squared becomes E[X^2] (stored in .variance as well; the actual
variance E[X^2]-mean^2 appears only inside standard_deviation).
*/
area=(double) image->columns*image->rows;
for (i=0; i < (ssize_t) CompositeChannels; i++)
{
double
mean;
mean=channel_statistics[i].sum/area;
channel_statistics[i].sum=mean;
channel_statistics[i].sum_squared/=area;
channel_statistics[i].sum_cubed/=area;
channel_statistics[i].sum_fourth_power/=area;
channel_statistics[i].mean=mean;
channel_statistics[i].variance=channel_statistics[i].sum_squared;
channel_statistics[i].standard_deviation=sqrt(
channel_statistics[i].variance-(mean*mean));
}
/*
Fold the per-channel entries into the composite entry.  The composite
"standard_deviation" accumulates per-channel variances here and is
converted with sqrt() below.
*/
for (i=0; i < (ssize_t) CompositeChannels; i++)
{
channel_statistics[CompositeChannels].depth=(size_t) EvaluateMax((double)
channel_statistics[CompositeChannels].depth,(double)
channel_statistics[i].depth);
channel_statistics[CompositeChannels].minima=MagickMin(
channel_statistics[CompositeChannels].minima,
channel_statistics[i].minima);
channel_statistics[CompositeChannels].maxima=EvaluateMax(
channel_statistics[CompositeChannels].maxima,
channel_statistics[i].maxima);
channel_statistics[CompositeChannels].sum+=channel_statistics[i].sum;
channel_statistics[CompositeChannels].sum_squared+=
channel_statistics[i].sum_squared;
channel_statistics[CompositeChannels].sum_cubed+=
channel_statistics[i].sum_cubed;
channel_statistics[CompositeChannels].sum_fourth_power+=
channel_statistics[i].sum_fourth_power;
channel_statistics[CompositeChannels].mean+=channel_statistics[i].mean;
channel_statistics[CompositeChannels].variance+=
channel_statistics[i].variance-channel_statistics[i].mean*
channel_statistics[i].mean;
channel_statistics[CompositeChannels].standard_deviation+=
channel_statistics[i].variance-channel_statistics[i].mean*
channel_statistics[i].mean;
}
/* Only RGB (3) plus optional opacity and black channels contribute. */
channels=3;
if (image->matte != MagickFalse)
channels++;
if (image->colorspace == CMYKColorspace)
channels++;
channel_statistics[CompositeChannels].sum/=channels;
channel_statistics[CompositeChannels].sum_squared/=channels;
channel_statistics[CompositeChannels].sum_cubed/=channels;
channel_statistics[CompositeChannels].sum_fourth_power/=channels;
channel_statistics[CompositeChannels].mean/=channels;
channel_statistics[CompositeChannels].variance/=channels;
channel_statistics[CompositeChannels].standard_deviation=
sqrt(channel_statistics[CompositeChannels].standard_deviation/channels);
/*
NOTE(review): kurtosis/skewness are still zero at this point (they are
computed in the loop below), so these two divisions are no-ops — verify
against upstream before changing.
*/
channel_statistics[CompositeChannels].kurtosis/=channels;
channel_statistics[CompositeChannels].skewness/=channels;
/*
Skewness = (E[X^3]-3*mean*E[X^2]+2*mean^3)/sigma^3 and
kurtosis  = (E[X^4]-4*mean*E[X^3]+6*mean^2*E[X^2]-3*mean^4)/sigma^4 - 3
(excess kurtosis), skipping channels with zero deviation.
*/
for (i=0; i <= (ssize_t) CompositeChannels; i++)
{
if (channel_statistics[i].standard_deviation == 0.0)
continue;
channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-
3.0*channel_statistics[i].mean*channel_statistics[i].sum_squared+
2.0*channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].mean)/(channel_statistics[i].standard_deviation*
channel_statistics[i].standard_deviation*
channel_statistics[i].standard_deviation);
channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-
4.0*channel_statistics[i].mean*channel_statistics[i].sum_cubed+
6.0*channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
channel_statistics[i].mean*1.0*channel_statistics[i].mean*
channel_statistics[i].mean)/(channel_statistics[i].standard_deviation*
channel_statistics[i].standard_deviation*
channel_statistics[i].standard_deviation*
channel_statistics[i].standard_deviation)-3.0;
}
/* A short row scan (broken pixel cache) yields NULL instead of partial data. */
if (y < (ssize_t) image->rows)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l y n o m i a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolynomialImage() returns a new image where each pixel is the sum of the
% pixels in the image sequence after applying its corresponding terms
% (coefficient and degree pairs).
%
% The format of the PolynomialImage method is:
%
% Image *PolynomialImage(const Image *images,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
% Image *PolynomialImageChannel(const Image *images,
% const size_t number_terms,const ChannelType channel,
% const double *terms,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o channel: the channel.
%
% o number_terms: the number of terms in the list. The actual list length
% is 2 x number_terms + 1 (the constant).
%
% o terms: the list of polynomial coefficients and degree pairs and a
% constant.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolynomialImage(const Image *images,
  const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: apply the polynomial over the default channels.
  */
  return(PolynomialImageChannel(images,DefaultChannels,number_terms,terms,
    exception));
}
MagickExport Image *PolynomialImageChannel(const Image *images,
const ChannelType channel,const size_t number_terms,const double *terms,
ExceptionInfo *exception)
{
#define PolynomialImageTag "Polynomial/Image"
/*
Build a new image where each selected channel is
sum_i coefficient_i * (QuantumScale*sample_i)^degree_i over the image
sequence; terms[] is laid out as (coefficient,degree) pairs, one pair per
image.  NOTE(review): the documented trailing constant term
(list length 2*number_terms+1) is never read in this function — confirm
against callers/docs.
*/
CacheView
*polynomial_view;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
**restrict polynomial_pixels,
zero;
size_t
number_images;
ssize_t
y;
assert(images != (Image *) NULL);
assert(images->signature == MagickSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=CloneImage(images,images->columns,images->rows,MagickTrue,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass) == MagickFalse)
{
InheritException(exception,&image->exception);
image=DestroyImage(image);
return((Image *) NULL);
}
/* One accumulation row per OpenMP thread. */
number_images=GetImageListLength(images);
polynomial_pixels=AcquirePixelThreadSet(images,number_images);
if (polynomial_pixels == (MagickPixelPacket **) NULL)
{
image=DestroyImage(image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
/*
Polynomial image pixels.
*/
status=MagickTrue;
progress=0;
GetMagickPixelPacket(images,&zero);
polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register IndexPacket
*restrict polynomial_indexes;
register MagickPixelPacket
*polynomial_pixel;
register PixelPacket
*restrict q;
register ssize_t
i,
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
polynomial_indexes=GetCacheViewAuthenticIndexQueue(polynomial_view);
/* Zero this thread's accumulator row before summing the sequence. */
polynomial_pixel=polynomial_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
polynomial_pixel[x]=zero;
next=images;
for (i=0; i < (ssize_t) number_images; i++)
{
register const IndexPacket
*indexes;
register const PixelPacket
*p;
/* Only the first number_terms images have a (coefficient,degree) pair. */
if (i >= (ssize_t) number_terms)
break;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
double
coefficient,
degree;
coefficient=terms[i << 1];
degree=terms[(i << 1)+1];
if ((channel & RedChannel) != 0)
polynomial_pixel[x].red+=coefficient*pow(QuantumScale*p->red,degree);
if ((channel & GreenChannel) != 0)
polynomial_pixel[x].green+=coefficient*pow(QuantumScale*p->green,
degree);
if ((channel & BlueChannel) != 0)
polynomial_pixel[x].blue+=coefficient*pow(QuantumScale*p->blue,
degree);
/* Opacity is converted to alpha (QuantumRange-opacity) before pow(). */
if ((channel & OpacityChannel) != 0)
polynomial_pixel[x].opacity+=coefficient*pow(QuantumScale*
(QuantumRange-p->opacity),degree);
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
polynomial_pixel[x].index+=coefficient*pow(QuantumScale*indexes[x],
degree);
p++;
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
/*
Scale the accumulated [0,1] sums back to quantum range and clamp.
NOTE(review): the matte branch uses SetPixelAlpha while the non-matte
branch uses SetPixelOpacity on the same expression — confirm the
intended alpha/opacity inversion against the pixel accessor macros.
*/
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelRed(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].red));
SetPixelGreen(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].green));
SetPixelBlue(q,ClampToQuantum(QuantumRange*polynomial_pixel[x].blue));
if (image->matte == MagickFalse)
SetPixelOpacity(q,ClampToQuantum(QuantumRange-QuantumRange*
polynomial_pixel[x].opacity));
else
SetPixelAlpha(q,ClampToQuantum(QuantumRange-QuantumRange*
polynomial_pixel[x].opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(polynomial_indexes+x,ClampToQuantum(QuantumRange*
polynomial_pixel[x].index));
q++;
}
if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_PolynomialImages)
#endif
proceed=SetImageProgress(images,PolynomialImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
polynomial_view=DestroyCacheView(polynomial_view);
polynomial_pixels=DestroyPixelThreadSet(polynomial_pixels);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
% Image *StatisticImageChannel(const Image *image,
% const ChannelType channel,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the image channel.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
#define ListChannels 5
/*
A PixelList maintains one skip-list per channel (red, green, blue,
opacity, index) over the 16-bit sample values inside the current
statistic neighborhood, supporting order statistics (median, minimum,
maximum, mode, ...) in better than linear scan time.
*/
/*
One node per possible 16-bit sample value (pool of 65537 entries; index
65536 is the list-head sentinel).  next[] holds the forward links for up
to 9 skip-list levels; count is the number of occurrences of this value.
signature appears to mark a node as live in the current list — confirm
against the callers outside this chunk.
*/
typedef struct _ListNode
{
size_t
next[9],
count,
signature;
} ListNode;
/* A single channel's skip-list: current top level plus the node pool. */
typedef struct _SkipList
{
ssize_t
level;
ListNode
*nodes;
} SkipList;
/*
length is the neighborhood size (width*height); seed drives the
pseudo-random level generator used on insertion.
*/
typedef struct _PixelList
{
size_t
length,
seed,
signature;
SkipList
lists[ListChannels];
} PixelList;
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
  /*
    Release each channel's node pool, then the list itself.  NULL-safe;
    always returns NULL.
  */
  register ssize_t
    channel;

  if (pixel_list == (PixelList *) NULL)
    return((PixelList *) NULL);
  for (channel=0; channel < ListChannels; channel++)
  {
    if (pixel_list->lists[channel].nodes == (ListNode *) NULL)
      continue;
    pixel_list->lists[channel].nodes=(ListNode *)
      RelinquishMagickMemory(pixel_list->lists[channel].nodes);
  }
  return((PixelList *) RelinquishMagickMemory(pixel_list));
}
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
  /*
    Tear down the per-thread PixelList array (one slot per thread
    resource); NULL slots are skipped.  Always returns NULL.
  */
  register ssize_t
    n;

  assert(pixel_list != (PixelList **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
    if (pixel_list[n] != (PixelList *) NULL)
      pixel_list[n]=DestroyPixelList(pixel_list[n]);
  return((PixelList **) RelinquishMagickMemory(pixel_list));
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
  /*
    Allocate a zeroed pixel list sized for a width x height neighborhood,
    with a 65537-node pool per channel (65536 sample values plus the
    sentinel node at index 65536).  Returns NULL on allocation failure,
    releasing anything acquired so far.
  */
  PixelList
    *pixel_list;

  register ssize_t
    channel;

  pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
  if (pixel_list == (PixelList *) NULL)
    return((PixelList *) NULL);
  (void) ResetMagickMemory((void *) pixel_list,0,sizeof(*pixel_list));
  pixel_list->length=width*height;
  for (channel=0; channel < ListChannels; channel++)
  {
    pixel_list->lists[channel].nodes=(ListNode *) AcquireQuantumMemory(
      65537UL,sizeof(*pixel_list->lists[channel].nodes));
    if (pixel_list->lists[channel].nodes == (ListNode *) NULL)
      return(DestroyPixelList(pixel_list));
    (void) ResetMagickMemory(pixel_list->lists[channel].nodes,0,
      65537UL*sizeof(*pixel_list->lists[channel].nodes));
  }
  pixel_list->signature=MagickSignature;
  return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
  const size_t height)
{
  /*
    Allocate one PixelList per worker thread.  On any failure the
    partially built set is destroyed and NULL is returned.
  */
  PixelList
    **pixel_list;

  register ssize_t
    n;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_list));
  if (pixel_list == (PixelList **) NULL)
    return((PixelList **) NULL);
  (void) ResetMagickMemory(pixel_list,0,number_threads*sizeof(*pixel_list));
  for (n=0; n < (ssize_t) number_threads; n++)
  {
    pixel_list[n]=AcquirePixelList(width,height);
    if (pixel_list[n] == (PixelList *) NULL)
      return(DestroyPixelListThreadSet(pixel_list));
  }
  return(pixel_list);
}
static void AddNodePixelList(PixelList *pixel_list,const ssize_t channel,
const size_t color)
{
/*
Insert sample value 'color' into the skip-list for 'channel'.  Nodes are
pooled and indexed directly by sample value; index 65536 is the sentinel
list head.  Note count is SET to 1 (not incremented) — callers
presumably bump count themselves for values already present (verify
against the caller outside this chunk).
*/
register SkipList
*list;
register ssize_t
level;
size_t
search,
update[9];
/*
Initialize the node.
*/
list=pixel_list->lists+channel;
list->nodes[color].signature=pixel_list->signature;
list->nodes[color].count=1;
/*
Determine where it belongs in the list: record in update[] the
predecessor of 'color' at every current level.
*/
search=65536UL;
for (level=list->level; level >= 0; level--)
{
while (list->nodes[search].next[level] < color)
search=list->nodes[search].next[level];
update[level]=search;
}
/*
Generate a pseudo-random level for this node: one LCG step per round,
promoting to the next level only while (seed & 0x300) == 0x300 (1-in-4
chance), capped at 8 and at the current level plus two.
*/
for (level=0; ; level++)
{
pixel_list->seed=(pixel_list->seed*42893621L)+1L;
if ((pixel_list->seed & 0x300) != 0x300)
break;
}
if (level > 8)
level=8;
if (level > (list->level+2))
level=list->level+2;
/*
If we're raising the list's level, link back to the root (sentinel) node.
*/
while (level > list->level)
{
list->level++;
update[list->level]=65536UL;
}
/*
Link the node into the skip-list at every level from 'level' down to 0.
*/
do
{
list->nodes[color].next[level]=list->nodes[update[level]].next[level];
list->nodes[update[level]].next[level]=color;
} while (level-- > 0);
}
static void GetMaximumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  /*
    For each channel, walk the level-0 links of the skip-list (starting at
    the sentinel, index 65536) and record the largest sample value among
    the neighborhood's samples.
  */
  register ssize_t
    channel;

  unsigned short
    channels[ListChannels];

  for (channel=0; channel < 5; channel++)
  {
    register SkipList
      *list;

    size_t
      node,
      peak;

    ssize_t
      tally;

    list=pixel_list->lists+channel;
    node=65536L;
    peak=list->nodes[node].next[0];
    tally=0;
    do
    {
      node=list->nodes[node].next[0];
      if (node > peak)
        peak=node;
      tally+=list->nodes[node].count;
    } while (tally < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) peak;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetMeanPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  /*
    Average the neighborhood samples of each channel: walk the level-0
    links, weighting each sample value by its occurrence count.
  */
  register ssize_t
    channel;

  unsigned short
    channels[ListChannels];

  for (channel=0; channel < 5; channel++)
  {
    register SkipList
      *list;

    MagickRealType
      sum;

    size_t
      node;

    ssize_t
      tally;

    list=pixel_list->lists+channel;
    node=65536L;
    tally=0;
    sum=0.0;
    do
    {
      node=list->nodes[node].next[0];
      sum+=(MagickRealType) list->nodes[node].count*node;
      tally+=list->nodes[node].count;
    } while (tally < (ssize_t) pixel_list->length);
    sum/=pixel_list->length;
    channels[channel]=(unsigned short) sum;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetMedianPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  /*
    For each channel, walk the level-0 links until more than half of the
    neighborhood samples have been passed; the node reached is the median
    sample value.
  */
  register ssize_t
    channel;

  unsigned short
    channels[ListChannels];

  for (channel=0; channel < 5; channel++)
  {
    register SkipList
      *list;

    size_t
      node;

    ssize_t
      tally;

    list=pixel_list->lists+channel;
    node=65536L;
    tally=0;
    do
    {
      node=list->nodes[node].next[0];
      tally+=list->nodes[node].count;
    } while (tally <= (ssize_t) (pixel_list->length >> 1));
    channels[channel]=(unsigned short) node;
  }
  GetMagickPixelPacket((const Image *) NULL,pixel);
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetMinimumPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  /*
    For each channel, walk the level-0 links of the skip-list (starting at
    the sentinel, index 65536) and record the smallest sample value among
    the neighborhood's samples.
  */
  register ssize_t
    channel;

  unsigned short
    channels[ListChannels];

  for (channel=0; channel < 5; channel++)
  {
    register SkipList
      *list;

    size_t
      floor_color,
      node;

    ssize_t
      tally;

    list=pixel_list->lists+channel;
    tally=0;
    node=65536UL;
    floor_color=list->nodes[node].next[0];
    do
    {
      node=list->nodes[node].next[0];
      if (node < floor_color)
        floor_color=node;
      tally+=list->nodes[node].count;
    } while (tally < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) floor_color;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetModePixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
  /*
    For each channel, select the sample value with the highest occurrence
    count (the mode, i.e. the 'predominant color' of the neighborhood).
  */
  register ssize_t
    channel;

  unsigned short
    channels[5];

  for (channel=0; channel < 5; channel++)
  {
    register SkipList
      *list;

    size_t
      best_count,
      mode,
      node;

    ssize_t
      tally;

    list=pixel_list->lists+channel;
    node=65536L;
    mode=node;
    best_count=list->nodes[mode].count;
    tally=0;
    do
    {
      node=list->nodes[node].next[0];
      if (list->nodes[node].count > best_count)
        {
          mode=node;
          best_count=list->nodes[mode].count;
        }
      tally+=list->nodes[node].count;
    } while (tally < (ssize_t) pixel_list->length);
    channels[channel]=(unsigned short) mode;
  }
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetNonpeakPixelList(PixelList *pixel_list,MagickPixelPacket *pixel)
{
/*
For each channel, locate the median node while tracking its neighbors,
then step off it when the median sits at either end of the list: take
the successor when the median is the first node, the predecessor when it
is the last (65536 is the sentinel index); otherwise keep the median.
*/
register SkipList
*list;
register ssize_t
channel;
size_t
color,
next,
previous;
ssize_t
count;
unsigned short
channels[5];
/*
Finds the non peak value for each of the colors.
*/
for (channel=0; channel < 5; channel++)
{
list=pixel_list->lists+channel;
color=65536L;
next=list->nodes[color].next[0];
count=0;
do
{
/* previous/next bracket 'color'; the loop runs at least once. */
previous=color;
color=next;
next=list->nodes[color].next[0];
count+=list->nodes[color].count;
} while (count <= (ssize_t) (pixel_list->length >> 1));
if ((previous == 65536UL) && (next != 65536UL))
color=next;
else
if ((previous != 65536UL) && (next == 65536UL))
color=previous;
channels[channel]=(unsigned short) color;
}
pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetRootMeanSquarePixelList(PixelList *pixel_list,
  MagickPixelPacket *pixel)
{
  MagickRealType
    sum;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the root mean square value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;   /* sentinel/head node of the skip-list */
    count=0;
    sum=0.0;
    do
    {
      color=list->nodes[color].next[0];
      /*
        Accumulate count*color^2 in floating point.  The original computed
        the product in size_t before casting; with color up to 65535,
        color*color alone approaches 2^32 and can overflow a 32-bit size_t.
      */
      sum+=(MagickRealType) list->nodes[color].count*color*color;
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    sum/=pixel_list->length;
    channels[channel]=(unsigned short) sqrt(sum);
  }
  /* channels[] order: red, green, blue, opacity, index (black for CMYK) */
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static void GetStandardDeviationPixelList(PixelList *pixel_list,
  MagickPixelPacket *pixel)
{
  MagickRealType
    sum,
    sum_squared;

  register SkipList
    *list;

  register ssize_t
    channel;

  size_t
    color;

  ssize_t
    count;

  unsigned short
    channels[ListChannels];

  /*
    Find the standard-deviation value for each of the color.
  */
  for (channel=0; channel < 5; channel++)
  {
    list=pixel_list->lists+channel;
    color=65536L;   /* sentinel/head node of the skip-list */
    count=0;
    sum=0.0;
    sum_squared=0.0;
    do
    {
      color=list->nodes[color].next[0];
      sum+=(MagickRealType) list->nodes[color].count*color;
      /*
        All `count` occurrences of this color contribute the same color^2
        term, so add it once scaled by the count.  The original looped
        `count` times adding color*color on each pass -- O(count) work for
        an identical result.
      */
      sum_squared+=(MagickRealType) list->nodes[color].count*color*color;
      count+=list->nodes[color].count;
    } while (count < (ssize_t) pixel_list->length);
    /* stddev = sqrt(E[X^2] - E[X]^2) */
    sum/=pixel_list->length;
    sum_squared/=pixel_list->length;
    channels[channel]=(unsigned short) sqrt(sum_squared-(sum*sum));
  }
  /* channels[] order: red, green, blue, opacity, index (black for CMYK) */
  pixel->red=(MagickRealType) ScaleShortToQuantum(channels[0]);
  pixel->green=(MagickRealType) ScaleShortToQuantum(channels[1]);
  pixel->blue=(MagickRealType) ScaleShortToQuantum(channels[2]);
  pixel->opacity=(MagickRealType) ScaleShortToQuantum(channels[3]);
  pixel->index=(MagickRealType) ScaleShortToQuantum(channels[4]);
}
static inline void InsertPixelList(const Image *image,const PixelPacket *pixel,
  const IndexPacket *indexes,PixelList *pixel_list)
{
  size_t
    signature;

  unsigned short
    index;

  /*
    Insert one pixel's red/green/blue/opacity (and, for CMYK, index)
    samples into the per-channel skip-lists.  A node whose signature
    matches the list's current signature is already live for this
    neighborhood, so only its count is bumped; otherwise the node is stale
    and must be (re)linked via AddNodePixelList().
  */
  index=ScaleQuantumToShort(GetPixelRed(pixel));
  signature=pixel_list->lists[0].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[0].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,0,index);
  index=ScaleQuantumToShort(GetPixelGreen(pixel));
  signature=pixel_list->lists[1].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[1].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,1,index);
  index=ScaleQuantumToShort(GetPixelBlue(pixel));
  signature=pixel_list->lists[2].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[2].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,2,index);
  index=ScaleQuantumToShort(GetPixelOpacity(pixel));
  signature=pixel_list->lists[3].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[3].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,3,index);
  /*
    NOTE(review): for non-CMYK images `index` still holds the opacity
    sample here, so channel 4 is populated with opacity data; presumably
    the channel-4 list is only consulted for CMYK images -- confirm.
  */
  if (image->colorspace == CMYKColorspace)
    index=ScaleQuantumToShort(GetPixelIndex(indexes));
  signature=pixel_list->lists[4].nodes[index].signature;
  if (signature == pixel_list->signature)
    pixel_list->lists[4].nodes[index].count++;
  else
    AddNodePixelList(pixel_list,4,index);
}
static inline MagickRealType MagickAbsoluteValue(const MagickRealType x)
{
  /* Absolute value, expressed as a conditional instead of calling fabs(). */
  return(x < 0 ? -x : x);
}
static void ResetPixelList(PixelList *pixel_list)
{
  register SkipList
    *list;

  register ssize_t
    channel;

  /*
    Empty every per-channel skip-list: relink all levels of the sentinel
    node (index 65536) back to itself and drop the list level to zero.
  */
  for (channel=0; channel < 5; channel++)
  {
    int
      level;

    list=pixel_list->lists+channel;
    list->level=0;
    for (level=0; level < 9; level++)
      list->nodes[65536UL].next[level]=65536UL;
  }
  /* bump the signature so nodes from the previous neighborhood go stale */
  pixel_list->seed=pixel_list->signature++;
}
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
  const size_t width,const size_t height,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: apply the statistic over the default channels.
  */
  return(StatisticImageChannel(image,DefaultChannels,type,width,height,
    exception));
}
/*
  Replace each pixel of the selected channels with a statistic (min, max,
  median, mode, mean, gradient, RMS, standard deviation, ...) of its
  width x height neighborhood.  Returns a new image, or NULL on failure.
*/
MagickExport Image *StatisticImageChannel(const Image *image,
  const ChannelType channel,const StatisticType type,const size_t width,
  const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"

  CacheView
    *image_view,
    *statistic_view;

  Image
    *statistic_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelList
    **restrict pixel_list;

  size_t
    neighbor_height,
    neighbor_width;

  ssize_t
    y;

  /*
    Initialize statistics image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  statistic_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (statistic_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(statistic_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&statistic_image->exception);
      statistic_image=DestroyImage(statistic_image);
      return((Image *) NULL);
    }
  /*
    A zero width or height requests an automatically chosen kernel size.
  */
  neighbor_width=width == 0 ? GetOptimalKernelWidth2D((double) width,0.5) :
    width;
  neighbor_height=height == 0 ? GetOptimalKernelWidth2D((double) height,0.5) :
    height;
  /*
    One skip-list set per worker thread.
  */
  pixel_list=AcquirePixelListThreadSet(neighbor_width,neighbor_height);
  if (pixel_list == (PixelList **) NULL)
    {
      statistic_image=DestroyImage(statistic_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Make each pixel the min / max / median / mode / etc. of the neighborhood.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,statistic_image,statistic_image->rows,1)
#endif
  for (y=0; y < (ssize_t) statistic_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *restrict indexes;

    register const PixelPacket
      *restrict p;

    register IndexPacket
      *restrict statistic_indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Read a padded band of source rows centered on y; the virtual view
      synthesizes the out-of-bounds pixels at the image edges.
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) neighbor_width/2L),y-
      (ssize_t) (neighbor_height/2L),image->columns+neighbor_width,
      neighbor_height,exception);
    q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns,
      1,exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    statistic_indexes=GetCacheViewAuthenticIndexQueue(statistic_view);
    for (x=0; x < (ssize_t) statistic_image->columns; x++)
    {
      MagickPixelPacket
        pixel;

      register const IndexPacket
        *restrict s;

      register const PixelPacket
        *restrict r;

      register ssize_t
        u,
        v;

      /*
        Load the neighborhood around column x into this thread's
        skip-lists; each source row is image->columns+neighbor_width wide.
      */
      r=p;
      s=indexes+x;
      ResetPixelList(pixel_list[id]);
      for (v=0; v < (ssize_t) neighbor_height; v++)
      {
        for (u=0; u < (ssize_t) neighbor_width; u++)
          InsertPixelList(image,r+u,s+u,pixel_list[id]);
        r+=image->columns+neighbor_width;
        s+=image->columns+neighbor_width;
      }
      /*
        Seed `pixel` from a source pixel near the neighborhood center.
        NOTE(review): the offset arithmetic assumes the padded row layout
        above -- confirm it lands on the intended center pixel.
      */
      GetMagickPixelPacket(image,&pixel);
      SetMagickPixelPacket(image,p+neighbor_width*neighbor_height/2,indexes+x+
        neighbor_width*neighbor_height/2,&pixel);
      switch (type)
      {
        case GradientStatistic:
        {
          MagickPixelPacket
            maximum,
            minimum;

          /*
            Gradient is the per-channel range (max-min) of the neighborhood.
          */
          GetMinimumPixelList(pixel_list[id],&pixel);
          minimum=pixel;
          GetMaximumPixelList(pixel_list[id],&pixel);
          maximum=pixel;
          pixel.red=MagickAbsoluteValue(maximum.red-minimum.red);
          pixel.green=MagickAbsoluteValue(maximum.green-minimum.green);
          pixel.blue=MagickAbsoluteValue(maximum.blue-minimum.blue);
          pixel.opacity=MagickAbsoluteValue(maximum.opacity-minimum.opacity);
          if (image->colorspace == CMYKColorspace)
            pixel.index=MagickAbsoluteValue(maximum.index-minimum.index);
          break;
        }
        case MaximumStatistic:
        {
          GetMaximumPixelList(pixel_list[id],&pixel);
          break;
        }
        case MeanStatistic:
        {
          GetMeanPixelList(pixel_list[id],&pixel);
          break;
        }
        case MedianStatistic:
        default:
        {
          GetMedianPixelList(pixel_list[id],&pixel);
          break;
        }
        case MinimumStatistic:
        {
          GetMinimumPixelList(pixel_list[id],&pixel);
          break;
        }
        case ModeStatistic:
        {
          GetModePixelList(pixel_list[id],&pixel);
          break;
        }
        case NonpeakStatistic:
        {
          GetNonpeakPixelList(pixel_list[id],&pixel);
          break;
        }
        case RootMeanSquareStatistic:
        {
          GetRootMeanSquarePixelList(pixel_list[id],&pixel);
          break;
        }
        case StandardDeviationStatistic:
        {
          GetStandardDeviationPixelList(pixel_list[id],&pixel);
          break;
        }
      }
      /*
        Only the caller-selected channels are written to the output.
      */
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(pixel.red));
      if ((channel & GreenChannel) != 0)
        SetPixelGreen(q,ClampToQuantum(pixel.green));
      if ((channel & BlueChannel) != 0)
        SetPixelBlue(q,ClampToQuantum(pixel.blue));
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(pixel.opacity));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(statistic_indexes+x,ClampToQuantum(pixel.index));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_StatisticImage)
#endif
        proceed=SetImageProgress(image,StatisticImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  statistic_view=DestroyCacheView(statistic_view);
  image_view=DestroyCacheView(image_view);
  pixel_list=DestroyPixelListThreadSet(pixel_list);
  if (status == MagickFalse)
    statistic_image=DestroyImage(statistic_image);
  return(statistic_image);
}
|
HYPRE_IJMatrix.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* HYPRE_IJMatrix interface
*
*****************************************************************************/
#include "./_hypre_IJ_mv.h"
#include "../HYPRE.h"
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixCreate
*--------------------------------------------------------------------------*/
/*
  Create an IJ matrix shell with local row range [ilower, iupper] and local
  column range [jlower, jupper].  The underlying object is created later,
  after the object type is set and the matrix is initialized.
*/
HYPRE_Int
HYPRE_IJMatrixCreate( MPI_Comm comm,
                      HYPRE_BigInt ilower,
                      HYPRE_BigInt iupper,
                      HYPRE_BigInt jlower,
                      HYPRE_BigInt jupper,
                      HYPRE_IJMatrix *matrix )
{
   HYPRE_BigInt info[2];
   HYPRE_Int num_procs;
   HYPRE_Int myid;

   hypre_IJMatrix *ijmatrix;

   HYPRE_BigInt row0, col0, rowN, colN;

   ijmatrix = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST);

   hypre_IJMatrixComm(ijmatrix) = comm;
   hypre_IJMatrixObject(ijmatrix) = NULL;
   hypre_IJMatrixTranslator(ijmatrix) = NULL;
   hypre_IJMatrixAssumedPart(ijmatrix) = NULL;
   hypre_IJMatrixObjectType(ijmatrix) = HYPRE_UNITIALIZED;
   hypre_IJMatrixAssembleFlag(ijmatrix) = 0;
   hypre_IJMatrixPrintLevel(ijmatrix) = 0;
   hypre_IJMatrixOMPFlag(ijmatrix) = 0;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &myid);

   /*
     Validate the local ranges.  ilower == iupper + 1 (an empty local
     range) is allowed; anything smaller, or a negative lower bound,
     is a caller error.
   */
   if (ilower > iupper + 1 || ilower < 0)
   {
      hypre_error_in_arg(2);
      hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
      return hypre_error_flag;
   }

   if (iupper < -1)
   {
      hypre_error_in_arg(3);
      hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
      return hypre_error_flag;
   }

   if (jlower > jupper + 1 || jlower < 0)
   {
      hypre_error_in_arg(4);
      hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
      return hypre_error_flag;
   }

   if (jupper < -1)
   {
      hypre_error_in_arg(5);
      hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);
      return hypre_error_flag;
   }

   /* partitionings are stored as half-open ranges [lower, upper+1) */
   hypre_IJMatrixRowPartitioning(ijmatrix)[0] = ilower;
   hypre_IJMatrixRowPartitioning(ijmatrix)[1] = iupper + 1;
   hypre_IJMatrixColPartitioning(ijmatrix)[0] = jlower;
   hypre_IJMatrixColPartitioning(ijmatrix)[1] = jupper + 1;

   /* now we need the global number of rows and columns as well
      as the global first row and column index */

   /* proc 0 has the first row and col */
   if (myid == 0)
   {
      info[0] = ilower;
      info[1] = jlower;
   }
   hypre_MPI_Bcast(info, 2, HYPRE_MPI_BIG_INT, 0, comm);
   row0 = info[0];
   col0 = info[1];

   /* proc (num_procs-1) has the last row and col */
   if (myid == (num_procs - 1))
   {
      info[0] = iupper;
      info[1] = jupper;
   }
   hypre_MPI_Bcast(info, 2, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
   rowN = info[0];
   colN = info[1];

   hypre_IJMatrixGlobalFirstRow(ijmatrix) = row0;
   hypre_IJMatrixGlobalFirstCol(ijmatrix) = col0;
   hypre_IJMatrixGlobalNumRows(ijmatrix) = rowN - row0 + 1;
   hypre_IJMatrixGlobalNumCols(ijmatrix) = colN - col0 + 1;

   *matrix = (HYPRE_IJMatrix) ijmatrix;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
  Destroy an IJ matrix: release the assumed partition, the underlying
  ParCSR object (if any), and the IJ shell itself.
*/
HYPRE_Int
HYPRE_IJMatrixDestroy( HYPRE_IJMatrix matrix )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Parentheses added: the original `if hypre_IJMatrixAssumedPart(...)`
      compiled only because the accessor macro expands to a parenthesized
      expression -- fragile if the macro ever changes. */
   if (hypre_IJMatrixAssumedPart(ijmatrix))
   {
      hypre_AssumedPartitionDestroy((hypre_IJAssumedPart*)hypre_IJMatrixAssumedPart(ijmatrix));
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR )
   {
      hypre_IJMatrixDestroyParCSR( ijmatrix );
   }
   else if ( hypre_IJMatrixObjectType(ijmatrix) != -1 )
   {
      /* Unknown object type: report the error, but still free the shell
         below -- the original returned here and leaked ijmatrix. */
      hypre_error_in_arg(1);
   }

   hypre_TFree(ijmatrix, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
  Initialize the matrix for value insertion.  Only ParCSR-backed
  matrices are supported.
*/
HYPRE_Int
HYPRE_IJMatrixInitialize( HYPRE_IJMatrix matrix )
{
   hypre_IJMatrix *ijm = (hypre_IJMatrix *) matrix;

   if (ijm == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ijm) != HYPRE_PARCSR)
   {
      /* unsupported object type */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixInitializeParCSR(ijm);

   return hypre_error_flag;
}
/*
  Like HYPRE_IJMatrixInitialize, but lets the caller choose the memory
  location (host or device) of the matrix data.
*/
HYPRE_Int
HYPRE_IJMatrixInitialize_v2( HYPRE_IJMatrix matrix, HYPRE_MemoryLocation memory_location )
{
   hypre_IJMatrix *ijm = (hypre_IJMatrix *) matrix;

   if (ijm == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ijm) != HYPRE_PARCSR)
   {
      /* unsupported object type */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixInitializeParCSR_v2(ijm, memory_location);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
  Set the matrix print (verbosity) level.
*/
HYPRE_Int
HYPRE_IJMatrixSetPrintLevel( HYPRE_IJMatrix matrix,
                             HYPRE_Int print_level )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Store the caller's value; the original hard-coded 1 and silently
      ignored the print_level argument. */
   hypre_IJMatrixPrintLevel(ijmatrix) = print_level;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* This is a helper routine to compute a prefix sum of integer values.
*
* The current implementation is okay for modest numbers of threads.
*--------------------------------------------------------------------------*/
/*
  Exclusive prefix sum of integer values: sums[0] = 0 and
  sums[j] = vals[0] + ... + vals[j-1].

  The current implementation is okay for modest numbers of threads.
*/
HYPRE_Int
hypre_PrefixSumInt(HYPRE_Int nvals,
                   HYPRE_Int *vals,
                   HYPRE_Int *sums)
{
   HYPRE_Int j, nthreads, bsize;

   /* Nothing to do -- also protects the sums[0] write below when the
      output array is empty. */
   if (nvals <= 0)
   {
      return hypre_error_flag;
   }

   nthreads = hypre_NumThreads();
   bsize = (nvals + nthreads - 1) / nthreads; /* This distributes the remainder */

   if (nvals < nthreads || bsize == 1)
   {
      /* Serial path.  Plain assignment replaces the original '+=',
         which silently relied on sums[] being zero-initialized by
         the caller. */
      sums[0] = 0;
      for (j = 1; j < nvals; j++)
      {
         sums[j] = sums[j - 1] + vals[j - 1];
      }
   }
   else
   {
      /* Compute preliminary partial sums (in parallel) within each interval */
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < nvals; j += bsize)
      {
         HYPRE_Int i, n = hypre_min((j + bsize), nvals);

         sums[j] = 0;
         for (i = j + 1; i < n; i++)
         {
            sums[i] = sums[i - 1] + vals[i - 1];
         }
      }

      /* Compute final partial sums (in serial) for the first entry of every interval */
      for (j = bsize; j < nvals; j += bsize)
      {
         sums[j] = sums[j - bsize] + sums[j - 1] + vals[j - 1];
      }

      /* Compute final partial sums (in parallel) for the remaining entries */
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = bsize; j < nvals; j += bsize)
      {
         HYPRE_Int i, n = hypre_min((j + bsize), nvals);

         for (i = j + 1; i < n; i++)
         {
            sums[i] += sums[j];
         }
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
  Set matrix coefficients.  Thin validating wrapper that forwards to
  HYPRE_IJMatrixSetValues2 with row_indexes == NULL.
*/
HYPRE_Int
HYPRE_IJMatrixSetValues( HYPRE_IJMatrix matrix,
                         HYPRE_Int nrows,
                         HYPRE_Int *ncols,
                         const HYPRE_BigInt *rows,
                         const HYPRE_BigInt *cols,
                         const HYPRE_Complex *values )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   /* Added for consistency: SetValues2/AddToValues/AddToValues2 all
      reject a negative row count; this wrapper previously did not. */
   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   /* ncols may be NULL (interpreted downstream as one entry per row) */

   if (!rows)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }

   if (!cols)
   {
      hypre_error_in_arg(5);
      return hypre_error_flag;
   }

   if (!values)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   HYPRE_IJMatrixSetValues2(matrix, nrows, ncols, rows, NULL, cols, values);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
  Set matrix coefficients.  ncols may be NULL (one entry per row is
  assumed) and row_indexes may be NULL (offsets into cols/values are then
  computed as the prefix sum of ncols).
*/
HYPRE_Int
HYPRE_IJMatrixSetValues2( HYPRE_IJMatrix matrix,
                          HYPRE_Int nrows,
                          HYPRE_Int *ncols,
                          const HYPRE_BigInt *rows,
                          const HYPRE_Int *row_indexes,
                          const HYPRE_BigInt *cols,
                          const HYPRE_Complex *values )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   /* ncols is deliberately not checked: NULL means one entry per row.
   if (!ncols)
   {
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }
   */

   if (!rows)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }

   if (!cols)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }

   if (!values)
   {
      hypre_error_in_arg(7);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* On the device path, NULL ncols/row_indexes are handled downstream. */
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_IJMatrixMemoryLocation(matrix) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      hypre_IJMatrixSetAddValuesParCSRDevice(ijmatrix, nrows, ncols, rows, row_indexes, cols, values,
                                             "set");
   }
   else
#endif
   {
      HYPRE_Int *row_indexes_tmp = (HYPRE_Int *) row_indexes;
      HYPRE_Int *ncols_tmp = ncols;

      /* Default: one coefficient per row. */
      if (!ncols_tmp)
      {
         HYPRE_Int i;
         ncols_tmp = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
         for (i = 0; i < nrows; i++)
         {
            ncols_tmp[i] = 1;
         }
      }

      /* Default row offsets: exclusive prefix sum of ncols.  CTAlloc
         zero-initializes, which hypre_PrefixSumInt's serial path relies on. */
      if (!row_indexes)
      {
         row_indexes_tmp = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
         hypre_PrefixSumInt(nrows, ncols_tmp, row_indexes_tmp);
      }

      if (hypre_IJMatrixOMPFlag(ijmatrix))
      {
         hypre_IJMatrixSetValuesOMPParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values);
      }
      else
      {
         hypre_IJMatrixSetValuesParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values);
      }

      /* Free only the defaults we allocated ourselves. */
      if (!ncols)
      {
         hypre_TFree(ncols_tmp, HYPRE_MEMORY_HOST);
      }

      if (!row_indexes)
      {
         hypre_TFree(row_indexes_tmp, HYPRE_MEMORY_HOST);
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
  Set every stored coefficient of the matrix to the given constant.
*/
HYPRE_Int
HYPRE_IJMatrixSetConstantValues( HYPRE_IJMatrix matrix, HYPRE_Complex value)
{
   hypre_IJMatrix *ijm = (hypre_IJMatrix *) matrix;

   /* both a NULL handle and a non-ParCSR object are argument-1 errors */
   if (ijm == NULL || hypre_IJMatrixObjectType(ijm) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return hypre_IJMatrixSetConstantValuesParCSR(ijm, value);
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
  Add to matrix coefficients.  Thin validating wrapper that forwards to
  HYPRE_IJMatrixAddToValues2 with row_indexes == NULL.
*/
HYPRE_Int
HYPRE_IJMatrixAddToValues( HYPRE_IJMatrix matrix,
                           HYPRE_Int nrows,
                           HYPRE_Int *ncols,
                           const HYPRE_BigInt *rows,
                           const HYPRE_BigInt *cols,
                           const HYPRE_Complex *values )
{
   hypre_IJMatrix *ijm = (hypre_IJMatrix *) matrix;

   /* nothing to do */
   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   /* validate arguments; ncols is deliberately not checked because NULL
      means one entry per row (handled in AddToValues2) */
   if (ijm == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }
   if (rows == NULL)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }
   if (cols == NULL)
   {
      hypre_error_in_arg(5);
      return hypre_error_flag;
   }
   if (values == NULL)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }
   if (hypre_IJMatrixObjectType(ijm) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   HYPRE_IJMatrixAddToValues2(matrix, nrows, ncols, rows, NULL, cols, values);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
  Add to matrix coefficients.  ncols may be NULL (one entry per row is
  assumed) and row_indexes may be NULL (offsets into cols/values are then
  computed as the prefix sum of ncols).
*/
HYPRE_Int
HYPRE_IJMatrixAddToValues2( HYPRE_IJMatrix matrix,
                            HYPRE_Int nrows,
                            HYPRE_Int *ncols,
                            const HYPRE_BigInt *rows,
                            const HYPRE_Int *row_indexes,
                            const HYPRE_BigInt *cols,
                            const HYPRE_Complex *values )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }

   /* ncols is deliberately not checked: NULL means one entry per row.
   if (!ncols)
   {
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }
   */

   if (!rows)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }

   if (!cols)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }

   if (!values)
   {
      hypre_error_in_arg(7);
      return hypre_error_flag;
   }

   if ( hypre_IJMatrixObjectType(ijmatrix) != HYPRE_PARCSR )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   /* On the device path, NULL ncols/row_indexes are handled downstream. */
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_IJMatrixMemoryLocation(matrix) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      hypre_IJMatrixSetAddValuesParCSRDevice(ijmatrix, nrows, ncols, rows, row_indexes, cols, values,
                                             "add");
   }
   else
#endif
   {
      HYPRE_Int *row_indexes_tmp = (HYPRE_Int *) row_indexes;
      HYPRE_Int *ncols_tmp = ncols;

      /* Default: one coefficient per row. */
      if (!ncols_tmp)
      {
         HYPRE_Int i;
         ncols_tmp = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
         for (i = 0; i < nrows; i++)
         {
            ncols_tmp[i] = 1;
         }
      }

      /* Default row offsets: exclusive prefix sum of ncols.  CTAlloc
         zero-initializes, which hypre_PrefixSumInt's serial path relies on. */
      if (!row_indexes)
      {
         row_indexes_tmp = hypre_CTAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_HOST);
         hypre_PrefixSumInt(nrows, ncols_tmp, row_indexes_tmp);
      }

      if (hypre_IJMatrixOMPFlag(ijmatrix))
      {
         hypre_IJMatrixAddToValuesOMPParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values);
      }
      else
      {
         hypre_IJMatrixAddToValuesParCSR(ijmatrix, nrows, ncols_tmp, rows, row_indexes_tmp, cols, values);
      }

      /* Free only the defaults we allocated ourselves. */
      if (!ncols)
      {
         hypre_TFree(ncols_tmp, HYPRE_MEMORY_HOST);
      }

      if (!row_indexes)
      {
         hypre_TFree(row_indexes_tmp, HYPRE_MEMORY_HOST);
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
  Finalize the matrix after all values have been set/added, dispatching
  to the device or host ParCSR assembly.
*/
HYPRE_Int
HYPRE_IJMatrixAssemble( HYPRE_IJMatrix matrix )
{
   hypre_IJMatrix *ijm = (hypre_IJMatrix *) matrix;

   if (ijm == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ijm) != HYPRE_PARCSR)
   {
      /* only ParCSR-backed matrices can be assembled */
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   if (hypre_GetExecPolicy1( hypre_IJMatrixMemoryLocation(matrix) ) == HYPRE_EXEC_DEVICE)
   {
      return hypre_IJMatrixAssembleParCSRDevice(ijm);
   }
#endif

   return hypre_IJMatrixAssembleParCSR(ijm);
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
  For each requested row, return the number of stored entries in ncols.
*/
HYPRE_Int
HYPRE_IJMatrixGetRowCounts( HYPRE_IJMatrix matrix,
                            HYPRE_Int nrows,
                            HYPRE_BigInt *rows,
                            HYPRE_Int *ncols )
{
   hypre_IJMatrix *ijm = (hypre_IJMatrix *) matrix;

   /* nothing to do */
   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   /* argument validation */
   if (ijm == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (nrows < 0)
   {
      hypre_error_in_arg(2);
      return hypre_error_flag;
   }
   if (rows == NULL)
   {
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }
   if (ncols == NULL)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ijm) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixGetRowCountsParCSR(ijm, nrows, rows, ncols);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
  Gather stored coefficients for the requested rows/columns into values.
*/
HYPRE_Int
HYPRE_IJMatrixGetValues( HYPRE_IJMatrix matrix,
                         HYPRE_Int nrows,
                         HYPRE_Int *ncols,
                         HYPRE_BigInt *rows,
                         HYPRE_BigInt *cols,
                         HYPRE_Complex *values )
{
   hypre_IJMatrix *ijm = (hypre_IJMatrix *) matrix;

   /* nothing to do */
   if (nrows == 0)
   {
      return hypre_error_flag;
   }

   /* argument validation */
   if (ijm == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   if (ncols == NULL)
   {
      hypre_error_in_arg(3);
      return hypre_error_flag;
   }
   if (rows == NULL)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }
   if (cols == NULL)
   {
      hypre_error_in_arg(5);
      return hypre_error_flag;
   }
   if (values == NULL)
   {
      hypre_error_in_arg(6);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ijm) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixGetValuesParCSR(ijm, nrows, ncols, rows, cols, values);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
  Record the storage type (e.g. HYPRE_PARCSR) of the underlying object.
*/
HYPRE_Int
HYPRE_IJMatrixSetObjectType( HYPRE_IJMatrix matrix,
                             HYPRE_Int type )
{
   hypre_IJMatrix *ijm = (hypre_IJMatrix *) matrix;

   if (ijm == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixObjectType(ijm) = type;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
  Query the storage type of the underlying object.
*/
HYPRE_Int
HYPRE_IJMatrixGetObjectType( HYPRE_IJMatrix matrix,
                             HYPRE_Int *type )
{
   hypre_IJMatrix *ijm = (hypre_IJMatrix *) matrix;

   if (ijm == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *type = hypre_IJMatrixObjectType(ijm);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
  Return this process's local row and column ranges as inclusive bounds.
  Internally the partitionings are half-open [lower, upper+1), hence the
  "- 1" on the upper bounds.
*/
HYPRE_Int
HYPRE_IJMatrixGetLocalRange( HYPRE_IJMatrix matrix,
                             HYPRE_BigInt *ilower,
                             HYPRE_BigInt *iupper,
                             HYPRE_BigInt *jlower,
                             HYPRE_BigInt *jupper )
{
   hypre_IJMatrix *ijm = (hypre_IJMatrix *) matrix;

   if (ijm == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   {
      const HYPRE_BigInt *row_part = hypre_IJMatrixRowPartitioning(ijm);
      const HYPRE_BigInt *col_part = hypre_IJMatrixColPartitioning(ijm);

      *ilower = row_part[0];
      *iupper = row_part[1] - 1;
      *jlower = col_part[0];
      *jupper = col_part[1] - 1;
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/**
   Returns a pointer to the underlying matrix object (e.g. a ParCSR matrix)
   used to implement this IJMatrix.  Assumes that the implementation has an
   underlying matrix, so it would not work with a direct implementation of
   IJMatrix.

   @return integer error code
   @param matrix [IN]
   the ijmatrix whose underlying object is requested
   @param object [OUT]
   receives the underlying object pointer (may be NULL if the matrix
   has not been initialized/assembled)
*/

HYPRE_Int
HYPRE_IJMatrixGetObject( HYPRE_IJMatrix matrix,
                         void **object )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (!ijmatrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   *object = hypre_IJMatrixObject( ijmatrix );

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
  Give the matrix an estimate of the number of entries per row, used to
  preallocate storage.
*/
HYPRE_Int
HYPRE_IJMatrixSetRowSizes( HYPRE_IJMatrix matrix,
                           const HYPRE_Int *sizes )
{
   hypre_IJMatrix *ijm = (hypre_IJMatrix *) matrix;

   if (ijm == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ijm) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return hypre_IJMatrixSetRowSizesParCSR(ijm, sizes);
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
  Give the matrix per-row entry estimates split into diagonal-block and
  off-diagonal-block counts, used to preallocate ParCSR storage.
*/
HYPRE_Int
HYPRE_IJMatrixSetDiagOffdSizes( HYPRE_IJMatrix matrix,
                                const HYPRE_Int *diag_sizes,
                                const HYPRE_Int *offdiag_sizes )
{
   hypre_IJMatrix *ijm = (hypre_IJMatrix *) matrix;

   if (ijm == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ijm) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_IJMatrixSetDiagOffdSizesParCSR(ijm, diag_sizes, offdiag_sizes);

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
*--------------------------------------------------------------------------*/
/*
  Give the matrix an estimate of the number of off-processor entries that
  will be set/added, used to size communication buffers.
*/
HYPRE_Int
HYPRE_IJMatrixSetMaxOffProcElmts( HYPRE_IJMatrix matrix,
                                  HYPRE_Int max_off_proc_elmts)
{
   hypre_IJMatrix *ijm = (hypre_IJMatrix *) matrix;

   if (ijm == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if (hypre_IJMatrixObjectType(ijm) != HYPRE_PARCSR)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   return hypre_IJMatrixSetMaxOffProcElmtsParCSR(ijm, max_off_proc_elmts);
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixRead
* create IJMatrix on host memory
*--------------------------------------------------------------------------*/
/*
  Read an IJ matrix from per-process files "<filename>.<myid>" into host
  memory.  The first line holds the local ranges (ilower iupper jlower
  jupper); each subsequent line holds "row col value".
*/
HYPRE_Int
HYPRE_IJMatrixRead( const char *filename,
                    MPI_Comm comm,
                    HYPRE_Int type,
                    HYPRE_IJMatrix *matrix_ptr )
{
   HYPRE_IJMatrix matrix;
   HYPRE_BigInt ilower, iupper, jlower, jupper;
   HYPRE_BigInt I, J;
   HYPRE_Int ncols;
   HYPRE_Complex value;
   HYPRE_Int myid, ret;
   char new_filename[255];
   FILE *file;

   hypre_MPI_Comm_rank(comm, &myid);

   hypre_sprintf(new_filename, "%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "r")) == NULL)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   hypre_fscanf(file, "%b %b %b %b", &ilower, &iupper, &jlower, &jupper);
   HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &matrix);

   HYPRE_IJMatrixSetObjectType(matrix, type);
   HYPRE_IJMatrixInitialize_v2(matrix, HYPRE_MEMORY_HOST);

   /* It is important to ensure that whitespace follows the index value to help
    * catch mistakes in the input file. See comments in IJVectorRead(). */
   ncols = 1;
   while ( (ret = hypre_fscanf(file, "%b %b%*[ \t]%le", &I, &J, &value)) != EOF )
   {
      if (ret != 3)
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC, "Error in IJ matrix input file.");
         /* the original returned here without closing the stream */
         fclose(file);
         return hypre_error_flag;
      }

      /* Rows outside the local range must be *added* (they are routed to
         their owner at assembly); local rows are set directly. */
      if (I < ilower || I > iupper)
      {
         HYPRE_IJMatrixAddToValues(matrix, 1, &ncols, &I, &J, &value);
      }
      else
      {
         HYPRE_IJMatrixSetValues(matrix, 1, &ncols, &I, &J, &value);
      }
   }

   HYPRE_IJMatrixAssemble(matrix);

   fclose(file);

   *matrix_ptr = matrix;

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixPrint
*--------------------------------------------------------------------------*/
/*
  Print the matrix to per-process files "<filename>.<myid>" in IJ format.
  Device-resident matrices are cloned to host memory first.
*/
HYPRE_Int
HYPRE_IJMatrixPrint( HYPRE_IJMatrix matrix,
                     const char *filename )
{
   if (!matrix)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   if ( (hypre_IJMatrixObjectType(matrix) != HYPRE_PARCSR) )
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   void *object;
   HYPRE_IJMatrixGetObject(matrix, &object);
   HYPRE_ParCSRMatrix par_csr = (HYPRE_ParCSRMatrix) object;

   HYPRE_MemoryLocation memory_location = hypre_IJMatrixMemoryLocation(matrix);

   if ( hypre_GetActualMemLocation(memory_location) == hypre_MEMORY_HOST )
   {
      hypre_ParCSRMatrixPrintIJ(par_csr, 0, 0, filename);
   }
   else
   {
      /* clone to host (deep copy), print, then discard the clone */
      HYPRE_ParCSRMatrix par_csr2 = hypre_ParCSRMatrixClone_v2(par_csr, 1, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixPrintIJ(par_csr2, 0, 0, filename);
      hypre_ParCSRMatrixDestroy(par_csr2);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixSetOMPFlag
*--------------------------------------------------------------------------*/
/*
 * Enable/disable OpenMP in IJ matrix routines by storing omp_flag on the
 * matrix object.  Errors (via hypre_error_in_arg) on a NULL handle.
 */
HYPRE_Int
HYPRE_IJMatrixSetOMPFlag( HYPRE_IJMatrix matrix,
                          HYPRE_Int      omp_flag )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (ijmatrix == NULL)
   {
      hypre_error_in_arg(1);
   }
   else
   {
      hypre_IJMatrixOMPFlag(ijmatrix) = omp_flag;
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixTranspose
*--------------------------------------------------------------------------*/
/*
 * Create a new IJ matrix that is the transpose of matrix_A.
 * The header (partitionings, global sizes) is built here with the row and
 * column descriptions swapped; the numerical transposition is delegated to
 * hypre_IJMatrixTransposeParCSR().  Only the ParCSR object type is
 * supported.
 */
HYPRE_Int
HYPRE_IJMatrixTranspose( HYPRE_IJMatrix matrix_A,
HYPRE_IJMatrix *matrix_AT )
{
hypre_IJMatrix *ij_A = (hypre_IJMatrix *) matrix_A;
hypre_IJMatrix *ij_AT;
HYPRE_Int i;
if (!ij_A)
{
hypre_error_in_arg(1);
return hypre_error_flag;
}
ij_AT = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST);
hypre_IJMatrixComm(ij_AT) = hypre_IJMatrixComm(ij_A);
hypre_IJMatrixObject(ij_AT) = NULL;
hypre_IJMatrixTranslator(ij_AT) = NULL;
hypre_IJMatrixAssumedPart(ij_AT) = NULL;
hypre_IJMatrixObjectType(ij_AT) = hypre_IJMatrixObjectType(ij_A);
hypre_IJMatrixAssembleFlag(ij_AT) = 1;
hypre_IJMatrixPrintLevel(ij_AT) = hypre_IJMatrixPrintLevel(ij_A);
/* transpose: rows of AT are the columns of A and vice versa */
hypre_IJMatrixGlobalFirstRow(ij_AT) = hypre_IJMatrixGlobalFirstCol(ij_A);
hypre_IJMatrixGlobalFirstCol(ij_AT) = hypre_IJMatrixGlobalFirstRow(ij_A);
hypre_IJMatrixGlobalNumRows(ij_AT) = hypre_IJMatrixGlobalNumCols(ij_A);
hypre_IJMatrixGlobalNumCols(ij_AT) = hypre_IJMatrixGlobalNumRows(ij_A);
for (i = 0; i < 2; i++)
{
hypre_IJMatrixRowPartitioning(ij_AT)[i] = hypre_IJMatrixColPartitioning(ij_A)[i];
hypre_IJMatrixColPartitioning(ij_AT)[i] = hypre_IJMatrixRowPartitioning(ij_A)[i];
}
if (hypre_IJMatrixObjectType(ij_A) == HYPRE_PARCSR)
{
hypre_IJMatrixTransposeParCSR(ij_A, ij_AT);
}
else
{
/* NOTE(review): on this unsupported-type path ij_AT is still returned
* with a NULL object — confirm callers check hypre_error_flag. */
hypre_error_in_arg(1);
}
*matrix_AT = (HYPRE_IJMatrix) ij_AT;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixNorm
*
* TODO: Add other norms
*--------------------------------------------------------------------------*/
/*
 * Compute a matrix norm of an IJ matrix into *norm.
 * Implemented only for the ParCSR backend; other object types (and a NULL
 * handle) set hypre_error_in_arg.
 */
HYPRE_Int
HYPRE_IJMatrixNorm( HYPRE_IJMatrix matrix,
                    HYPRE_Real    *norm )
{
   hypre_IJMatrix *ijmatrix = (hypre_IJMatrix *) matrix;

   if (ijmatrix == NULL)
   {
      hypre_error_in_arg(1);
   }
   else if (hypre_IJMatrixObjectType(ijmatrix) == HYPRE_PARCSR)
   {
      hypre_IJMatrixNormParCSR(ijmatrix, norm);
   }
   else
   {
      hypre_error_in_arg(1);
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* HYPRE_IJMatrixAdd
*--------------------------------------------------------------------------*/
/*
 * C = alpha*A + beta*B for two IJ matrices with identical row and column
 * partitionings.  The header of C is assembled here (copying A's layout)
 * and the numerical work is delegated to hypre_IJMatrixAddParCSR().
 * Only the ParCSR object type is supported.
 */
HYPRE_Int
HYPRE_IJMatrixAdd( HYPRE_Complex   alpha,
                   HYPRE_IJMatrix  matrix_A,
                   HYPRE_Complex   beta,
                   HYPRE_IJMatrix  matrix_B,
                   HYPRE_IJMatrix *matrix_C )
{
   hypre_IJMatrix *ij_A = (hypre_IJMatrix *) matrix_A;
   hypre_IJMatrix *ij_B = (hypre_IJMatrix *) matrix_B;
   hypre_IJMatrix *ij_C;
   HYPRE_BigInt   *row_partitioning_A;
   HYPRE_BigInt   *col_partitioning_A;
   HYPRE_BigInt   *row_partitioning_B;
   HYPRE_BigInt   *col_partitioning_B;
   HYPRE_Int       i;

   if (!ij_A)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   /* Bug fix: ij_B was previously dereferenced below without a NULL check */
   if (!ij_B)
   {
      hypre_error_in_arg(4);
      return hypre_error_flag;
   }
   /* Check if A and B have the same row/col partitionings */
   row_partitioning_A = hypre_IJMatrixRowPartitioning(ij_A);
   row_partitioning_B = hypre_IJMatrixRowPartitioning(ij_B);
   col_partitioning_A = hypre_IJMatrixColPartitioning(ij_A);
   col_partitioning_B = hypre_IJMatrixColPartitioning(ij_B);
   for (i = 0; i < 2; i++)
   {
      if (row_partitioning_A[i] != row_partitioning_B[i])
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                           "Input matrices must have same row partitioning!");
         return hypre_error_flag;
      }
      if (col_partitioning_A[i] != col_partitioning_B[i])
      {
         hypre_error_w_msg(HYPRE_ERROR_GENERIC,
                           "Input matrices must have same col partitioning!");
         return hypre_error_flag;
      }
   }
   ij_C = hypre_CTAlloc(hypre_IJMatrix, 1, HYPRE_MEMORY_HOST);
   hypre_IJMatrixComm(ij_C)         = hypre_IJMatrixComm(ij_A);
   hypre_IJMatrixObject(ij_C)       = NULL;
   hypre_IJMatrixTranslator(ij_C)   = NULL;
   hypre_IJMatrixAssumedPart(ij_C)  = NULL;
   hypre_IJMatrixObjectType(ij_C)   = hypre_IJMatrixObjectType(ij_A);
   hypre_IJMatrixAssembleFlag(ij_C) = 1;
   hypre_IJMatrixPrintLevel(ij_C)   = hypre_IJMatrixPrintLevel(ij_A);
   /* Copy row/col partitioning of A to C */
   for (i = 0; i < 2; i++)
   {
      hypre_IJMatrixRowPartitioning(ij_C)[i] = row_partitioning_A[i];
      hypre_IJMatrixColPartitioning(ij_C)[i] = col_partitioning_A[i];
   }
   if (hypre_IJMatrixObjectType(ij_A) == HYPRE_PARCSR)
   {
      hypre_IJMatrixAddParCSR(alpha, ij_A, beta, ij_B, ij_C);
   }
   else
   {
      hypre_error_in_arg(1);
   }
   *matrix_C = (HYPRE_IJMatrix) ij_C;
   return hypre_error_flag;
}
|
score.c | #include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <ctype.h> // for ispunct()
#include <safeomp.h>
#include <reactor.h>
#include <RNACI.h>
#include "hashtable/poshash.h"
#include "hashtable/neghash.h"
// Abort the current R call with an out-of-memory error message.
#define THROW_MEMERR() error("unable to allocate memory")
// Convenience free; free(NULL) is already a no-op, the guard is belt-and-braces.
#define FREE(ptr) if(ptr!=NULL) free(ptr)
// ----------------------------------------------------------------------------
// Hashtable interface
// ----------------------------------------------------------------------------
// TRUE when `word` is in the generated positive-sentiment hash set.
static inline bool is_pos_sentiment(const char *word, const int wordlen)
{
  return NULL != in_pos_set(word, wordlen);
}
// TRUE when `word` is in the generated negative-sentiment hash set.
static inline bool is_neg_sentiment(const char *word, const int wordlen)
{
  return NULL != in_neg_set(word, wordlen);
}
// Map a word to its sentiment: +1 positive, -1 negative, 0 neutral.
static inline int8_t get_sentiment_score(const char *word, const int wordlen)
{
  return is_pos_sentiment(word, wordlen) ?  1 :
         is_neg_sentiment(word, wordlen) ? -1 : 0;
}
// ----------------------------------------------------------------------------
// Finds the necessary size of the temporary storage
// ----------------------------------------------------------------------------
#define SCHED_LEN 64
// Length (strlen+1, i.e. including the NUL) of the longest string in the
// R character vector s_ — the scratch-buffer size every worker needs.
static inline size_t max_strlen(SEXP s_, const int len, int nthreads)
{
size_t maxlen = 0;
// NOTE the reduction clause is deliberately missing from omp versions < 4,
// OpenMP didn't include max reductions before then.
#ifdef OMP_VER_4
#pragma omp parallel for simd num_threads(nthreads) schedule(static,SCHED_LEN) if(len>OMP_MIN_SIZE) reduction(max:maxlen)
#else
#pragma omp parallel for num_threads(nthreads) schedule(static,SCHED_LEN) if(len>OMP_MIN_SIZE) // no reduction!
#endif
for (int i=0; i<len; i++)
{
const char *const s = STR(s_, i);
size_t tmp = strlen(s) + 1;
// without a max reduction the compare-and-update must be serialized
#ifndef OMP_VER_4
#pragma omp critical
#endif
if (tmp > maxlen)
maxlen = tmp;
}
return maxlen;
}
// ----------------------------------------------------------------------------
// R interface
// ----------------------------------------------------------------------------
// Score each string of the character vector s_: number of positive words,
// number of negative words, net sentiment score, and word count.  Returns
// a 4-column data.frame (positive, negative, score, wc), one row per input.
// NOTE(review): STR()/newRvec()/make_dataframe()/unhideGC() come from the
// RNACI helper library; assumed to wrap the usual R C API — confirm there.
SEXP R_score(SEXP s_, SEXP nthreads_)
{
SEXP ret, ret_names;
SEXP positive, negative, scores, nwords;
CHECK_IS_STRINGS(s_, "s");
CHECK_IS_POSINT(nthreads_, "nthreads");
const int len = LENGTH(s_);
int nthreads = asInteger(nthreads_);
newRvec(positive, len, "int");
newRvec(negative, len, "int");
newRvec(scores, len, "int");
newRvec(nwords, len, "int");
// per-thread scratch buffer size: longest input string (incl. NUL)
const size_t slen = max_strlen(s_, len, nthreads);
int8_t check = 0;
#pragma omp parallel shared(check) num_threads(nthreads)
{
char *s = NULL;
// NOTE uncomment to simulate oom failure
// if (omp_get_thread_num() != 1)
s = malloc(slen * sizeof(*s));
// all threads tmp space malloc check
#pragma omp atomic// update
check += (s == NULL);
// malloc succeeded
// NOTE(review): check is read right after the atomic update with no
// barrier, so one thread may miss another's failure here; the final
// check after the region catches it — confirm this is the intent.
if (!check)
{
#pragma omp for
for (int i=0; i<len; i++)
{
char *in = STR(s_, i);
size_t inlen = strlen(in) + 1;
if (inlen == 1) // STR(s_, i) == ''
{
INTEGER(positive)[i] = 0;
INTEGER(negative)[i] = 0;
INTEGER(scores)[i] = 0;
INTEGER(nwords)[i] = 0;
continue;
}
memcpy(s, in, inlen*sizeof(*s));
// normalize: punctuation becomes a separator, everything lowercased
SAFE_SIMD
for (size_t j=0; j<inlen; j++)
{
if (ispunct(s[j]))
s[j] = ' ';
s[j] = tolower(s[j]);
}
int *const restrict pos = INTEGER(positive) + i;
int *const restrict neg = INTEGER(negative) + i;
int *const restrict sc = INTEGER(scores) + i;
int *const restrict nw = INTEGER(nwords) + i;
*pos = *neg = *sc = *nw = 0;
// scan word by word; each word is NUL-terminated in place so the
// gperf-generated lookup can hash it directly
uint32_t start = 0;
uint32_t end;
for (uint32_t j=0; j<inlen; j++)
{
if (isspace(s[j]) || s[j] == '\0')
{
(*nw)++;
end = j;
s[end] = '\0'; // for gperf
int8_t score = get_sentiment_score(s+start, end-start);
*(sc) += score;
if (score > 0)
(*pos)++;
else if (score < 0)
(*neg)++;
// skip the run of whitespace to the next word
j++;
while (isspace(s[j]))
j++;
start = j;
}
}
}
}
FREE(s);
}
// malloc failed - should be outside of parallel region for proper error handling
if (check)
THROW_MEMERR();
make_list_names(ret_names, 4, "positive", "negative", "score", "wc");
make_dataframe(ret, RNULL, ret_names, 4, positive, negative, scores, nwords);
unhideGC();
return ret;
}
|
intruder.c | /* =============================================================================
*
* intruder.c
*
* =============================================================================
*
* Copyright (C) Stanford University, 2006. All Rights Reserved.
* Author: Chi Cao Minh
*
* =============================================================================
*
* For the license of bayes/sort.h and bayes/sort.c, please see the header
* of the files.
*
* ------------------------------------------------------------------------
*
* For the license of kmeans, please see kmeans/LICENSE.kmeans
*
* ------------------------------------------------------------------------
*
* For the license of ssca2, please see ssca2/COPYRIGHT
*
* ------------------------------------------------------------------------
*
* For the license of lib/mt19937ar.c and lib/mt19937ar.h, please see the
* header of the files.
*
* ------------------------------------------------------------------------
*
* For the license of lib/rbtree.h and lib/rbtree.c, please see
* lib/LEGALNOTICE.rbtree and lib/LICENSE.rbtree
*
* ------------------------------------------------------------------------
*
* Unless otherwise noted, the following license applies to STAMP files:
*
* Copyright (c) 2007, Stanford University
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* * Neither the name of Stanford University nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY STANFORD UNIVERSITY ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL STANFORD UNIVERSITY BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
* =============================================================================
*/
#include <assert.h>
#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>
#include "decoder.h"
#include "detector.h"
#include "dictionary.h"
#include "packet.h"
#include "stream.h"
#include "thread.h"
#include "timer.h"
#include "tm.h"
/* Command-line option letters double as direct indices into
 * global_params[] (256 slots: one per ASCII code). */
enum param_types {
PARAM_ATTACK = (unsigned char)'a',
PARAM_LENGTH = (unsigned char)'l',
PARAM_NUM = (unsigned char)'n',
PARAM_SEED = (unsigned char)'s',
PARAM_THREAD = (unsigned char)'t',
};
/* Defaults installed by global_param_init(). */
enum param_defaults {
PARAM_DEFAULT_ATTACK = 10,
PARAM_DEFAULT_LENGTH = 16,
PARAM_DEFAULT_NUM = 1 << 20,
PARAM_DEFAULT_SEED = 1,
PARAM_DEFAULT_THREAD = 1,
};
long global_params[256];
#if 0
= { /* 256 = ascii limit */
[PARAM_ATTACK] = PARAM_DEFAULT_ATTACK,
[PARAM_LENGTH] = PARAM_DEFAULT_LENGTH,
[PARAM_NUM] = PARAM_DEFAULT_NUM,
[PARAM_SEED] = PARAM_DEFAULT_SEED,
[PARAM_THREAD] = PARAM_DEFAULT_THREAD,
};
#endif
/* Install the compile-time defaults into global_params (runtime version of
 * the designated-initializer table disabled via "#if 0" above). */
void global_param_init()
{
global_params[PARAM_ATTACK] = PARAM_DEFAULT_ATTACK;
global_params[PARAM_LENGTH] = PARAM_DEFAULT_LENGTH;
global_params[PARAM_NUM] = PARAM_DEFAULT_NUM;
global_params[PARAM_SEED] = PARAM_DEFAULT_SEED;
global_params[PARAM_THREAD] = PARAM_DEFAULT_THREAD;
}
/* Argument bundle handed to each processPackets() worker thread. */
typedef struct arg {
/* input: */
stream_t* streamPtr; /* shared packet stream (accessed inside TM_BEGIN/TM_END) */
decoder_t* decoderPtr; /* shared flow decoder (accessed inside TM_BEGIN/TM_END) */
/* output: */
vector_t** errorVectors; /* one vector of flagged flow ids per thread */
} arg_t;
/* =============================================================================
* displayUsage
* =============================================================================
*/
static void
displayUsage (const char* appName)
{
printf("Usage: %s [options]\n", appName);
puts("\nOptions: (defaults)\n");
printf(" a <UINT> Percent [a]ttack (%i)\n", PARAM_DEFAULT_ATTACK);
printf(" l <UINT> Max data [l]ength (%i)\n", PARAM_DEFAULT_LENGTH);
printf(" n <UINT> [n]umber of flows (%i)\n", PARAM_DEFAULT_NUM);
printf(" s <UINT> Random [s]eed (%i)\n", PARAM_DEFAULT_SEED);
printf(" t <UINT> Number of [t]hreads (%i)\n", PARAM_DEFAULT_THREAD);
exit(1);
}
/* =============================================================================
* parseArgs
* =============================================================================
*/
/* Parse command-line flags into global_params[]; any unknown flag or
 * stray positional argument triggers displayUsage() (which exits). */
static void
parseArgs (long argc, char* const argv[])
{
    long opt;
    long k;

    opterr = 0;
    while ((opt = getopt(argc, argv, "a:l:n:s:t:")) != -1) {
        /* every recognized flag takes a numeric argument */
        if (opt == 'a' || opt == 'l' || opt == 'n' ||
            opt == 's' || opt == 't')
        {
            global_params[(unsigned char)opt] = atol(optarg);
        } else {
            opterr++;
        }
    }
    for (k = optind; k < argc; k++) {
        fprintf(stderr, "Non-option argument: %s\n", argv[k]);
        opterr++;
    }
    if (opterr) {
        displayUsage(argv[0]);
    }
}
/* =============================================================================
* processPackets
* =============================================================================
*/
/* Worker thread body: repeatedly pull a packet from the shared stream, feed
 * it to the flow decoder, and when a flow is fully reassembled run the
 * attack detector on it.  Flow ids that raise an error are pushed onto this
 * thread's private error vector (verified against the attack list in main).
 * Shared stream/decoder accesses run inside TM_BEGIN/TM_END transactions. */
void
processPackets (void* argPtr)
{
TM_THREAD_ENTER();
long threadId = thread_getId();
stream_t* streamPtr = ((arg_t*)argPtr)->streamPtr;
decoder_t* decoderPtr = ((arg_t*)argPtr)->decoderPtr;
vector_t** errorVectors = ((arg_t*)argPtr)->errorVectors;
detector_t* detectorPtr = PDETECTOR_ALLOC();
assert(detectorPtr);
PDETECTOR_ADDPREPROCESSOR(detectorPtr, &preprocessor_toLower);
vector_t* errorVectorPtr = errorVectors[threadId];
while (1) {
char* bytes;
TM_BEGIN();
bytes = TMSTREAM_GETPACKET(streamPtr);
TM_END();
if (!bytes) {
/* stream exhausted */
break;
}
packet_t* packetPtr = (packet_t*)bytes;
long flowId = packetPtr->flowId;
int_error_t error;
TM_BEGIN();
error = TMDECODER_PROCESS(decoderPtr,
bytes,
(PACKET_HEADER_LENGTH + packetPtr->length));
TM_END();
if (error) {
/*
* Currently, stream_generate() does not create these errors.
*/
assert(0);
bool_t status = PVECTOR_PUSHBACK(errorVectorPtr, (void*)flowId);
assert(status);
}
char* data;
long decodedFlowId;
TM_BEGIN();
data = TMDECODER_GETCOMPLETE(decoderPtr, &decodedFlowId);
TM_END();
if (data) {
/* a complete flow is available: scan it for attack signatures */
int_error_t error = PDETECTOR_PROCESS(detectorPtr, data);
P_FREE(data);
if (error) {
bool_t status = PVECTOR_PUSHBACK(errorVectorPtr,
(void*)decodedFlowId);
assert(status);
}
}
}
PDETECTOR_FREE(detectorPtr);
TM_THREAD_EXIT();
}
/* =============================================================================
* main
* =============================================================================
*/
/* Program entry point (MAIN / TM_* / SIM_* macros abstract the TM runtime
 * and simulator).  Phases: parse options, generate a synthetic packet
 * stream with a known number of attacks, process it in parallel, then
 * verify every flagged flow really is an attack and that none were missed. */
MAIN(argc, argv)
{
/*
* Initialization
*/
global_param_init();
parseArgs(argc, (char** const)argv);
long numThread = global_params[PARAM_THREAD];
SIM_GET_NUM_CPU(numThread);
TM_STARTUP(numThread);
P_MEMORY_STARTUP(numThread);
thread_startup(numThread);
long percentAttack = global_params[PARAM_ATTACK];
long maxDataLength = global_params[PARAM_LENGTH];
long numFlow = global_params[PARAM_NUM];
long randomSeed = global_params[PARAM_SEED];
printf("Percent attack = %li\n", percentAttack);
printf("Max data length = %li\n", maxDataLength);
printf("Num flow = %li\n", numFlow);
printf("Random seed = %li\n", randomSeed);
dictionary_t* dictionaryPtr = dictionary_alloc();
assert(dictionaryPtr);
stream_t* streamPtr = stream_alloc(percentAttack);
assert(streamPtr);
/* stream_generate returns the number of injected attacks; the detector
* must rediscover exactly this many (checked at the end) */
long numAttack = stream_generate(streamPtr,
dictionaryPtr,
numFlow,
randomSeed,
maxDataLength);
printf("Num attack = %li\n", numAttack);
decoder_t* decoderPtr = decoder_alloc();
assert(decoderPtr);
/* one error vector per thread: no sharing during the parallel phase */
vector_t** errorVectors = (vector_t**)SEQ_MALLOC(numThread * sizeof(vector_t*));
assert(errorVectors);
long i;
for (i = 0; i < numThread; i++) {
vector_t* errorVectorPtr = vector_alloc(numFlow);
assert(errorVectorPtr);
errorVectors[i] = errorVectorPtr;
}
arg_t arg;
arg.streamPtr = streamPtr;
arg.decoderPtr = decoderPtr;
arg.errorVectors = errorVectors;
/*
* Run transactions
*/
// NB: Since ASF/PTLSim "REAL" is native execution, and since we are using
// wallclock time, we want to be sure we read time inside the
// simulator, or else we report native cycles spent on the benchmark
// instead of simulator cycles.
GOTO_SIM();
TIMER_T startTime;
TIMER_READ(startTime);
#ifdef OTM
#pragma omp parallel
{
processPackets((void*)&arg);
}
#else
thread_start(processPackets, (void*)&arg);
#endif
TIMER_T stopTime;
TIMER_READ(stopTime);
// NB: As above, timer reads must be done inside of the simulated region
// for PTLSim/ASF
GOTO_REAL();
printf("Elapsed time = %f seconds\n", TIMER_DIFF_SECONDS(startTime, stopTime));
/*
* Check solution
*/
/* every flagged flow id must correspond to a generated attack flow */
long numFound = 0;
for (i = 0; i < numThread; i++) {
vector_t* errorVectorPtr = errorVectors[i];
long e;
long numError = vector_getSize(errorVectorPtr);
numFound += numError;
for (e = 0; e < numError; e++) {
long flowId = (long)vector_at(errorVectorPtr, e);
bool_t status = stream_isAttack(streamPtr, flowId);
assert(status);
}
}
printf("Num found = %li\n", numFound);
assert(numFound == numAttack);
/*
* Clean up
*/
for (i = 0; i < numThread; i++) {
vector_free(errorVectors[i]);
}
SEQ_FREE(errorVectors);
decoder_free(decoderPtr);
stream_free(streamPtr);
dictionary_free(dictionaryPtr);
TM_SHUTDOWN();
P_MEMORY_SHUTDOWN();
thread_shutdown();
MAIN_RETURN(0);
}
/* =============================================================================
*
* End of intruder.c
*
* =============================================================================
*/
|
lcm2_profiler.c | /** @file lcm2_profiler.c
*
* @par Copyright:
* 2009-2018 (C) Kai-Uwe Behrmann
*
* @brief littleCMS CMM profile generator for Oyranos
* @internal
* @author Kai-Uwe Behrmann <ku.b@gmx.de>
* @par License:
* MIT <http://www.opensource.org/licenses/MIT>
* @since 2009/10/24
*/
#include "lcm2_profiler.h"
#include <assert.h>
#include <lcms2.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <wchar.h>
#ifndef OY_UNUSED
#if (__GNUC__*100 + __GNUC_MINOR__) >= 406
#define OY_UNUSED __attribute__ ((unused))
#elif defined(_MSC_VER)
#define OY_UNUSED __declspec(unused)
#else
#define OY_UNUSED
#endif
#endif
#ifndef OY_FALLTHROUGH
#if defined(__clang__)
#define OY_FALLTHROUGH
#elif __GNUC__ >= 7
#define OY_FALLTHROUGH __attribute__ ((fallthrough));
#else
#define OY_FALLTHROUGH
#endif
#endif
#if LCMS_VERSION < 2050
/* 'dscm' */
#define cmsSigProfileDescriptionMLTag 0x6473636d
#endif
#define lcm2Free_m(v) if(v) { free(v); v = NULL; }
extern lcm2Message_f lcm2msg_p;
static const int max_channels = 16;
/* core functions */
/* Context threaded through the lcms sampler callbacks. */
typedef struct {
cmsHTRANSFORM in2MySpace; /* input space -> processing space; may be NULL */
cmsHTRANSFORM mySpace2Out; /* processing space -> output space; may be NULL */
lcm2Sampler_f sampler; /* user sampler, runs in the processing space */
void * sampler_variables; /* opaque state handed to sampler */
int channelsIn; /* channel count of the input space */
int channelsProcess; /* channel count of the processing space */
int channelsOut; /* channel count of the output space */
} lcm2Cargo_s;
/* Core sampler: optional input->process transform, user sampler in the
 * process space, optional process->output transform, then clip the result
 * to the [0,1] working range.  Always returns TRUE. */
int lcm2samplerDouble ( double in[],
double out[],
void * Cargo )
{
int i;
lcm2Cargo_s * d = (lcm2Cargo_s*) Cargo;
// color convert from input space to process color space
if(d->in2MySpace)
cmsDoTransform( d->in2MySpace, in, in, 1 );
// custom data processing
d->sampler(in,out,d->sampler_variables);
// converting from process space to output space
if(d->mySpace2Out)
cmsDoTransform( d->mySpace2Out, out, out, 1 );
// clipping to [0,1]
for(i = 0; i < d->channelsOut; ++i)
{
if(out[i] > 1.0)
out[i] = 1.0;
if(out[i] < 0.0)
out[i] = 0.0;
}
return TRUE;
}
/* 16-bit integer sampler shim: normalise to double, run lcm2samplerDouble(),
 * scale back and clip to the 16-bit range.
 * NOTE(review): normalisation divides by 65536.0 rather than 65535.0, so
 * 0xffff maps to ~0.99998 instead of 1.0; the matching inverse scaling and
 * the integer clip below compensate — presumably intentional, confirm
 * against the lcms sampling conventions. */
int lcm2sampler16 (const cmsUInt16Number In[],
cmsUInt16Number Out[],
void * Cargo)
{
int i, v, result = TRUE;
double in[max_channels], out[max_channels],
scaler = 65536.0;
lcm2Cargo_s * d = (lcm2Cargo_s*) Cargo;
for(i = 0; i < d->channelsIn; ++i)
in[i] = In[i] / scaler;
result = lcm2samplerDouble( in, out, Cargo );
for(i = 0; i < d->channelsOut; ++i)
{
v = out[i] * scaler;
// integer clipping
if(v > 65535)
Out[i] = 65535;
else
Out[i] = v;
}
return result;
}
/* 32-bit float sampler shim: widen the input to double, run the double
 * pipeline, and narrow the result back to float. */
int lcm2samplerFloat ( const cmsFloat32Number In[],
cmsFloat32Number Out[],
void * Cargo )
{
lcm2Cargo_s * d = (lcm2Cargo_s*) Cargo;
double in[max_channels], out[max_channels];
int i, result;

for(i = 0; i < d->channelsIn; ++i)
in[i] = (double) In[i];

result = lcm2samplerDouble( in, out, Cargo );

for(i = 0; i < d->channelsOut; ++i)
Out[i] = (cmsFloat32Number) out[i];

return result;
}
/** \addtogroup profiler ICC profiler API
* @brief Easy to use API to generate matrix and LUT ICC profiles.
*
* @{ */
/** Function lcm2OpenProfileFile
 * @brief Open a profile from file
 *
 * @code
 // create ICC profile with linear gamma, RGB.709 primaries + D65 from wildcard
 if(in_space_profile) h_in_space = lcm2OpenProfileFile( "*srgblinear", NULL );
 @endcode
 *
 * @param[in] my_space_profile operating color space.
 * Use a file name or
 * possible wildcards:
 * - *srgblinear
 * - *srgb
 * - *lab
 * - *xyz
 * - *rec601.625.linear
 * - *rec601.525.linear
 * @param[in] my_space_profile_path path name for
 * for my_space_profile; optional
 * @return lcms profile handle
 *
 * @version Oyranos: 0.9.6
 * @date 2016/03/04
 * @since 2016/03/04 (Oyranos: 0.9.6)
 */
cmsHPROFILE lcm2OpenProfileFile ( const char * my_space_profile,
const char * my_space_profile_path )
{
cmsHPROFILE h_my_space = 0;
if(my_space_profile_path == NULL) my_space_profile_path = "";
if(my_space_profile && my_space_profile[0])
{
/* path + name, +1 for the terminating NUL */
char * full_name = (char*) malloc(strlen(my_space_profile_path) + strlen(my_space_profile) + 1);
if(!full_name) return NULL;
sprintf( full_name, "%s%s", my_space_profile_path, my_space_profile );
/* well-known wildcard names are generated in memory */
if(strcmp(my_space_profile,"*lab") == 0)
h_my_space = cmsCreateLab4Profile(cmsD50_xyY());
else
if(strcmp(my_space_profile,"*xyz") == 0)
h_my_space = cmsCreateXYZProfile( );
else
if(strcmp(my_space_profile,"*srgb") == 0)
h_my_space = cmsCreate_sRGBProfile( );
else
if(strcmp(my_space_profile,"*srgblinear") == 0)
h_my_space = lcm2CreateICCMatrixProfile2( 1.0, 0.64, 0.33,
0.30, 0.60,
0.15, 0.06,
0.3127,0.329 );
else /* ITU-R BT.601-7 625-line, 50 field/s systems */
if(strcmp(my_space_profile,"*rec601.625.linear") == 0)
h_my_space = lcm2CreateICCMatrixProfile2( 1.0, 0.64, 0.33,
0.29, 0.60,
0.15, 0.06,
0.3127,0.329 );
else /* ITU-R BT.601-7 525-line, 60/1.001, field/s systems */
if(strcmp(my_space_profile,"*rec601.525.linear") == 0)
h_my_space = lcm2CreateICCMatrixProfile2( 1.0, 0.63, 0.34,
0.31, 0.595,
0.155, 0.07,
0.3127,0.329 );
/* anything else (or an unmatched wildcard) is treated as a file name */
if(!h_my_space)
h_my_space = cmsOpenProfileFromFile( full_name, "rb" );
if(!h_my_space) { lcm2msg_p( 300, NULL, "no profile from %s", full_name); }
/*else printf("will use %s\n", full_name);*/
lcm2Free_m(full_name);
}
return h_my_space;
}
/** Function lcm2WriteProfileToFile
 * @brief Write a profile to a file
 *
 * Suggested is a scheme of "space version vendor.icc".
 *
 * @code
 // "My-Space_v1.0_myna.icc"
 char * file_name = lcm2WriteProfileToFile( my_space_profile,
 "My-Space", "v1.0", "myna" );
 @endcode
 *
 * @param[in] my_space_profile the profile
 * @param[in] my_space_profile_name the color space name
 * @param[in] my_space_profile_version the version of the profile; optional
 * @param[in] vendor_four_bytes the vendor, just four bytes; optional
 * @return constructed file name;
 * can be released with free()
 *
 * @version Oyranos: 0.9.6
 * @date 2016/03/06
 * @since 2016/02/16 (Oyranos: 0.9.6)
 */
char * lcm2WriteProfileToFile ( cmsHPROFILE my_space_profile,
const char * my_space_profile_name,
const char * my_space_profile_version,
const char * vendor_four_bytes )
{
const char * version = my_space_profile_version;
const char * vendor = vendor_four_bytes;
size_t need;
char * fn;
char * p;

/* worst case: name + " " + version + " " + vendor + ".icc" + NUL */
need = strlen(my_space_profile_name) +
(version ? strlen(version) : 0) +
(vendor ? strlen(vendor) : 0) + 8;
fn = (char*) malloc(need);
if(!fn) return NULL;

sprintf( fn, "%s%s%s%s%s%s", my_space_profile_name,
version ? " " : "", version ? version : "",
vendor ? " " : "", vendor ? vendor : "",
strstr(my_space_profile_name, ".icc") ? "" : ".icc" );

/* file names carry '_' instead of spaces */
for(p = fn; *p; ++p)
if(*p == ' ')
*p = '_';

cmsSaveProfileToFile( my_space_profile, fn );
return fn;
}
/** Function lcm2WriteProfileToMem
 *
 * Save a cmsHPROFILE to a in memory data blob.
 * On success *size holds the blob length; on any failure *size is 0 and
 * NULL is returned.  The blob comes from allocateFunc when given, malloc
 * otherwise — the caller owns it.
 * NOTE(review): the handle is declared as cmsHPROFILE* but passed directly
 * to cmsSaveProfileToMem (which takes cmsHPROFILE); both are void-ish in
 * lcms, so this compiles — confirm callers pass what they intend.
 *
 * @version Oyranos: 0.9.7
 * @since 2008/12/28 (Oyranos: 0.9.7)
 * @date 2017/06/07
 */
void * lcm2WriteProfileToMem ( cmsHPROFILE * profile,
size_t * size,
void * (*allocateFunc)(size_t size) )
{
int error = !profile;
void * data = 0;
cmsUInt32Number size_ = 0;
if(!error)
{
*size = 0;
/* first pass: query the required size */
if(!cmsSaveProfileToMem( profile, NULL, &size_ ))
lcm2msg_p( 300, NULL, "cmsSaveProfileToMem failed" );
if(size_)
{
if(allocateFunc)
data = allocateFunc( size_ );
else
data = malloc( size_ );
/* bug fix: previously the allocation result was not checked and a
 * NULL buffer could be handed to the second cmsSaveProfileToMem */
if(data)
cmsSaveProfileToMem( profile, data, &size_ );
else
{
lcm2msg_p( 300, NULL, "out of memory" );
size_ = 0;
}
} else
lcm2msg_p( 300, NULL, "can not convert lcms2 profile to memory" );
*size = size_;
} else
lcm2msg_p( 301, NULL, "no profile" ); /* typo fix: was "no profle" */
return data;
}
/* --- CIE*Lab space familiy --- */
/** \addtogroup samplers Samplers
* @{ */
static double CIE_C_scaler = M_SQRT2; /* fit all Lab into LCh */
/** Function lcm2SamplerLab2LCh
 * @brief CIE*Lab -> CIE*LCh in PCS*Lab range
 *
 * The CIE*C channel is scaled to contain all CIE*Lab colors.
 * The ICC PCS*Lab space with range of 0->1 for all channels is utilised to
 * be useful as a sampler argument to lcm2CreateProfileLutByFunc().
 *
 * @param[in] i input Lab triple
 * @param[out] o output LCh triple
 * @param none unused
 *
 * @version Oyranos: 0.9.6
 * @date 2016/03/13
 * @since 2016/03/13 (Oyranos: 0.9.6)
 */
void lcm2SamplerLab2LCh ( const double i[],
double o[],
void * none OY_UNUSED )
{
/* recenter a/b around 0 and undo the sqrt(2) range compression */
double a = (i[1] - 0.5) * CIE_C_scaler,
b = (i[2] - 0.5) * CIE_C_scaler;
/* CIE*L */
o[0] = i[0];
/* CIE*C = sqrt(CIE*a² + CIE*b²) */
o[1] = hypot(a,b);
/* CIE*h = atan2(CIE*b, CIE*a), mapped from [-pi,pi] into [0,1] */
o[2] = atan2(b,a)/M_PI/2.0 + 0.5;
}
/** Function lcm2SamplerLCh2Lab
 * @brief CIE*LCh -> CIE*Lab in PCS*Lab range
 *
 * The CIE*C channel is scaled to contain all CIE*Lab colors.
 * The ICC PCS*Lab space with range of 0->1 for all channels is utilised to
 * be useful as a sampler argument to lcm2CreateProfileLutByFunc().
 *
 * @param[in] i input LCh triple
 * @param[out] o output Lab triple
 * @param none unused
 *
 * @version Oyranos: 0.9.7
 * @date 2017/12/05
 * @since 2016/03/13 (Oyranos: 0.9.6)
 */
void lcm2SamplerLCh2Lab ( const double i[],
double o[],
void * none OY_UNUSED )
{
/* CIE*L */
o[0] = i[0];
/* The +0.5 hue offset applied in lcm2SamplerLab2LCh() shifts the angle by
 * pi, negating cos/sin here; the surrounding 1.0-(x+0.5) == 0.5-x undoes
 * exactly that sign flip, so this is the precise inverse of Lab2LCh. */
/* CIE*a = C * cos(h) */
o[1] = 1.0 - (i[1] * cos(M_PI*2.0*i[2]) / CIE_C_scaler + 0.5);
/* CIE*b = C * sin(h) */
o[2] = 1.0 - (i[1] * sin(M_PI*2.0*i[2]) / CIE_C_scaler + 0.5);
}
/* sRGB */
/* Default CIECAM02 viewing conditions approximating sRGB reference viewing;
 * used when callers of the JCh samplers pass no conditions of their own. */
cmsViewingConditions lcm2_vc_srgb_ =
{
{ 95.05, 100.0, 108.88 }, /* D65 white point */
20, /* viewing background luminance Yb */
4, /* ambient in cd/m² (== 64 lux) */
2, /* Dim surround */
1 /* adapted (0-1) */
};
/** Function lcm2SamplerJCh2Lab
 * @brief CIECAM02 JCh -> CIE*Lab in PCS*Lab range
 *
 * Inverse CIECAM02 appearance model followed by XYZ -> PCS*Lab.
 * The ICC PCS*Lab space with range of 0->1 for all channels is utilised to
 * be useful as a sampler argument to lcm2CreateProfileLutByFunc().
 *
 * @param[in] i input JCh triple
 * @param[out] o output Lab triple
 * @param[in] v (cmsViewingConditions*); optional, default sRGB
 *
 * @version Oyranos: 0.9.7
 * @date 2018/02/28
 * @since 2018/02/28 (Oyranos: 0.9.7)
 */
void lcm2SamplerJCh2Lab ( const double i[],
double o[],
void * v )
{
cmsViewingConditions * vc = &lcm2_vc_srgb_;
cmsHANDLE vh;
cmsCIEXYZ XYZ;
cmsJCh JCh = { i[0], i[1], i[2] };
/* reverse CIECAM02: JCh -> XYZ under the given (or default) conditions */
vh = cmsCIECAM02Init( NULL, v?v:vc );
cmsCIECAM02Reverse( vh, &JCh, &XYZ );
cmsCIECAM02Done( vh );
lcm2CIEXYZ2iccLab( &XYZ, o );
}
/** Function lcm2SamplerLab2JCh
 * @brief CIE*Lab -> CIE*JCh
 *
 * The CIECAM02 appearance space.
 *
 * @param[in] i input Lab triple
 * @param[out] o output JCh triple
 * @param[in] v (cmsViewingConditions*); optional, default sRGB
 *
 * @version Oyranos: 0.9.7
 * @date 2018/02/28
 * @since 2018/02/28 (Oyranos: 0.9.7)
 */
void lcm2SamplerLab2JCh ( const double i[],
double o[],
void * v )
{
cmsViewingConditions * vc = &lcm2_vc_srgb_;
cmsHANDLE vh;
cmsCIEXYZ XYZ;
cmsJCh JCh;
/* PCS*Lab -> XYZ, then forward CIECAM02 under the given (or default)
 * viewing conditions */
lcm2iccLab2CIEXYZ( i, &XYZ );
vh = cmsCIECAM02Init( NULL, v?v:vc );
cmsCIECAM02Forward( vh, &XYZ, &JCh );
cmsCIECAM02Done( vh );
o[0] = JCh.J;
o[1] = JCh.C;
o[2] = JCh.h;
}
/* --- YCbCr space familiy --- */
/* Supported ITU YCbCr encoding standards. */
typedef enum {
ITU_R_BT_601,
ITU_R_BT_601_JPEG, /* BT.601 matrix with full-range (JPEG) quantization */
ITU_REC_709,
ITU_R_BT_2020
} ITU_Std_e;
/* Human readable names, index-aligned with ITU_Std_e; NULL terminated. */
const char * ITU_Std_dscr [] = { "ITU-R BT.601", "ITU-R BT.601 / JPEG", "ITU REC-709", "ITU-R BT.2020", NULL };
/* Pick the luma coefficients Kb/Kr defined by the given ITU standard. */
static void selectKbKr( ITU_Std_e ITU_Std, double * Kb, double * Kr )
{
  switch(ITU_Std)
  {
    case ITU_REC_709:               /* ITU REC-709 */
      *Kr = 0.2126; *Kb = 0.0722;
      break;
    case ITU_R_BT_2020:             /* ITU-R BT.2020 */
      *Kr = 0.2627; *Kb = 0.0593;
      break;
    case ITU_R_BT_601:              /* ITU-R BT.601, also used by JPEG */
    case ITU_R_BT_601_JPEG:
      *Kr = 0.299;  *Kb = 0.114;
      break;
  }
}
/* Pick the digital quantization range for the given ITU standard:
 * full range for JPEG, studio swing (16..235) otherwise. */
void selectBlackScale( ITU_Std_e ITU_Std, double * black, double * scale )
{
  switch(ITU_Std)
  {
    case ITU_R_BT_601_JPEG:
      /* full 8-bit range */
      *black = 0;  *scale = 255;
      break;
    case ITU_R_BT_601:
    case ITU_REC_709:
    case ITU_R_BT_2020:
      /* studio swing: black at code 16, 219 codes of range */
      *black = 16; *scale = 219;
      break;
  }
}
void linear2ycbcr( double *L_ )
{
double L = *L_;
double alpha = 1.09929682680944,
beta = 0.018053968510807;
// linear -> gamma
if(L < beta)
L *= 4.5;
else
L = pow(L,0.45) - (alpha - 1);
*L_ = L;
}
/* Inverse OETF: gamma-encoded value -> linear light, in place
 * (exact inverse of linear2ycbcr; note beta here is 0.018053... * 4.5,
 * i.e. the encoded value of the toe/power crossover). */
void ycbcr2linear( double *V_ )
{
double L = *V_;
double alpha = 1.09929682680944,
beta = 0.081243; /* 0.018053968510807 * 4.5 */
// gamma -> linear
if(L < beta)
L /= 4.5;
else
L = pow( (L + (alpha-1)) / alpha, 1.0/0.45 );
*V_ = L;
}
/* Standard RGB -> YPbPr form: luma from the Kb/Kr coefficients, chroma as
 * scaled color differences. */
static void rgb2ycbcr( double R, double G, double B,
double *Y_, double *Pb_, double *Pr_,
double Kb, double Kr )
{
  const double Y = Kr * R + (1.0-Kr-Kb) * G + Kb * B;

  *Y_  = Y;
  *Pb_ = 0.5 * (B - Y) / (1.0 - Kb);
  *Pr_ = 0.5 * (R - Y) / (1.0 - Kr);
}
/* Inverse of rgb2ycbcr():
 *   Pb = (B-Y)/(2(1-Kb))  =>  B = 2*Pb*(1-Kb) + Y
 *   Pr = (R-Y)/(2(1-Kr))  =>  R = 2*Pr*(1-Kr) + Y
 *   Y  = Kr*R + (1-Kr-Kb)*G + Kb*B  solved for G. */
static void ycbcr2rgb( double Y, double Pb, double Pr,
double *R_, double *G_, double *B_,
double Kb, double Kr )
{
  const double B = 2*Pb*(1-Kb) + Y;
  const double R = 2*Pr*(1-Kr) + Y;
  const double G = (Y - Kr*R - Kb*B) / (1.0-Kb-Kr);

  *R_ = R;
  *G_ = G;
  *B_ = B;
}
/* Uniformly scale an RGB triple in place; every supported standard uses
 * the same scaling (the switch keeps unknown enum values untouched). */
static void scaleRGB( ITU_Std_e ITU_Std, double scale, double * R, double * G, double * B )
{
  switch(ITU_Std)
  {
    case ITU_R_BT_601:
    case ITU_R_BT_601_JPEG:
    case ITU_REC_709:
    case ITU_R_BT_2020:
      *R *= scale;
      *G *= scale;
      *B *= scale;
      break;
  }
}
/* Map normalized YCbCr (0..1) into the encoding range of the standard.
 * max is the code value ceiling, e.g. 255 for a 8-bit scale. */
static void scaleLinearToYCbCr( ITU_Std_e ITU_Std, double max, double * Y, double * Cb, double * Cr )
{
  max /= 255.0;
  switch(ITU_Std)
  {
    case ITU_R_BT_601:  /* ITU-R BT.601 */
    case ITU_REC_709:   /* ITU REC-709 */
    case ITU_R_BT_2020: /* ITU-R BT.2020 */
      /* studio swing: Y 16..235, Cb/Cr 16..240 centered at 128 */
      *Y  = *Y  * (235.*max-16.*max) + 16.*max;
      *Cb = *Cb * (240.*max-16.*max) + 128.*max;
      *Cr = *Cr * (240.*max-16.*max) + 128.*max;
      break;
    case ITU_R_BT_601_JPEG: /* ITU-R BT.601 - JPEG */
      /* full swing: Y 0..255, Cb/Cr centered at 128 */
      *Y  = *Y  * (255.*max);
      *Cb = *Cb * (255.*max) + 128.*max;
      *Cr = *Cr * (255.*max) + 128.*max;
      break;
  }
}
/* Inverse of scaleLinearToYCbCr(): encoding range -> normalized 0..1. */
static void scaleYCbCrToLinear( ITU_Std_e ITU_Std, double max, double * Y, double * Cb, double * Cr )
{
  max /= 255.0;
  switch(ITU_Std)
  {
    case ITU_R_BT_601:  /* ITU-R BT.601 */
    case ITU_REC_709:   /* ITU REC-709 */
    case ITU_R_BT_2020: /* ITU-R BT.2020 */
      *Y  = (*Y  -  16.*max) / (235.*max-16.*max);
      *Cb = (*Cb - 128.*max) / (240.*max-16.*max);
      *Cr = (*Cr - 128.*max) / (240.*max-16.*max);
      break;
    case ITU_R_BT_601_JPEG: /* ITU-R BT.601 - JPEG */
      *Y  =  *Y / (255.*max);
      *Cb = (*Cb - 128.*max) / (255.*max);
      *Cr = (*Cr - 128.*max) / (255.*max);
      break;
  }
}
/** Function lcm2SamplerRGB2JpegYCbCr
* @brief RGB -> YCbCr in Jpeg range
*
* ITU R BT 601 / REC.601 coefficients with Jpeg range of 0-1 is generated.
*
* @param[in] i input RGB triple
* @param[out] o output REC.601 YCbCr in JPEG range triple
* @param none unused
*
* @version Oyranos: 0.9.6
* @date 2016/03/13
* @since 2016/03/07 (Oyranos: 0.9.6)
*/
void lcm2SamplerRGB2JpegYCbCr (
                                       const double        i[],
                                       double              o[],
                                       void              * none OY_UNUSED )
{
  /* RGB -> REC.601 YCbCr in full (JPEG) range.
   * Jpeg assumes no gamma correction, so the input is taken as RGB directly. */
  ITU_Std_e std = ITU_R_BT_601_JPEG;
  double Kr, Kb;
  double R = i[0], G = i[1], B = i[2];
  double Y = i[0], Pb = i[1], Pr = i[2];
  selectKbKr( std, &Kb, &Kr );
  scaleRGB( std, 1.0, &R, &G, &B );
  rgb2ycbcr( R, G, B, &Y, &Pb, &Pr, Kb, Kr );
  scaleLinearToYCbCr( std, 1.0, &Y, &Pb, &Pr );
  o[0] = Y; o[1] = Pb; o[2] = Pr;
}
/** Function lcm2SamplerJpegYCbCr2RGB
* @brief YCbCr in Jpeg range -> RGB
*
* ITU R BT 601 / REC.601 coefficients in Jpeg range of 0-1 is assumed.
*
* @param[in] i input REC.601 YCbCr in JPEG range triple
* @param[out] o output RGB triple
* @param none unused
*
* @version Oyranos: 0.9.6
* @date 2016/03/13
* @since 2016/03/13 (Oyranos: 0.9.6)
*/
void lcm2SamplerJpegYCbCr2RGB( const double        i[],
                                       double              o[],
                                       void              * none OY_UNUSED )
{
  /* REC.601 YCbCr in full (JPEG) range -> RGB.
   * Jpeg assumes no gamma correction, so the output is plain RGB:
   * YCbCr -> scale range -> normalized YCbCr -> RGB */
  ITU_Std_e std = ITU_R_BT_601_JPEG;
  double Kr, Kb, R, G, B;
  double Y = i[0], Pb = i[1], Pr = i[2];
  selectKbKr( std, &Kb, &Kr );
  scaleYCbCrToLinear( std, 1.0, &Y, &Pb, &Pr );
  ycbcr2rgb( Y, Pb, Pr, &R, &G, &B, Kb, Kr );
  scaleRGB( std, 1.0, &R, &G, &B );
  o[0] = R; o[1] = G; o[2] = B;
}
/** Function lcm2SamplerIdendity
* @brief Lab -> Lab
*
* PCS Lab range of 0-1 for all channels is assumed.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
* @param none unused
*
* @version Oyranos: 0.9.7
* @date 2018/02/26
* @since 2018/02/26 (Oyranos: 0.9.7)
*/
void lcm2SamplerIdendity    ( const double        i[],
                                       double              o[],
                                       void              * none OY_UNUSED )
{
  /* Pass-through: copy the triple unchanged
   * (L/M/S, CIE L/a/b, Y/Cb/Cr or R/G/B). */
  int c;
  for(c = 0; c < 3; ++c)
    o[c] = i[c];
}
/** Function lcm2SamplerGrayer
* @brief Lab -> Gray -> Lab
*
* PCS Lab range of 0-1 for all channels is assumed.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
* @param none unused
*
* @version Oyranos: 0.9.6
* @date 2016/03/13
* @since 2016/03/13 (Oyranos: 0.9.6)
*/
void lcm2SamplerGrayer      ( const double        i[],
                                       double              o[],
                                       void              * none OY_UNUSED )
{
  /* Keep lightness and neutralize both chroma channels. */
  o[0] = i[0]*1.0; /* L / CIE*L / Y / R */
  o[1] = 0.5;      /* M / CIE*a / Cb / G -> neutral */
  o[2] = 0.5;      /* S / CIE*b / Cr / B -> neutral */
}
/** Function lcm2SamplerBlacknWhite
* @brief Lab -> Black&White -> Lab
*
* PCS Lab range of 0-1 for all channels is assumed.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
* @param none unused
*
* @version Oyranos: 0.9.6
* @date 2016/03/13
* @since 2016/03/13 (Oyranos: 0.9.6)
*/
void lcm2SamplerBlacknWhite ( const double        i[],
                                       double              o[],
                                       void              * none OY_UNUSED )
{
  /* Threshold lightness at 0.5 and neutralize the chroma channels. */
  o[0] = (i[0] <= 0.5) ? 0.0 : 1.0; /* L / CIE*L / Y / R */
  o[1] = 0.5;                       /* M / CIE*a / Cb / G -> neutral */
  o[2] = 0.5;                       /* S / CIE*b / Cr / B -> neutral */
}
/** Function lcm2SamplerSepia
* @brief Lab -> LCh -> Yellow -> LCh -> Lab
*
* PCS Lab range of 0-1 for all channels is assumed.
* Creates a single reddish hue.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
* @param none unused
*
* @version Oyranos: 0.9.6
* @date 2016/03/14
* @since 2016/03/14 (Oyranos: 0.9.6)
*/
void lcm2SamplerSepia       ( const double        i[],
                                       double              o[],
                                       void              * none )
{
  /* Convert to LCh and force a fixed warm hue with
   * lightness dependent chroma, then convert back. */
  double lch_in[3], lch_out[3];
  lcm2SamplerLab2LCh( i, lch_in, none );
  lch_out[0] = lch_in[0];              /* keep lightness */
  lch_out[1] = 0.04+0.04*lch_in[0];    /* chroma grows with lightness */
  lch_out[2] = 0.18;                   /* constant hue angle */
  lcm2SamplerLCh2Lab( lch_out, o, none );
}
/** Function lcm2SamplerReddish
* @brief Lab -> reddish tint -> Lab
*
* PCS Lab range of 0-1 for all channels is assumed.
* Same like Sepia, but gives all colors a reddish tint.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
* @param none unused
*
* @version Oyranos: 0.9.6
* @date 2016/03/15
* @since 2016/03/15 (Oyranos: 0.9.6)
*/
void lcm2SamplerReddish     ( const double        i[],
                                       double              o[],
                                       void              * none OY_UNUSED )
{
  /* Add a lightness scaled warm offset to both chroma channels. */
  o[0] = i[0];
  o[1] = i[1] + 0.012+0.012*i[0]; /* shift a towards red */
  o[2] = i[2] + 0.025+0.025*i[0]; /* shift b towards yellow */
}
/** Function lcm2SamplerWhitePointLab
* @brief Lab -> White Point Adaption -> Lab
*
* PCS Lab range of 0-1 for all channels is assumed.
* Same like reddish, but adapts all colors to a given white point difference.
* It uses simple linear adaption inside CIE*Lab.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
 *  @param[in]      data                pointer to array of two doubles with
* desired ICC*ab differences
*
* @version Oyranos: 0.9.7
* @date 2017/05/17
* @since 2017/05/17 (Oyranos: 0.9.7)
*/
void lcm2SamplerWhitePointLab( const double        i[],
                                       double              o[],
                                       void              * data )
{
  /* Shift CIE*ab linearly with lightness; data holds the two ab offsets. */
  const double * icc_ab = (const double*) data;
  o[0] = i[0];
  o[1] = i[1] + icc_ab[0] * i[0];
  o[2] = i[2] + icc_ab[1] * i[0];
}
/** Function lcm2iccLab2CIEXYZ
* @brief ICC*Lab -> CIE*XYZ
*
* Converts from PCS Lab encoding to lcms XYZ type.
*
* @param[in] icc_Lab input Lab triple in PCS range
* @param[out] XYZ output XYZ struct
*
* @version Oyranos: 0.9.7
* @date 2018/02/28
* @since 2018/02/28 (Oyranos: 0.9.7)
*/
void lcm2iccLab2CIEXYZ     ( const double      * icc_Lab,
                                       cmsCIEXYZ         * XYZ )
{
  /* Expand ICC PCS encoded Lab (0..1) to CIE*Lab, then convert to XYZ
   * relative to the D50 white point. */
  cmsCIELab Lab = { icc_Lab[0] * 100.0,
                    icc_Lab[1] * 257.0 - 128.0,
                    icc_Lab[2] * 257.0 - 128.0 };
  cmsLab2XYZ( cmsD50_XYZ(), XYZ, &Lab);
}
/** Function lcm2CIEXYZ2iccLab
* @brief CIE*XYZ -> ICC*Lab
*
* Converts from lcms XYZ type to PCS Lab encoding.
*
* @param[in] XYZ input XYZ struct
* @param[out] icc_Lab output Lab triple in PCS range
*
* @version Oyranos: 0.9.7
* @date 2018/02/28
* @since 2018/02/28 (Oyranos: 0.9.7)
*/
void lcm2CIEXYZ2iccLab     ( const cmsCIEXYZ   * XYZ,
                                       double            * icc_Lab )
{
  /* Convert XYZ to CIE*Lab relative to D50, then compress into
   * ICC PCS encoding (0..1 per channel). */
  cmsCIELab Lab;
  cmsXYZ2Lab( cmsD50_XYZ(), &Lab, XYZ );
  icc_Lab[0] =  Lab.L / 100.0;
  icc_Lab[1] = (Lab.a + 128.0) / 257.0;
  icc_Lab[2] = (Lab.b + 128.0) / 257.0;
}
/** Function lcm2iccXYZ2iccLab
* @brief ICC*XYZ -> ICC*Lab
*
* Converts from PCS XYZ to PCS Lab encoding.
*
* @param[in] XYZ input XYZ triple
* @param[out] icc_Lab output Lab triple in PCS range
*
* @version Oyranos: 0.9.7
* @date 2018/02/28
* @since 2018/02/28 (Oyranos: 0.9.7)
*/
void lcm2iccXYZ2iccLab ( const double * XYZ,
double * icc_Lab )
{
cmsCIEXYZ XYZ_ = { XYZ[0], XYZ[1], XYZ[2] };
lcm2CIEXYZ2iccLab( &XYZ_, icc_Lab );
}
/** Function lcm2SamplerWhitePointBradford
* @brief Lab -> Bradford White Point Adaption -> Lab
*
* PCS Lab range of 0-1 for all channels is assumed.
* Same like reddish, but adapts all colors to a given white point difference.
* It uses Bradford CAT.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
 *  @param[in]      data                pointer to array of six doubles with
 *                                      source ICC*XYZ white point, followed by
 *                                      destination ICC*XYZ whitepoint
*
* @version Oyranos: 0.9.7
* @date 2018/02/28
* @since 2018/02/28 (Oyranos: 0.9.7)
*/
void lcm2SamplerWhitePointBradford ( const double        i[],
                                       double              o[],
                                       void              * data )
{
  /* Bradford chromatic adaption of one PCS Lab color.
   * data: six doubles - source ICC*XYZ white point followed by the
   * destination ICC*XYZ white point (both normalized 0..1). */
  const double * icc_XYZ = (const double*) data;
  const double scale = 100.0;
  cmsCIEXYZ src_wtpt, dst_illu, in_XYZ, out_XYZ;
  src_wtpt.X = icc_XYZ[0] * scale;
  src_wtpt.Y = icc_XYZ[1] * scale;
  src_wtpt.Z = icc_XYZ[2] * scale;
  dst_illu.X = icc_XYZ[3+0] * scale;
  dst_illu.Y = icc_XYZ[3+1] * scale;
  dst_illu.Z = icc_XYZ[3+2] * scale;
  lcm2iccLab2CIEXYZ( i, &in_XYZ );
  cmsAdaptToIlluminant( &out_XYZ, &src_wtpt, &dst_illu, &in_XYZ );
  lcm2CIEXYZ2iccLab( &out_XYZ, o );
}
/** Function lcm2SamplerProof
* @brief Lab -> proofing profile -> Lab
*
* Convert a proofing profile into a abstract one.
* Abstract profiles can easily be merged into a multi profile transform.
* PCS Lab range of 0-1 for all channels is assumed.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
 *  @param[in]      data                pointer to array of two void* with
* - desired cmsHTRANSFORM
* for uint32_t arrays in PT_Lab
* - cmsFLAGS_GAMUTCHECK flag
*
* @version Oyranos: 0.9.7
* @since 2009/11/04 (Oyranos: 0.1.10)
* @date 2017/06/03
*/
void lcm2SamplerProof       ( const double        i[],
                                       double              o[],
                                       void              * data )
{
  /* Run a PCS Lab color through a proofing transform (float variant).
   * data[0]: cmsHTRANSFORM, data[1]: non-NULL enables gamut marking. */
  void ** ptr = (void**)data;
  cmsCIELab in_Lab, out_Lab;
  cmsFloat32Number in_[3], out_[3];
  in_[0] = in_Lab.L = i[0] * 100.0;
  in_[1] = in_Lab.a = i[1] * 257.0 - 128.0;
  in_[2] = in_Lab.b = i[2] * 257.0 - 128.0;
  cmsDoTransform( ptr[0], in_, out_, 1 );
  out_Lab.L = out_[0]; out_Lab.a = out_[1]; out_Lab.b = out_[2];
  /* with gamut check enabled, mark strongly deviating colors as gray */
  if(ptr[1] != NULL && fabs( cmsDeltaE( &in_Lab, &out_Lab ) ) > 10)
  {
    out_Lab.L = 50.0;
    out_Lab.a = out_Lab.b = 0.0;
  }
  o[0] =  out_Lab.L / 100.0;
  o[1] = (out_Lab.a + 128.0) / 257.0;
  o[2] = (out_Lab.b + 128.0) / 257.0;
}
/** Function lcm2SamplerProofD
* @brief Lab -> proofing profile -> Lab
*
* Convert a proofing profile into a abstract one.
* Abstract profiles can easily be merged into a multi profile transform.
* PCS Lab range of 0-1 for all channels is assumed.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
 *  @param[in]      data                pointer to array of two void* with
* - desired cmsHTRANSFORM and
* for uint64_t arrays in PT_Lab
* - cmsFLAGS_GAMUTCHECK flag
*
* @version Oyranos: 0.9.7
* @since 2009/11/04 (Oyranos: 0.1.10)
* @date 2017/11/06
*/
void lcm2SamplerProofD      ( const double        i[],
                                       double              o[],
                                       void              * data )
{
  /* Run a PCS Lab color through a proofing transform (double variant).
   * data[0]: cmsHTRANSFORM, data[1]: non-NULL enables gamut marking. */
  void ** ptr = (void**)data;
  cmsCIELab in_Lab, out_Lab;
  cmsFloat64Number in_[3], out_[3];
  in_[0] = in_Lab.L = i[0] * 100.0;
  in_[1] = in_Lab.a = i[1] * 257.0 - 128.0;
  in_[2] = in_Lab.b = i[2] * 257.0 - 128.0;
  cmsDoTransform( ptr[0], in_, out_, 1 );
  out_Lab.L = out_[0]; out_Lab.a = out_[1]; out_Lab.b = out_[2];
  /* with gamut check enabled, mark strongly deviating colors as gray */
  if(ptr[1] != NULL && fabs( cmsDeltaE( &in_Lab, &out_Lab ) ) > 10)
  {
    out_Lab.L = 50.0;
    out_Lab.a = out_Lab.b = 0.0;
  }
  o[0] =  out_Lab.L / 100.0;
  o[1] = (out_Lab.a + 128.0) / 257.0;
  o[2] = (out_Lab.b + 128.0) / 257.0;
}
/** @} */ /* samplers */
/** Function lcm2CreateProfileLutByFunc
* @brief Generate a ICC profile LUT
*
* This function takes a series of parameters and functions to create a
* ICC profile from. The sampler function operates in a input space and
* and creates colors in a output space. These values are filled into the
* profile LUT. It is possible to create effect profiles of class abstract
* or LUT profiles in any other color space including device links.
*
 *  For some already available sampler functions see @ref samplers.
*
* @param[in,out] profile profile to add LUT table
* @param[in] samplerMySpace the function to fill the LUT with color
* @param[in] samplerArg data pointer to samplerMySpace
* @param[in] my_space_profile operating color space
* for samplerMySpace(); for wildcards see
* lcm2OpenProfileFile()
* @param[in] in_space_profile input color space
* for samplerMySpace(); for wildcards see
* lcm2OpenProfileFile()
* @param[in] out_space_profile output color space
* for samplerMySpace(); for wildcards see
* lcm2OpenProfileFile()
* @param[in] grid_size dimensions of the created LUT; e.g. 33
* @param[in] tag_sig tag signature for the generated LUT;
*
* @version Oyranos: 0.9.7
* @date 2017/05/17
* @since 2009/11/04 (Oyranos: 0.1.10)
*/
int          lcm2CreateProfileLutByFunc (
                                       cmsHPROFILE         profile,
                                       lcm2Sampler_f       samplerMySpace,
                                       void              * samplerArg,
                                       const char        * in_space_profile,
                                       const char        * my_space_profile,
                                       const char        * out_space_profile,
                                       int                 grid_size,
                                       cmsTagSignature     tag_sig
                                       )
{
  /* Convenience wrapper: use linear (gamma 1.0) curves on all channels and
   * delegate to lcm2CreateProfileLutByFuncAndCurves(). Returns 0 on success. */
  cmsToneCurve * curves[max_channels];
  int pos;
  int status = 0;
  if(!profile) return 1;
  curves[0] = cmsBuildGamma(0, 1.0);
  if(!curves[0]) return 1;
  /* share the single linear curve across all channel slots */
  for(pos = 1; pos < max_channels; ++pos)
    curves[pos] = curves[0];
  status = lcm2CreateProfileLutByFuncAndCurves (
                       profile,
                       samplerMySpace,
                       samplerArg,
                       curves, curves,
                       in_space_profile,
                       my_space_profile,
                       out_space_profile,
                       grid_size, tag_sig
                       );
  cmsFreeToneCurve( curves[0] );
  return status;
}
/** Function lcm2CreateProfileLutByFuncAndCurves
* @brief Generate a ICC profile LUT
*
* This function takes a series of parameters and functions to create a
* ICC profile from. The sampler function operates in a input space and
* and creates colors in a output space. These values are filled into the
* profile LUT. It is possible to create effect profiles of class abstract
* or LUT profiles in any other color space including device links.
*
 *  For some already available sampler functions see @ref samplers.
*
* @param[in,out] profile profile to add LUT table
* @param[in] samplerMySpace the function to fill the LUT with color
* @param[in] samplerArg data pointer to samplerMySpace
* @param[in] in_curves input curves
* @param[in] out_curves output curves
* @param[in] my_space_profile operating color space
* for samplerMySpace(); for wildcards see
* lcm2OpenProfileFile()
* @param[in] in_space_profile input color space
* for samplerMySpace(); for wildcards see
* lcm2OpenProfileFile()
* @param[in] out_space_profile output color space
* for samplerMySpace(); for wildcards see
* lcm2OpenProfileFile()
* @param[in] grid_size dimensions of the created LUT; e.g. 33
* @param[in] tag_sig tag signature for the generated LUT;
*
* @version Oyranos: 0.9.6
* @date 2017/05/17
* @since 2009/11/04 (Oyranos: 0.1.10)
*/
int          lcm2CreateProfileLutByFuncAndCurves (
                                       cmsHPROFILE         profile,
                                       lcm2Sampler_f       samplerMySpace,
                                       void              * samplerArg,
                                       cmsToneCurve      * in_curves[],
                                       cmsToneCurve      * out_curves[],
                                       const char        * in_space_profile,
                                       const char        * my_space_profile,
                                       const char        * out_space_profile,
                                       int                 grid_size,
                                       cmsTagSignature     tag_sig
                                       )
{
  /* Sample samplerMySpace() over a grid_size^3 CLut in my_space_profile's
   * color space and write the resulting pipeline into tag_sig
   * (cmsSigAToB0Tag when tag_sig is 0). Returns 0 on success. */
  cmsHPROFILE h_in_space = 0,
              h_my_space = 0,
              h_out_space = 0;
  cmsHTRANSFORM tr_In2MySpace = 0, tr_MySpace2Out = 0;
  cmsStage * gmt_lut = 0, * gmt_lut16 = 0;
  cmsPipeline * gmt_pl = cmsPipelineAlloc( 0,3,3 ),
              * gmt_pl16 = cmsPipelineAlloc( 0,3,3 );
  lcm2Cargo_s cargo;
  int i;
  int error = 0;
  int in_layout, my_layout, out_layout;
  /* 3 channel float layout for the helper transforms */
  in_layout = my_layout = out_layout = (FLOAT_SH(1)|CHANNELS_SH(3)|BYTES_SH(0));
  if(!profile) return 1;
  if(in_space_profile) h_in_space = lcm2OpenProfileFile( in_space_profile, NULL );
  if(my_space_profile) h_my_space = lcm2OpenProfileFile( my_space_profile, NULL );
  if(out_space_profile)h_out_space = lcm2OpenProfileFile( out_space_profile, NULL );
  /* only build helper transforms between differing spaces */
  if(h_in_space && h_my_space && strcmp(in_space_profile,my_space_profile) != 0)
  {
    tr_In2MySpace = cmsCreateTransformTHR ( 0, h_in_space, in_layout,
                                            h_my_space, my_layout,
                                            INTENT_RELATIVE_COLORIMETRIC,
                                            cmsFLAGS_NOOPTIMIZE);
    if(!tr_In2MySpace) { lcm2msg_p( 300, NULL, "no transform"); error = 1; goto lcm2CreateProfileLutByFuncAndCurvesClean; }
  }
  if(h_my_space && h_out_space && strcmp(my_space_profile,out_space_profile) != 0)
  {
    tr_MySpace2Out = cmsCreateTransformTHR( 0, h_my_space, my_layout,
                                            h_out_space, out_layout,
                                            INTENT_RELATIVE_COLORIMETRIC,
                                            cmsFLAGS_NOOPTIMIZE);
    if(!tr_MySpace2Out) { lcm2msg_p( 300, NULL, "no transform"); error = 1; goto lcm2CreateProfileLutByFuncAndCurvesClean; }
  }
  memset(&cargo, 0, sizeof(lcm2Cargo_s));
  cargo.in2MySpace = tr_In2MySpace;
  cargo.mySpace2Out = tr_MySpace2Out;
  cargo.sampler = samplerMySpace;
  cargo.sampler_variables = samplerArg; /* bug fix: line ended with ',' (comma operator) instead of ';' */
  cargo.channelsIn = h_in_space ? cmsChannelsOf( cmsGetColorSpace( h_in_space ) ) : 3;
  cargo.channelsProcess = h_my_space ? cmsChannelsOf( cmsGetColorSpace( h_my_space ) ) : 3;
  cargo.channelsOut = h_out_space ? cmsChannelsOf( cmsGetColorSpace( h_out_space ) ) : 3;
  /* sample the float and the 16-bit CLut in parallel */
#pragma omp parallel for
  for(i = 0; i < 2; ++i)
  {
    if(i)
    {
      gmt_lut16 = cmsStageAllocCLut16bit( 0, grid_size, 3,3,0 );
      cmsStageSampleCLut16bit( gmt_lut16, lcm2sampler16, &cargo, 0 );
    } else
    {
      gmt_lut = cmsStageAllocCLutFloat( 0, grid_size, 3,3,0 );
      cmsStageSampleCLutFloat( gmt_lut, lcm2samplerFloat, &cargo, 0 );
    }
  }
  /* 16-bit int pipeline: in curves + CLut + out curves */
  cmsPipelineInsertStage( gmt_pl16, cmsAT_BEGIN,
                          cmsStageAllocToneCurves( 0, cargo.channelsIn, in_curves ) );
  cmsPipelineInsertStage( gmt_pl16, cmsAT_END, gmt_lut16 );
  cmsPipelineInsertStage( gmt_pl16, cmsAT_END,
                          cmsStageAllocToneCurves( 0, cargo.channelsOut, out_curves ) );
  cmsWriteTag( profile, (tag_sig!=0)?tag_sig:cmsSigAToB0Tag, gmt_pl16 );
  /* float pipeline */
  /* cmsPipeline owns the cmsStage memory */
  cmsPipelineInsertStage( gmt_pl, cmsAT_BEGIN,
                          cmsStageAllocToneCurves( 0, cargo.channelsIn, in_curves ) );
  cmsPipelineInsertStage( gmt_pl, cmsAT_END, gmt_lut );
  cmsPipelineInsertStage( gmt_pl, cmsAT_END,
                          cmsStageAllocToneCurves( 0, cargo.channelsOut, out_curves ) );
  /*cmsWriteTag( gmt, cmsSigDToB0Tag, gmt_pl );*/
lcm2CreateProfileLutByFuncAndCurvesClean:
  if(h_in_space) {cmsCloseProfile( h_in_space );} h_in_space = 0;
  if(h_my_space) {cmsCloseProfile( h_my_space );} h_my_space = 0;
  if(h_out_space) {cmsCloseProfile( h_out_space );} h_out_space = 0;
  if(tr_In2MySpace) {cmsDeleteTransform( tr_In2MySpace );} tr_In2MySpace = 0;
  if(tr_MySpace2Out) {cmsDeleteTransform( tr_MySpace2Out );} tr_MySpace2Out = 0;
  if(gmt_pl16) cmsPipelineFree( gmt_pl16 );
  if(gmt_pl) cmsPipelineFree( gmt_pl );
  return error;
}
/** Function lcm2CreateProfileLutByMatrixAndCurves
* @brief Generate a ICC Lut profile
*
* This function takes a series of parameters to create a
* ICC profile from. It is possible to create effect profiles of class abstract
* or LUT profiles in any other color space including device links.
* The LUT will contain B curves, a matrix and M curves. The allowed number
* of color channels is 3.
*
* @param[in,out] profile profile to add LUT table;
* requires a version 4.3 profile
* @param[in] in_curves input curves
* @param[in] matrix the 3x3 matrix
* @param[in] out_curves output curves
* @param[in] in_space_profile input color space
* for wildcards see lcm2OpenProfileFile()
* @param[in] out_space_profile output color space
* for wildcards see lcm2OpenProfileFile()
*
* @version Oyranos: 0.9.7
* @date 2019/03/03
* @since 2019/03/01 (Oyranos: 0.9.7)
*/
int          lcm2CreateProfileLutByMatrixAndCurves (
                                       cmsHPROFILE         profile,
                                       cmsToneCurve      * in_curves[],
                                       const double      * matrix,
                                       cmsToneCurve      * out_curves[],
                                       const char        * in_space_profile,
                                       const char        * out_space_profile,
                                       cmsTagSignature     tag_sig
                                       )
{
  /* Write an in-curves + matrix + out-curves pipeline into tag_sig
   * (cmsSigAToB0Tag when tag_sig is 0). Returns 0 on success. */
  cmsHPROFILE src = 0, dst = 0;
  int n_in, n_out;
  cmsPipeline * pipeline = cmsPipelineAlloc( 0,3,3 );
  int status = 0;
  if(!profile) return 1;
  if(in_space_profile)  src = lcm2OpenProfileFile( in_space_profile, NULL );
  if(out_space_profile) dst = lcm2OpenProfileFile( out_space_profile, NULL );
  n_in  = src ? cmsChannelsOf( cmsGetColorSpace( src ) ) : 3;
  n_out = dst ? cmsChannelsOf( cmsGetColorSpace( dst ) ) : 3;
  if(n_in == 0 || n_out == 0 || !matrix)
  {
    status = 1;
    goto lcm2CreateProfileLutByMatrixAndCurvesClean;
  }
  cmsPipelineInsertStage( pipeline, cmsAT_BEGIN,
                          cmsStageAllocToneCurves( 0, n_in, in_curves ) );
  cmsPipelineInsertStage( pipeline, cmsAT_END,
                          cmsStageAllocMatrix(0, n_in, n_out,
                                           (const cmsFloat64Number*) matrix, NULL) );
  cmsPipelineInsertStage( pipeline, cmsAT_END,
                          cmsStageAllocToneCurves( 0, n_out, out_curves ) );
  cmsWriteTag( profile, (tag_sig!=0)?tag_sig:cmsSigAToB0Tag, pipeline );
lcm2CreateProfileLutByMatrixAndCurvesClean:
  if(src) cmsCloseProfile( src );
  if(dst) cmsCloseProfile( dst );
  if(pipeline) cmsPipelineFree( pipeline );
  return status;
}
/** Function lcm2CreateAbstractProfileM
* @brief Create a effect profile of type abstract in ICC*XYZ PCS
*
* Possible computation emlements are m_curve + matrix + b_curve or
* matrix only or b_curve only for PCS*XYZ.
* In case m_curve and b_curve are passed in, then
* PCS*Lab is assumed and m_curve is mapped to PSC*L and
* b_curve is mapped to PCS*ab.
*
* Here a code example:
* @code
double matrix[3][3] = {
{ 0.0000000, 0.9642957, 0.0000000},
{ 0.0000000, 1.0000000, 0.0000000},
{ 0.0000000, 0.8251046, 0.0000000}
};
const char * name_i18n[] = {
"de", "DE", "Graustufen (MyProject)",
"en", "US", "Grayer (MyProject)"
};
lcm2CreateAbstractProfileM ( NULL,
&matrix[0][0],
NULL,
4.3,
"Grayer (MyProject)",
name_i18n,
"Grayer myna",
"My Project 2019",
"My Name",
ICC_2011_LICENSE,
"CIE*XYZ",
"http://www.cie.co.at",
NULL,
NULL
);
@endcode
*
* @param[in] m_curve optional input curve for all CIE*XYZ;
* channels in range 0.0 ... PCS*XYZ_MAX
* @param[in] matrix the 3x3 matrix; optional if one of b_curve or both curves are specified
* @param[in] b_curve output curve for all PCS*XYZ; optional if matrix is specified;
* channels in range 0.0 ... 1.0;
* use LCM2_ADAPT_TO_PCS_XYZ
* @param[in] icc_profile_version 4.3
* @param[in] my_abstract_description internal profile name
* @param[in] my_abstract_descriptions internal profile name translated
* @param[in] my_abstract_file_name profile file name. If present a ICC profile will be written to that name. optional
* @param[in] provider e.g. "My Project 2019"
* @param[in] vendor e.g. "My Name"
* @param[in] my_license e.g. "This profile is made available by %s, with permission of %s, and may be copied, distributed, embedded, made, used, and sold without restriction. Altered versions of this profile shall have the original identification and copyright information removed and shall not be misrepresented as the original profile."
* - first %%s is replaced by the provider string arg and
* - second %%s is replaced by the vendor string arg
* @param[in] device_model e.g. "My Set"
* @param[in] device_manufacturer e.g. "www.mydomain.net"
* @param[in] my_meta_data e.g. {"DOMAIN_,GROUP_","DOMAIN_key1","value1","GROUP_key2","value2"}
* @param[out] h_profile the resulting profile
*
* @version Oyranos: 0.9.7
* @date 2019/03/09
* @since 2019/03/03 (Oyranos: 0.9.7)
*/
int          lcm2CreateAbstractProfileM (
                                       cmsToneCurve      * m_curve,
                                       const double      * matrix,
                                       cmsToneCurve      * b_curve,
                                       double              icc_profile_version,
                                       const char        * my_abstract_description,
                                       const char       ** my_abstract_descriptions,
                                       const char        * my_abstract_file_name,
                                       const char        * provider,
                                       const char        * vendor,
                                       const char        * my_license,
                                       const char        * device_model,
                                       const char        * device_manufacturer,
                                       const char       ** my_meta_data,
                                       cmsHPROFILE       * h_profile
                                       )
{
  /* Build an abstract effect profile from curves and/or a matrix.
   * Returns 0 on success, non-zero on failure. */
  cmsHPROFILE profile = 0;
  int error = !matrix && !m_curve && !b_curve; /* need at least one element */
  cmsToneCurve * m_curves[3] = {NULL,NULL,NULL},
               * b_curves[3] = {NULL,NULL,NULL},
               * allocated_m_curve = NULL, * allocated_b_curve = NULL;
  int i;
  const char * csp = "*xyz"; // color space CIE*XYZ with PCS*XYZ encoding range
  /* linear placeholder curves for channels the caller left out */
  if(m_curve == NULL)
    allocated_m_curve = cmsBuildGamma(0, 1.0);
  if(b_curve == NULL)
    allocated_b_curve = cmsBuildGamma(0, 1.0);
  if(error) goto lcm2CreateAbstractProfileMClean;
  if(m_curve && matrix == NULL && b_curve)
  {
    /* both curves without matrix: treat as PCS*Lab with
     * m_curve -> L and b_curve -> a,b */
    csp = "*lab"; // CIE*Lab with PCS*Lab encoding range
    b_curves[0] = m_curve; // CIE*L with PCS*Lab encoding range
    b_curves[1] = b_curve; // CIE*a with PCS*Lab encoding range
    b_curves[2] = b_curve; // CIE*b with PCS*Lab encoding range
  }
  else
  {
    for(i = 0; i < 3; ++i)
    {
      m_curves[i] = m_curve ? m_curve : allocated_m_curve;
      b_curves[i] = b_curve ? b_curve : allocated_b_curve;
    }
  }
  profile = lcm2CreateProfileFragment (
                       csp, csp,
                       icc_profile_version,
                       my_abstract_description,
                       provider, vendor, my_license,
                       device_model, device_manufacturer, NULL);
  if(!profile)
  {
    /* bug fix: previously fell through with error == 0 and so
     * reported success although no profile was created */
    error = 1;
    goto lcm2CreateAbstractProfileMClean;
  }
  if(my_meta_data)
    lcm2AddMetaTexts ( profile, my_meta_data[0], &my_meta_data[1], cmsSigMetaTag );
  if((m_curve && matrix == NULL && b_curve) || // L*ab
     (m_curve == NULL && matrix == NULL))      // XYZ
  {
    /* curves only: a plain tone curve stage is sufficient */
    cmsPipeline * pl = cmsPipelineAlloc( 0,3,3 );
    cmsPipelineInsertStage( pl, cmsAT_END,
                            cmsStageAllocToneCurves( 0, 3, b_curves ) );
    cmsWriteTag( profile, cmsSigAToB0Tag, pl );
    if(pl) cmsPipelineFree( pl );
  }
  else
    error = lcm2CreateProfileLutByMatrixAndCurves( profile,
                       m_curves, matrix, b_curves,
                       "*xyz", "*xyz",
                       cmsSigAToB0Tag );
  if(error) goto lcm2CreateAbstractProfileMClean;
  lcm2AddMluDescription ( profile, my_abstract_descriptions,
                          cmsSigProfileDescriptionMLTag
                        );
  if(my_abstract_file_name)
  {
    char * fn = lcm2WriteProfileToFile( profile, my_abstract_file_name, 0,0 );
    lcm2msg_p( 302, NULL, "wrote to: %s", fn?fn:"----");
    lcm2Free_m(fn);
  }
  if(h_profile)
    *h_profile = profile;
  else
    cmsCloseProfile( profile );
lcm2CreateAbstractProfileMClean:
  /* bug fix: release the profile handle when we failed after creating it */
  if(error && profile) cmsCloseProfile( profile );
  if(allocated_m_curve) cmsFreeToneCurve( allocated_m_curve );
  if(allocated_b_curve) cmsFreeToneCurve( allocated_b_curve );
  return error;
}
/** Function lcm2CreateAbstractProfile
* @brief Create a effect profile of type abstract in ICC*Lab PCS
*
* Here a code example:
* @code
void samplerGrayer (const double i[],
double o[])
{
o[0] = i[0]*1.0; // L / CIE*L / Y / R
o[1] = 0.5; // M / CIE*a / Cb / G
o[2] = 0.5; // S / CIE*b / Cr / B
}
const char * name_i18n[] = {
"de", "DE", "Graustufen (MyProject)",
"en", "US", "Grayer (MyProject)"
};
lcm2CreateAbstractProfile (
samplerGrayer,
NULL,
"*lab", // CIE*Lab
5,
2.3,
"Grayer (MyProject)",
name_i18n,
"Grayer myna",
"My Project 2016",
"My Name",
ICC_2011_LICENSE,
"CIE*L",
"http://www.cie.co.at",
NULL,
NULL
);
@endcode
*
* @param[in] samplerMySpace the function to fill the LUT with color
* @param[in] samplerArg data pointer to samplerMySpace
* @param[in] my_space_profile operating color space
* for samplerMySpace();
* "*lab" will set CIE*Lab
* @param[in] grid_size dimensions of the created LUT; e.g. 33
* @param[in] icc_profile_version 2.3 or 4.3
* @param[in] my_abstract_description internal profile name
* @param[in] my_abstract_descriptions internal profile name translated
* @param[in] my_abstract_file_name profile file name. If present a ICC profile will be written to that name. optional
* @param[in] provider e.g. "My Project 2016"
* @param[in] vendor e.g. "My Name"
* @param[in] my_license e.g. "This profile is made available by %s, with permission of %s, and may be copied, distributed, embedded, made, used, and sold without restriction. Altered versions of this profile shall have the original identification and copyright information removed and shall not be misrepresented as the original profile."
* - first %%s is replaced by the provider string arg and
* - second %%s is replaced by the vendor string arg
* @param[in] device_model e.g. "My Set"
* @param[in] device_manufacturer e.g. "www.mydomain.net"
* @param[in] my_meta_data e.g. {"DOMAIN_,GROUP_","DOMAIN_key1","value1","GROUP_key2","value2"}
* @param[out] h_profile the resulting profile
*
* @version Oyranos: 0.9.7
* @date 2017/05/17
* @since 2009/11/04 (Oyranos: 0.1.10)
*/
int          lcm2CreateAbstractProfile(
                                       lcm2Sampler_f       samplerMySpace,
                                       void              * samplerArg,
                                       const char        * my_space_profile,
                                       int                 grid_size,
                                       double              icc_profile_version,
                                       const char        * my_abstract_description,
                                       const char       ** my_abstract_descriptions,
                                       const char        * my_abstract_file_name,
                                       const char        * provider,
                                       const char        * vendor,
                                       const char        * my_license,
                                       const char        * device_model,
                                       const char        * device_manufacturer,
                                       const char       ** my_meta_data,
                                       cmsHPROFILE       * h_profile
                                       )
{
  /* Build a Lab -> Lab abstract effect profile by sampling samplerMySpace().
   * Returns 0 on success, non-zero on failure. */
  cmsHPROFILE profile = 0;
  int error = 0;
  profile = lcm2CreateProfileFragment (
                       "*lab", // CIE*Lab
                       "*lab", // CIE*Lab
                       icc_profile_version,
                       my_abstract_description,
                       provider, vendor, my_license,
                       device_model, device_manufacturer, NULL);
  if(!profile)
  {
    /* bug fix: previously fell through with error == 0 and so
     * reported success although no profile was created */
    error = 1;
    goto lcm2CreateAbstractProfileClean;
  }
  if(my_meta_data)
    lcm2AddMetaTexts ( profile, my_meta_data[0], &my_meta_data[1], cmsSigMetaTag );
  error = lcm2CreateProfileLutByFunc( profile, samplerMySpace, samplerArg,
                                      "*lab", my_space_profile, "*lab",
                                      grid_size, cmsSigAToB0Tag );
  if(error) goto lcm2CreateAbstractProfileClean;
  lcm2AddMluDescription ( profile, my_abstract_descriptions,
                          cmsSigProfileDescriptionMLTag
                        );
  if(my_abstract_file_name)
  {
    char * fn = lcm2WriteProfileToFile( profile, my_abstract_file_name, 0,0 );
    lcm2msg_p( 302, NULL, "wrote to: %s", fn?fn:"----");
    lcm2Free_m(fn);
  }
  if(h_profile)
    *h_profile = profile;
  else
    cmsCloseProfile( profile );
lcm2CreateAbstractProfileClean:
  /* bug fix: release the profile handle when we failed after creating it */
  if(error && profile) cmsCloseProfile( profile );
  return error;
}
/** Function lcm2CreateAbstractTemperatureProfile
* @brief Create a effect profile of type abstract in ICC*Lab PCS from Kelvin
*
* @param[in] kelvin the desired temperature in Kelvin; ICC reference (D50) is 5000 Kelvin
* @param[in] source_white_profile a profile, e.g. the actual monitor profile; optional, default is D50
* @param[in] grid_size dimensions of the created LUT; e.g. 33
* @param[in] icc_profile_version 2.3 or 4.3
* @param[out] my_abstract_file_name profile file name
* @param[out] h_profile the resulting profile; If omitted the function will write the profile to my_abstract_file_name.
*
* @version Oyranos: 0.9.7
* @date 2017/05/17
* @since 2017/05/17 (Oyranos: 0.9.7)
*/
int lcm2CreateAbstractTemperatureProfile (
float kelvin,
cmsHPROFILE source_white_profile,
int grid_size,
double icc_profile_version,
char ** my_abstract_file_name,
cmsHPROFILE * h_profile
)
{
cmsHPROFILE profile = NULL;
cmsToneCurve * i_curve[3] = {NULL,NULL,NULL}, * o_curve[3] = {NULL,NULL,NULL};
/* type[6] Y = (a * X + b) ^ Gamma + c order: {g, a, b, c} */
double curve_params[4] = {1,1,0,0}, curve_params_low[4] = {1,0.95,0,0};
int i;
cmsCIEXYZ * source_white = NULL;
const char * kelvin_meta[] = {
"EFFECT_class", "reddish,white_point,atom",
"EFFECT_type", "CIEab",
"COLORIMETRY_white_point", "yes,reddish,kelvin",
"CMF_binary", "create-abstract",
"CMF_version", "0.9.7",
"CMF_product", "Oyranos",
0,0
};
char * kelvin_name = malloc(1024);
int error = !kelvin_name;
double icc_ab[2];
char * desc = NULL;
if(error) return 1;
if(source_white_profile)
{
if(cmsIsTag(source_white_profile, cmsSigProfileDescriptionTag))
{
cmsUInt32Number n = cmsGetProfileInfoASCII(source_white_profile, cmsInfoDescription, cmsNoLanguage, cmsNoCountry, NULL, 0);
if(n)
{
desc = calloc( n+1, sizeof(char) );
if(!desc) goto lcm2CreateAbstractTemperatureProfileClean;
cmsUInt32Number nr = cmsGetProfileInfoASCII(source_white_profile, cmsInfoDescription, cmsNoLanguage, cmsNoCountry, desc, n);
if(n != nr)
lcm2msg_p( 301, NULL, "found propblem reading desc tag: %d %d", n,nr);
}
}
source_white = cmsReadTag( source_white_profile, cmsSigMediaWhitePointTag ); // MediaWhitePointTag
}
i_curve[0] = o_curve[0] = cmsBuildGamma(0, 1.0);
if(!i_curve[0]) error = 1;
for(i = 1; i < 3; ++i) { i_curve[i] = i_curve[0]; }
if(!error)
{
cmsCIExyY xyWhitePoint;
cmsFloat64Number TempK = kelvin;
/* 4000 - 25000 K */
cmsWhitePointFromTemp( &xyWhitePoint, TempK );
cmsCIEXYZ WhitePoint;
const cmsCIEXYZ * reference_white = cmsD50_XYZ();
float max_brightness;
cmsxyY2XYZ( &WhitePoint, &xyWhitePoint );
cmsCIELab LabWhitePoint;
cmsCIELab SrcLabWhitePoint;
if(source_white)
reference_white = source_white;
cmsXYZ2Lab( reference_white, &LabWhitePoint, &WhitePoint );
icc_ab[0] = LabWhitePoint.a/128.0;
icc_ab[1] = LabWhitePoint.b/128.0;
#ifndef OY_HYP
#define OY_SQRT(a,b) ((a)*(a) + (b)*(b))
#define OY_HYP(a,b) pow(OY_SQRT(a,b),1.0/2.0)
#endif
/* reduce brightness remaining inside a cone with a roof angle of 30° */
max_brightness = 1.0 - OY_HYP(icc_ab[0],icc_ab[1]/1.5);
cmsXYZ2Lab( cmsD50_XYZ(), &SrcLabWhitePoint, reference_white );
cmsXYZ2Lab( cmsD50_XYZ(), &LabWhitePoint, &WhitePoint );
lcm2msg_p( 302, NULL, "SrcW: %g %g %g LabW: %g %g %g diff: %g %g max brightness: %g",
SrcLabWhitePoint.L, SrcLabWhitePoint.a, SrcLabWhitePoint.b,
LabWhitePoint.L, LabWhitePoint.a, LabWhitePoint.b,
icc_ab[0], icc_ab[1], max_brightness );
/* avoid color clipping around the white point */
curve_params_low[1] = max_brightness;
o_curve[0] = cmsBuildParametricToneCurve(0, 6, curve_params_low);
o_curve[1] = o_curve[2] = cmsBuildParametricToneCurve(0, 6, curve_params);
if(!o_curve[0] || !o_curve[1]) error = 1;
}
if(error) goto lcm2CreateAbstractTemperatureProfileClean;
if(icc_ab[1] > 0)
{
sprintf( kelvin_name, "Reddish %d K (www.oyranos.org)", (int)kelvin );
} else if(icc_ab[1] == 0) {
sprintf( kelvin_name, "%d K (www.oyranos.org)", (int)kelvin );
kelvin_meta[1] = "neutral,white_point,atom";
kelvin_meta[3] = "yes,D50,kelvin";
} else {
sprintf( kelvin_name, "Bluish %d K (www.oyranos.org)", (int)kelvin );
kelvin_meta[1] = "bluish,white_point,atom";
kelvin_meta[3] = "yes,bluish,kelvin";
}
if(source_white_profile)
{
if(desc && strlen(desc) < 900)
sprintf( &kelvin_name[strlen(kelvin_name)], " - %s", desc);
if(icc_ab[1] > 0)
{
kelvin_meta[1] = "reddish,white_point,atom,device";
kelvin_meta[3] = "yes,reddish,kelvin";
} else if(icc_ab[1] == 0) {
kelvin_meta[1] = "neutral,white_point,atom,device";
kelvin_meta[3] = "yes,D50,kelvin";
} else {
kelvin_meta[1] = "bluish,white_point,atom,device";
kelvin_meta[3] = "yes,bluish,kelvin";
}
}
if(!error)
/* profile fragment creation */
profile = lcm2CreateProfileFragment (
"*lab", // CIE*Lab
"*lab", // CIE*Lab
icc_profile_version,
kelvin_name,
"Oyranos project 2017",
"Kai-Uwe Behrmann",
ICC_2011_LICENSE,
"CIE*Lab",
"http://www.cie.co.at",
NULL);
if(!profile) error = 1;
if(!error)
error = lcm2CreateProfileLutByFuncAndCurves( profile,
lcm2SamplerWhitePointLab, icc_ab,
o_curve, i_curve,
"*lab", "*lab", "*lab",
grid_size, cmsSigAToB0Tag );
if(!error)
lcm2AddMetaTexts ( profile, "EFFECT_,COLORIMETRY_,CMF_", kelvin_meta, cmsSigMetaTag );
lcm2CreateAbstractTemperatureProfileClean:
if(i_curve[0]) cmsFreeToneCurve( i_curve[0] );
if(o_curve[0]) cmsFreeToneCurve( o_curve[0] );
if(o_curve[1]) cmsFreeToneCurve( o_curve[1] );
*my_abstract_file_name = kelvin_name;
if(h_profile)
*h_profile = profile;
else if(profile && *my_abstract_file_name)
{
char * fn = lcm2WriteProfileToFile( profile, *my_abstract_file_name, 0,0 );
lcm2msg_p( 302, NULL, "wrote to: %s", fn?fn:"----");
lcm2Free_m(fn);
cmsCloseProfile( profile );
}
return error;
}
/** Function lcm2CreateAbstractWhitePointProfileLab
* @brief Create an effect profile of type abstract in ICC*Lab PCS for white point adjustment
*
* These profiles can be applied to 1D / per single channel only adjustments.
* It will be marked with EFFECT_linear=yes in the meta tag.
*
* @param[in] cie_a CIE*a correction value in -0.5 - 0.5 range
* @param[in] cie_b CIE*b correction value in -0.5 - 0.5 range
* @param[in] grid_size dimensions of the created LUT; e.g. 33
* @param[in] icc_profile_version 2.3 or 4.3
* @param[out] my_abstract_file_name profile file name
* @param[out] h_profile the resulting profile; If omitted the function will write the profile to my_abstract_file_name.
*
* @version Oyranos: 0.9.7
* @date 2018/02/28
* @since 2017/06/02 (Oyranos: 0.9.7)
*/
int lcm2CreateAbstractWhitePointProfileLab (
                                       double            cie_a,
                                       double            cie_b,
                                       int               grid_size,
                                       double            icc_profile_version,
                                       char           ** my_abstract_file_name,
                                       cmsHPROFILE     * h_profile
                                     )
{
  /* Build an abstract CIE*Lab-PCS white-point shift profile for the given
   * CIE*a / CIE*b offsets.  Ownership of the generated name string passes to
   * the caller through *my_abstract_file_name; the profile handle is either
   * returned via *h_profile or written to disk under that name.
   * Returns 0 on success, non-zero on failure. */
  cmsHPROFILE profile = NULL;
  /* three identity input curves; output curves carry the brightness limit */
  cmsToneCurve * i_curve[3] = {NULL,NULL,NULL}, * o_curve[3] = {NULL,NULL,NULL};
  /* type[6] Y = (a * X + b) ^ Gamma + c  order: {g, a, b, c} */
  double curve_params[4] = {1,1,0,0}, curve_params_low[4] = {1,0.95,0,0};
  int i;
  /* key/value pairs written to the meta tag; some values are patched below
   * depending on the direction of the shift */
  const char * kelvin_meta[] = {
    "EFFECT_class", "reddish,white_point,linear,atom",
    "EFFECT_linear", "yes", /* can be used for 1D curves like VCGT */
    "EFFECT_type", "CIEab",
    "COLORIMETRY_white_point", "yes,reddish,kelvin",
    "CMF_binary", "create-abstract",
    "CMF_version", "0.9.7",
    "CMF_product", "Oyranos",
    0,0
  };
  char * kelvin_name = malloc(1024);
  int error = !kelvin_name;
  double icc_ab[2] = {cie_a, cie_b};

  if(error) return 1;

  i_curve[0] = cmsBuildGamma(0, 1.0);
  if(!i_curve[0]) error = 1;
  /* i_curve[1] and i_curve[2] alias i_curve[0]; it is freed only once below */
  for(i = 1; i < 3; ++i)
  { i_curve[i] = i_curve[0]; }

  if(!error)
  {
#ifndef OY_HYP
#define OY_SQRT(a,b) ((a)*(a) + (b)*(b))
#define OY_HYP(a,b) pow(OY_SQRT(a,b),1.0/2.0)
#endif
    /* reduce brightness remaining inside a cone with a roof angle of 30° */
    double max_brightness = 1.0 - OY_HYP(icc_ab[0],icc_ab[1]/1.5);
    /* avoid color clipping around the white point */
    curve_params_low[1] = max_brightness;
    o_curve[0] = cmsBuildParametricToneCurve(0, 6, curve_params_low);
    /* o_curve[1] and o_curve[2] share one curve object; freed once below */
    o_curve[1] = o_curve[2] = cmsBuildParametricToneCurve(0, 6, curve_params);
    if(!o_curve[0] || !o_curve[1]) error = 1;
  }

  if(error) goto lcm2CreateAbstractWhitePointProfileClean;

  /* pick a human readable name and matching meta values by shift direction */
  if(icc_ab[1] > 0)
  {
    sprintf( kelvin_name, "Reddish CIE*a %g CIE*b %g", cie_a, cie_b );
  } else if(-0.001 < icc_ab[1] && icc_ab[0] < 0.001) {
    /* NOTE(review): this "neutral" test mixes icc_ab[1] and icc_ab[0];
     * possibly -0.001 < icc_ab[1] && icc_ab[1] < 0.001 was intended — confirm */
    sprintf( kelvin_name, "CIE*a %g CIE*b %g", cie_a, cie_b );
    kelvin_meta[1] = "neutral,white_point,atom";
    /* NOTE(review): index 3 is the EFFECT_linear value; the string suggests
     * the COLORIMETRY_white_point value (index 7) was meant — confirm */
    kelvin_meta[3] = "yes,D50,kelvin";
  } else {
    sprintf( kelvin_name, "Bluish CIE*a %g CIE*b %g", cie_a, cie_b );
    kelvin_meta[1] = "bluish,white_point,atom";
    kelvin_meta[3] = "yes,bluish,kelvin";
  }

  /* abstract Lab->Lab profile skeleton */
  profile = lcm2CreateProfileFragment (
    "*lab", // CIE*Lab
    "*lab", // CIE*Lab
    icc_profile_version,
    kelvin_name,
    "Oyranos project 2018",
    "Kai-Uwe Behrmann",
    ICC_2011_LICENSE,
    "CIE*Lab",
    "http://www.cie.co.at",
    NULL);
  if(!profile) goto lcm2CreateAbstractWhitePointProfileClean;

  /* fill the AToB0 LUT by sampling the white point shift function */
  error = lcm2CreateProfileLutByFuncAndCurves( profile,
                                  lcm2SamplerWhitePointLab, icc_ab,
                                  o_curve, i_curve,
                                  "*lab", "*lab", "*lab",
                                  grid_size, cmsSigAToB0Tag );
  if(!error)
    lcm2AddMetaTexts ( profile, "EFFECT_,COLORIMETRY_,CMF_", kelvin_meta, cmsSigMetaTag );

lcm2CreateAbstractWhitePointProfileClean:
  /* aliased curve slots ([1],[2] of i_curve; [2] of o_curve) are freed once */
  if(i_curve[0]) cmsFreeToneCurve( i_curve[0] );
  if(o_curve[0]) cmsFreeToneCurve( o_curve[0] );
  if(o_curve[1]) cmsFreeToneCurve( o_curve[1] );

  /* hand the name buffer to the caller (even on error paths) */
  *my_abstract_file_name = kelvin_name;
  if(h_profile)
    *h_profile = profile;
  else if(profile && *my_abstract_file_name)
  {
    char * fn = lcm2WriteProfileToFile( profile, *my_abstract_file_name, 0,0 );
    lcm2msg_p( 302, NULL, "wrote to: %s", fn?fn:"----");
    lcm2Free_m(fn);
    cmsCloseProfile( profile );
  }
  return error;
}
/** Function lcm2CreateAbstractWhitePointProfileBradford
* @brief Create an effect profile of type abstract in ICC*XYZ PCS for white point adjustment
*
* These profiles can be applied to 1D / per single channel only adjustments.
* It will be marked with EFFECT_linear=yes in the meta tag.
*
* @param[in] src_iccXYZ source media white point;
* The triple is stored in meta:COLORIMETRY_white_point_xyz_src.
* @param[in] src_name source media white point name or profile; optional
* @param[in] illu_iccXYZ ICC*XYZ illuminant in 0.0 - 2.0 range;
* The triple stored in meta:COLORIMETRY_white_point_xyz_dst.
* @param[in] illu_name target illuminant name; optional
* @param[in] scale scale factor to multiply the chromatic outgoing white point;
* optional. Set *scale == 0 to request a generic scale factor,
* which might be good for a sRGB gamut.
* @param[in,out] icc_profile_version 4.3 is supported
* @param[in] flags - 0x01 : return only fast my_abstract_file_name, without expensive profile computation
* @param[out] my_abstract_file_name profile file name
* @param[out] h_profile the resulting profile; If omitted the function will write the profile to my_abstract_file_name.
*
* @version Oyranos: 0.9.7
* @date 2019/03/17
* @since 2017/06/02 (Oyranos: 0.9.7)
*/
int lcm2CreateAbstractWhitePointProfileBradford (
                                       double          * src_iccXYZ,
                                       const char      * src_name,
                                       double          * illu_iccXYZ,
                                       const char      * illu_name,
                                       double          * scale,
                                       double            icc_profile_version OY_UNUSED,
                                       int               flags,
                                       char           ** my_abstract_file_name,
                                       cmsHPROFILE     * h_profile
                                     )
{
  /* Build an abstract XYZ-PCS profile performing a Bradford chromatic
   * adaptation from src_iccXYZ to illu_iccXYZ, optionally scaled to avoid
   * clipping.  flags & 0x01 requests only the (cheap) name computation.
   * Ownership of the name string passes to the caller via
   * *my_abstract_file_name.  Returns 0 on success. */
  cmsHPROFILE profile = NULL;
  cmsToneCurve * m_curves[3] = {NULL,NULL,NULL}, * b_curves[3] = {NULL,NULL,NULL};
  int i;
  /* textual XYZ triples and scale, filled in below and referenced by
   * kelvin_meta before the meta tag is written */
  char white_point_xyz_src[64] = {0},
       white_point_xyz_dst[64] = {0},
       brightness_scale[16] = {0};
  /* trailing 0,0 pairs leave room for the optional src/illu name entries */
  const char * kelvin_meta[] = {
    "EFFECT_class", "reddish,type,white_point,linear,atom",
    "EFFECT_linear", "yes", /* can be used for 1D curves like VCGT */
    "COLORIMETRY_white_point", "yes,reddish,kelvin",
    "COLORIMETRY_white_point_icc_xyz_src", white_point_xyz_src,
    "COLORIMETRY_white_point_icc_xyz_dst", white_point_xyz_dst,
    "COLORIMETRY_white_point_scale", brightness_scale,
    "EFFECT_type", "bradford",
    "CMF_binary", "create-abstract",
    "CMF_version", "0.9.7",
    "CMF_product", "Oyranos",
    0,0,
    0,0,
    0,0
  };
  int len = (src_name ? strlen(src_name) : 0) + (illu_name ? strlen(illu_name) : 0) + 1024;
  char * kelvin_name = malloc(len);
  int error = !kelvin_name;
  cmsCIEXYZ SourceWhitePt = { src_iccXYZ[0], src_iccXYZ[1], src_iccXYZ[2]},
            Illuminant = {illu_iccXYZ[0], illu_iccXYZ[1], illu_iccXYZ[2]};
  double icc_ab[2] = {0,0};
  double matrix[3][3] = {{1.0, 0.0, 0.0},
                         {0.0, 1.0, 0.0},
                         {0.0, 0.0, 1.0}};
  double max_brightness, b_scale;
#ifdef HAVE_LOCALE_H
  char * old_loc;
#endif

  if(error) return 1;

#ifdef HAVE_LOCALE_H
  /* remember the callers locale; restored around the sprintf calls below.
   * NOTE(review): setlocale() may return NULL, which strdup() would
   * dereference — confirm this cannot happen here */
  old_loc = strdup(setlocale(LC_ALL,NULL));
#endif

  if(scale)
    b_scale = *scale;
  else
    b_scale = 0.0;

  /* flags & 0x01 means: caller wants only the file name, skip the
   * expensive LUT/profile computation */
  if(!(flags & 0x01)) /* skip computation */
  {
    /* m_curves[1..2] and b_curves[1..2] alias index 0; freed once below */
    m_curves[0] = cmsBuildGamma(0, 1.0);
    if(!m_curves[0]) error = 1;
    for(i = 1; i < 3; ++i)
    { m_curves[i] = m_curves[0]; }
    b_curves[0] = cmsBuildGamma(0, 1.0);
    if(!b_curves[0]) error = 1;
    for(i = 1; i < 3; ++i)
    { b_curves[i] = b_curves[0]; }
  }

  if(!error)
  {
#ifndef OY_HYP
#define OY_SQRT(a,b) ((a)*(a) + (b)*(b))
#define OY_HYP(a,b) pow(OY_SQRT(a,b),1.0/2.0)
#endif
#ifndef OY_MAX
#define OY_MAX(a,b) (((a) > (b)) ? (a) : (b))
#endif
    /* reduce brightness remaining inside a cone with a roof angle of 30° */
    double src_Lab[3], dst_Lab[3];
    lcm2iccXYZ2iccLab( src_iccXYZ, src_Lab );
    lcm2iccXYZ2iccLab( illu_iccXYZ, dst_Lab );
    /* a/b distance between the two white points drives name and scaling */
    icc_ab[0] = dst_Lab[1] - src_Lab[1];
    icc_ab[1] = dst_Lab[2] - src_Lab[2];
    max_brightness = OY_HYP(icc_ab[0],icc_ab[1]/1.2);
    /* avoid color clipping around the white point */
    if(b_scale == 0.0)
      b_scale = OY_MAX( 1.0 - max_brightness, 0.2 );
#ifdef HAVE_LOCALE_H
    /* force '.' as decimal separator for the meta tag numbers */
    setlocale(LC_ALL,"C");
#endif
    sprintf( white_point_xyz_src, "%f,%f,%f", src_iccXYZ[0], src_iccXYZ[1], src_iccXYZ[2] );
    sprintf( white_point_xyz_dst, "%f,%f,%f", illu_iccXYZ[0], illu_iccXYZ[1], illu_iccXYZ[2] );
    sprintf( brightness_scale, "%f", b_scale );
#ifdef HAVE_LOCALE_H
    setlocale(LC_ALL,old_loc);
#endif
    /* avoid color clipping around the white point, with PCS*XYZ scaling */
    if(scale && *scale != 0.0)
      b_scale = OY_MAX( 1.0 - max_brightness * LCM2_ADAPT_TO_PCS_XYZ, 0.2 );
    if(scale)
      *scale = b_scale;
  }

  if(error) goto lcm2CreateAbstractWhitePointProfileBClean;

#ifdef HAVE_LOCALE_H
  setlocale(LC_ALL,"C");
#endif
  /* pick a readable name and patch meta values by direction of the shift */
  if(icc_ab[1] > 0)
  {
    sprintf( kelvin_name, "Bradford Reddish CIE*a %g CIE*b %g", icc_ab[0], icc_ab[1] );
  } else if(-0.001 < icc_ab[1] && icc_ab[0] < 0.001) {
    /* NOTE(review): neutral test mixes icc_ab[1] and icc_ab[0] — confirm */
    sprintf( kelvin_name, "Bradford CIE*a %g CIE*b %g", icc_ab[0], icc_ab[1] );
    kelvin_meta[1] = "neutral,type,white_point,atom";
    /* NOTE(review): index 3 is the EFFECT_linear value; possibly index 5
     * (COLORIMETRY_white_point value) was intended — confirm */
    kelvin_meta[3] = "yes,D50,kelvin";
  } else {
    sprintf( kelvin_name, "Bradford Bluish CIE*a %g CIE*b %g", icc_ab[0], icc_ab[1] );
    kelvin_meta[1] = "bluish,type,white_point,atom";
    kelvin_meta[3] = "yes,bluish,kelvin";
  }
#ifdef HAVE_LOCALE_H
  setlocale(LC_ALL,old_loc);
#endif

  if(src_name)
    sprintf( &kelvin_name[strlen(kelvin_name)], " src:%s", src_name );
  if(illu_name)
    sprintf( &kelvin_name[strlen(kelvin_name)], " illu:%s", illu_name );
  sprintf( &kelvin_name[strlen(kelvin_name)], " v2 lcm2" );

  /* name buffer ownership passes to the caller from here on */
  *my_abstract_file_name = kelvin_name;

  if(flags & 0x01) /* name-only mode: return before profile computation.
                    * NOTE(review): *h_profile stays unassigned on this
                    * path — callers must not read it — confirm */
  {
#ifdef HAVE_LOCALE_H
    if(old_loc) free(old_loc);
#endif
    return error;
  }

  profile = lcm2CreateProfileFragment (
    "*xyz", // CIE*XYZ
    "*xyz", // CIE*XYZ
    4.3, //icc_profile_version,
    kelvin_name,
    "Oyranos project 2019",
    "Kai-Uwe Behrmann",
    ICC_2011_LICENSE,
    "Bradford",
    "http://www.cie.co.at",
    NULL);
  /* prevent the final lcm2Free_m() from freeing the callers name buffer */
  kelvin_name = NULL;
  if(!profile) goto lcm2CreateAbstractWhitePointProfileBClean;

  lcm2MAT3 Bradford, BB, Brightness = {{ {{b_scale,0,0}}, {{0,b_scale,0}}, {{0,0,b_scale}} }};
  if( !lcm2AdaptationMatrix(&Bradford, NULL, &SourceWhitePt, &Illuminant) ) goto lcm2CreateAbstractWhitePointProfileBClean;
  /** Scale the adaption matrix to avoid clipping. Scale factor is stored in meta:COLORIMETRY_white_point_scale. */
  lcm2MAT3per(&BB, &Bradford, &Brightness);
  matrix[0][0] = BB.v[0].n[0]; matrix[0][1] = BB.v[0].n[1]; matrix[0][2] = BB.v[0].n[2];
  matrix[1][0] = BB.v[1].n[0]; matrix[1][1] = BB.v[1].n[1]; matrix[1][2] = BB.v[1].n[2];
  matrix[2][0] = BB.v[2].n[0]; matrix[2][1] = BB.v[2].n[1]; matrix[2][2] = BB.v[2].n[2];

  error = lcm2CreateProfileLutByMatrixAndCurves( profile,
                                  m_curves, &matrix[0][0], b_curves,
                                  "*xyz", "*xyz",
                                  cmsSigAToB0Tag );
  if(!error)
  {
    /* append the optional name entries into the reserved 0,0 slots */
    int pos = 2*10;
    if(src_name)
    {
      kelvin_meta[pos++] = "COLORIMETRY_white_point_name_src";
      kelvin_meta[pos++] = src_name;
    }
    if(illu_name)
    {
      kelvin_meta[pos++] = "COLORIMETRY_white_point_name_dst";
      kelvin_meta[pos++] = illu_name;
    }
    lcm2AddMetaTexts ( profile, "EFFECT_,COLORIMETRY_,CMF_", kelvin_meta, cmsSigMetaTag );
  }

lcm2CreateAbstractWhitePointProfileBClean:
  /* aliased curve slots [1],[2] point at slot [0]; freed once */
  if(m_curves[0]) cmsFreeToneCurve( m_curves[0] );
  if(b_curves[0]) cmsFreeToneCurve( b_curves[0] );
  if(h_profile)
    *h_profile = profile;
  else if(profile && *my_abstract_file_name)
  {
    char * fn = lcm2WriteProfileToFile( profile, *my_abstract_file_name, 0,0 );
    lcm2msg_p( 302, NULL, "wrote to: %s", fn?fn:"----");
    lcm2Free_m(fn);
    cmsCloseProfile( profile );
  }
  /* kelvin_name is NULL here unless an early error kept ownership local */
  lcm2Free_m(kelvin_name);
#ifdef HAVE_LOCALE_H
  if(old_loc) free(old_loc);
#endif
  return error;
}
/** Function lcm2CreateProfileFragment
* @brief Create a color profile starter
*
* In case both the in_space_profile and out_space_profile arguments are set
* to "*lab", the profile will be set to class abstract. In case the
* in_space_profile is not "*lab" and the later one is different, a color
* profile of class input will be generated. With in_space_profile not "*lab"
* and out_space_profile "*lab" a color profile of class output will be
* generated. Note such profiles have initially no backward LUT and can not
* be used for inverse color transforms, which might be a problem for general
* purpose ICC profiles. But you can add more tables if needed by passing in a
* previously created profile.
*
* All profiles generated by this function are meant to be filled with
* colorimetric data by e.g. lcm2CreateProfileLutByFunc() or
* lcm2CreateICCMatrixProfile2().
*
* Here a code example:
* @code
cmsHPROFILE profile = lcm2CreateProfileFragment (
"*srgb", // sRGB
"*lab", // CIE*Lab
2.3,
"MySpace (MyProject)",
"My Project 2016",
"My Name",
ICC_2011_LICENSE,
"My Box", "www.mydomain.net", NULL
);
@endcode
*
* @param[in] in_space_profile input color space; for wildcards see
* lcm2OpenProfileFile()
* @param[in] out_space_profile output color space; for wildcards see
* lcm2OpenProfileFile()
* @param[in] icc_profile_version 2.3 or 4.3
* @param[in] my_abstract_description internal profile name
* @param[in] provider e.g. "My Project 2016"
* @param[in] vendor e.g. "My Name"
* @param[in] my_license e.g. "This profile is made available by %s, with permission of %s, and may be copied, distributed, embedded, made, used, and sold without restriction. Altered versions of this profile shall have the original identification and copyright information removed and shall not be misrepresented as the original profile."
* first %s is provider string arg and
* second %s is filled by vendor string arg
* @param[in] device_model e.g. "My Set"
* @param[in] device_manufacturer e.g. "www.mydomain.net"; hint:
* lcms <= 2.08 writes a malformed desc tag
* @param[in,out] h_profile use existing profile; optional
*
* @version Oyranos: 0.9.6
* @date 2016/03/06
* @since 2009/11/04 (Oyranos: 0.1.10)
*/
cmsHPROFILE  lcm2CreateProfileFragment(
                                       const char        * in_space_profile,
                                       const char        * out_space_profile,
                                       double              icc_profile_version,
                                       const char        * my_abstract_description,
                                       const char        * provider,
                                       const char        * vendor,
                                       const char        * my_license,
                                       const char        * device_model,
                                       const char        * device_manufacturer,
                                       cmsHPROFILE         h_profile
                                     )
{
  /* Create (or extend) a profile skeleton: device class derived from the
   * in/out spaces, description, model, manufacturer and copyright tags set.
   * Returns the profile handle, or NULL when no placeholder could be made.
   * See the Doxygen block above for the full contract. */
  cmsHPROFILE h_in_space = 0,
              h_out_space = 0;
  cmsColorSpaceSignature csp_in, csp_out;
  cmsProfileClassSignature profile_class = cmsSigAbstractClass;
  cmsMLU * mlu[4] = {0,0,0,0};
  int i;
  char * license = NULL;

  if(!h_profile)
    h_profile = cmsCreateProfilePlaceholder( 0 );
  if(!h_profile) goto lcm2CreateProfileFragmentClean;

  if(in_space_profile)  h_in_space  = lcm2OpenProfileFile( in_space_profile, NULL );
  if(out_space_profile) h_out_space = lcm2OpenProfileFile( out_space_profile, NULL );

  /* NOTE(review): a NULL handle reaches cmsGetColorSpace() when a space
   * profile could not be opened — confirm lcms2 tolerates that here */
  csp_in = cmsGetColorSpace( h_in_space );
  csp_out = cmsGetColorSpace( h_out_space );

  cmsSetProfileVersion( h_profile, icc_profile_version );

  /* derive the device class from which side(s) are a PCS (Lab or XYZ) */
#define CSP_IS_PCS(csp) (csp == cmsSigLabData || csp == cmsSigXYZData)
  if( CSP_IS_PCS(csp_in) && CSP_IS_PCS(csp_out) )
    profile_class = cmsSigAbstractClass;
  else if( CSP_IS_PCS(csp_out) )
    profile_class = cmsSigInputClass;
  else if( CSP_IS_PCS(csp_in) )
    profile_class = cmsSigOutputClass;
  else
    profile_class = cmsSigLinkClass;

  cmsSetDeviceClass( h_profile, profile_class );
  cmsSetColorSpace( h_profile, csp_in );
  cmsSetPCS( h_profile, csp_out );

  for(i = 0; i < 4; ++i)
    mlu[i] = cmsMLUalloc(0,1);
  /* fix: was an early "return h_profile;" which leaked the opened space
   * profiles and any successfully allocated MLUs */
  if(!(mlu[0] && mlu[1] && mlu[2] && mlu[3]))
    goto lcm2CreateProfileFragmentClean;

  cmsMLUsetASCII(mlu[0], "EN", "us", my_abstract_description);
  cmsWriteTag( h_profile, cmsSigProfileDescriptionTag, mlu[0] );

  if(device_model)
  {
    cmsMLUsetASCII(mlu[1], "EN", "us", device_model);
    cmsWriteTag( h_profile, cmsSigDeviceModelDescTag, mlu[1]);
  }

  if(device_manufacturer)
  {
    cmsMLUsetASCII(mlu[2], "EN", "us", device_manufacturer);
    cmsWriteTag( h_profile, cmsSigDeviceMfgDescTag, mlu[2]);
  }

  /* my_license may contain two %s place holders for provider and vendor;
   * each "%s" in the format occupies more bytes than it expands saves, so
   * the buffer below is always large enough */
  license = malloc( strlen(my_license) + strlen(provider) + strlen(vendor) + 1 );
  if(!license) goto lcm2CreateProfileFragmentClean;
  sprintf( license, my_license, provider, vendor );

  cmsMLUsetASCII(mlu[3], "EN", "us", license);
  cmsWriteTag( h_profile, cmsSigCopyrightTag, mlu[3]);

  cmsWriteTag( h_profile, cmsSigMediaWhitePointTag, cmsD50_XYZ() );

lcm2CreateProfileFragmentClean:
  if(h_in_space)  cmsCloseProfile( h_in_space );
  if(h_out_space) cmsCloseProfile( h_out_space );
  for(i = 0; i < 4; ++i)
    cmsMLUfree( mlu[i] ); /* cmsMLUfree() is NULL safe */
  lcm2Free_m(license);

  return h_profile;
}
int isBigEndian ()
{
  /* Store 1 in a two-byte integer and inspect its first byte: a zero first
   * byte means the most significant byte is stored first (big-endian). */
  union { unsigned short u16; unsigned char c; } probe;
  probe.u16 = 1;
  return probe.c == 0;
}
/* UTF-8 to WCHAR_T conversion — types, constants and lookup tables.
 * This section follows the well known Unicode Inc. ConvertUTF sample code. */
typedef uint32_t	UTF32;	/* at least 32 bits */
typedef uint16_t	UTF16;	/* at least 16 bits */
typedef uint8_t		UTF8;	/* typically 8 bits */
typedef unsigned char	Boolean; /* 0 or 1 */

/* Some fundamental constants */
#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD
#define UNI_MAX_BMP (UTF32)0x0000FFFF
#define UNI_MAX_UTF16 (UTF32)0x0010FFFF
#define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF
#define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF

/* result codes of lcm2ConvertUTF8toUTF16() */
typedef enum {
	conversionOK, 		/* conversion successful */
	sourceExhausted,	/* partial character in source, but hit end */
	targetExhausted,	/* insuff. room in target for conversion */
	sourceIllegal		/* source sequence is illegal/malformed */
} lcm2UtfConversionResult;

typedef enum {
	strictConversion = 0,	/* reject malformed input and surrogates */
	lenientConversion	/* substitute UNI_REPLACEMENT_CHAR instead */
} lcm2UtfConversionFlags;

static const int halfShift  = 10; /* used for shifting by 10 bits */
static const UTF32 halfBase = 0x0010000UL;
static const UTF32 halfMask = 0x3FFUL;

#define UNI_SUR_HIGH_START  (UTF32)0xD800
#define UNI_SUR_HIGH_END    (UTF32)0xDBFF
#define UNI_SUR_LOW_START   (UTF32)0xDC00
#define UNI_SUR_LOW_END     (UTF32)0xDFFF

/* NOTE(review): these clash with <stdbool.h> macros if that header is
 * included later in a translation unit — confirm this is intentional */
#define false	   0
#define true	    1

/*
 * Index into the table below with the first byte of a UTF-8 sequence to
 * get the number of trailing bytes that are supposed to follow it.
 * Note that *legal* UTF-8 values can't have 4 or 5-bytes. The table is
 * left as-is for anyone who may want to do such conversion, which was
 * allowed in earlier algorithms.
 */
static const char trailingBytesForUTF8[256] = {
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
	1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
	2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5
};

/*
 * Magic values subtracted from a buffer value during UTF8 conversion.
 * This table contains as many values as there might be trailing bytes
 * in a UTF-8 sequence.
 */
static const UTF32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL,
		     0x03C82080UL, 0xFA082080UL, 0x82082080UL };
/*
 * Utility routine to tell whether a sequence of bytes is legal UTF-8.
 * This must be called with the length pre-determined by the first byte.
 * If not calling this from ConvertUTF8to*, then the length can be set by:
 *  length = trailingBytesForUTF8[*source]+1;
 * and the sequence is illegal right away if there aren't that many bytes
 * available.
 * If presented with a length > 4, this returns false.  The Unicode
 * definition of UTF-8 goes up to 4-byte sequences.
 */
static Boolean isLegalUTF8(const UTF8 *source, int length)
{
    UTF8 a;
    const UTF8 *srcptr = source+length;
    /* walk the continuation bytes from the end towards the lead byte;
     * the outer cases intentionally fall through */
    switch (length) {
    default: return false;
	/* Everything else falls through when "true"... */
    case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false; OY_FALLTHROUGH
    case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false; OY_FALLTHROUGH
    case 2: if ((a = (*--srcptr)) > 0xBF) return false;

	/* lead-byte specific lower/upper bounds for the first continuation
	 * byte, rejecting overlong forms and UTF-16 surrogate ranges */
	switch (*source) {
	    /* no fall-through in this inner switch */
	    case 0xE0: if (a < 0xA0) return false; break;
	    case 0xED: if (a > 0x9F) return false; break;
	    case 0xF0: if (a < 0x90) return false; break;
	    case 0xF4: if (a > 0x8F) return false; break;
	    default:   if (a < 0x80) return false; OY_FALLTHROUGH
	} OY_FALLTHROUGH

    case 1: if (*source >= 0x80 && *source < 0xC2) return false;
    }
    /* lead bytes above 0xF4 would encode code points beyond U+10FFFF */
    if (*source > 0xF4) return false;
    return true;
}
/* Convert a UTF-8 byte range to UTF-16 code units (Unicode Inc. ConvertUTF
 * style).  Both *sourceStart and *targetStart are advanced past what was
 * consumed/produced, so a caller can resume after targetExhausted.  With
 * strictConversion malformed input and surrogates abort the conversion;
 * with lenientConversion they become UNI_REPLACEMENT_CHAR. */
lcm2UtfConversionResult lcm2ConvertUTF8toUTF16 (const UTF8** sourceStart, const UTF8* sourceEnd,
	UTF16** targetStart, UTF16* targetEnd, lcm2UtfConversionFlags flags)
{
    lcm2UtfConversionResult result = conversionOK;
    const UTF8* source = *sourceStart;
    UTF16* target = *targetStart;
    while (source < sourceEnd) {
	UTF32 ch = 0;
	unsigned short extraBytesToRead = trailingBytesForUTF8[*source];
	if (source + extraBytesToRead >= sourceEnd) {
	    result = sourceExhausted; break;
	}
	/* Do this check whether lenient or strict */
	if (! isLegalUTF8(source, extraBytesToRead+1)) {
	    result = sourceIllegal;
	    break;
	}
	/*
	 * The cases all fall through. See "Note A" below.
	 */
	switch (extraBytesToRead) {
	    case 5: ch += *source++; ch <<= 6; OY_FALLTHROUGH/* remember, illegal UTF-8 */
	    case 4: ch += *source++; ch <<= 6; OY_FALLTHROUGH /* remember, illegal UTF-8 */
	    case 3: ch += *source++; ch <<= 6; OY_FALLTHROUGH
	    case 2: ch += *source++; ch <<= 6; OY_FALLTHROUGH
	    case 1: ch += *source++; ch <<= 6; OY_FALLTHROUGH
	    case 0: ch += *source++; OY_FALLTHROUGH
	}
	/* remove the accumulated lead/continuation marker bits in one step */
	ch -= offsetsFromUTF8[extraBytesToRead];

	if (target >= targetEnd) {
	    source -= (extraBytesToRead+1); /* Back up source pointer! */
	    result = targetExhausted; break;
	}
	if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */
	    /* UTF-16 surrogate values are illegal in UTF-32 */
	    if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
		if (flags == strictConversion) {
		    source -= (extraBytesToRead+1); /* return to the illegal value itself */
		    result = sourceIllegal;
		    break;
		} else {
		    *target++ = UNI_REPLACEMENT_CHAR;
		}
	    } else {
		*target++ = (UTF16)ch; /* normal case */
	    }
	} else if (ch > UNI_MAX_UTF16) {
	    if (flags == strictConversion) {
		result = sourceIllegal;
		source -= (extraBytesToRead+1); /* return to the start */
		break; /* Bail out; shouldn't continue */
	    } else {
		*target++ = UNI_REPLACEMENT_CHAR;
	    }
	} else {
	    /* target is a character in range 0xFFFF - 0x10FFFF. */
	    /* emit a surrogate pair; needs two UTF16 slots */
	    if (target + 1 >= targetEnd) {
		source -= (extraBytesToRead+1); /* Back up source pointer! */
		result = targetExhausted; break;
	    }
	    ch -= halfBase;
	    *target++ = (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START);
	    *target++ = (UTF16)((ch & halfMask) + UNI_SUR_LOW_START);
	}
    }
    *sourceStart = source;
    *targetStart = target;
    return result;
}
wchar_t * lcm2Utf8ToWchar ( const char * text )
{
wchar_t * wchar_out, * tmp_out;
char * in, * tmp_in;
size_t in_len = strlen(text),
out_len = in_len*sizeof(wchar_t)+sizeof(wchar_t);
lcm2UtfConversionResult error;
if(!in_len) return 0;
else ++in_len;
tmp_out = wchar_out = calloc( in_len+1, sizeof(wchar_t) );
in = tmp_in = strdup( text );
error = lcm2ConvertUTF8toUTF16( (const UTF8**)&in, (const UTF8*)in+in_len, (UTF16**)&tmp_out, (UTF16*)(tmp_out+out_len), lenientConversion );
if(error == conversionOK)
{
/* store UTF16BE in wchar_t for lcms2 */
uint16_t * icc_utf16 = (uint16_t*) wchar_out;
int i;
for(i = in_len; i >= 0; --i) wchar_out[i] = icc_utf16[i];
}
else
{
lcm2msg_p( 300, NULL, "error[%d] %lu %lu %s", error, in_len, out_len, text );
lcm2Free_m(wchar_out);
}
lcm2Free_m( tmp_in );
return wchar_out;
}
/** Function lcm2AddMluDescription
* @brief Add translated texts to a profile
*
* Iterates over the provided string list, converts from "UTF-8" input
* to "WCHAR_T" for lcms, and
* does byteswapping on little endian machines.
*
* Here a code example:
* @code
const char * texts[] = {
"de", "DE", "Mein Text",
"en", "US", "My Text"
};
lcm2AddMluDescription ( profile, texts,
cmsSigProfileDescriptionMLTag
);
@endcode
*
* @param[in,out] profile color profile
* @param[in] texts language + country + text list
* @param[in] tag_sig signature
*
* @version Oyranos: 0.9.6
* @date 2016/03/13
* @since 2016/03/13 (Oyranos: 0.9.6)
*/
void         lcm2AddMluDescription ( cmsHPROFILE         profile,
                                     const char        * texts[],
                                     cmsTagSignature     tag_sig )
{
  /* Write a multi-language text tag.  'texts' is a flat, NULL-terminated
   * list of (language, country, text) triples; each text is UTF-8 and is
   * converted to wide characters for lcms.  Entries whose conversion fails
   * are silently skipped. */
  int count = 0, pos;
  cmsMLU * mlu = NULL;

  if(texts)
    while( texts[count] ) ++count;
  if(count == 0)
    return;

  mlu = cmsMLUalloc( 0, count/3 + 1 );
  if(!mlu)
    return;

  for( pos = 0; pos < count; pos += 3 )
  {
    /* lcms expects the codes as readable 4 byte strings */
    char lang[4] = {0,0,0,0}, country[4] = {0,0,0,0};
    wchar_t * wide = lcm2Utf8ToWchar( texts[pos+2] );

    if(!wide) continue;

    lang[0]    = texts[pos+0][0]; lang[1]    = texts[pos+0][1];
    country[0] = texts[pos+1][0]; country[1] = texts[pos+1][1];

    cmsMLUsetWide( mlu, lang, country, wide );
    lcm2Free_m( wide );
  }

  cmsWriteTag( profile, tag_sig, mlu );
  cmsMLUfree( mlu );
}
/** Function lcm2AddMetaTexts
* @brief Add meta data to a profile
*
* Iterates over the provided string list, converts from "UTF-8" input
* to "WCHAR_T" for lcms, and
* does byteswapping on little endian machines.
*
* Here a code example:
* @code
const char * texts[] = {
"GROUP_key1", "value1",
"DOMAIN_key2", "value2"
};
lcm2AddMetaTexts ( profile, "GROUP_,DOMAIN_", texts,
cmsSigMetaTag
);
@endcode
*
* A prefix allows for grouping of keys like "EDID_" or "EXIF_".
* The prefix part might be cut off in some cases to access an other level
* of keys. Think of "EDID_model" for monitors and "EXIF_model" for cameras,
* which both represent the key "model" concept.
*
* @param[in,out] profile color profile
* @param[in] prefixes The used uppercase prefix list.
* @param[in] key_value key + value list
* @param[in] tag_sig signature
*
* @version Oyranos: 0.9.7
* @date 2017/02/11
* @since 2017/02/11 (Oyranos: 0.9.7)
*/
void         lcm2AddMetaTexts       ( cmsHPROFILE         profile,
                                      const char        * prefixes,
                                      const char        * key_value[],
                                      cmsTagSignature     tag_sig )
{
  /* Write a meta (dict) tag from a NULL-terminated key/value list, plus a
   * "prefix" entry naming the used key prefixes.  Entries whose UTF-8 →
   * wide conversion fails are skipped. */
  int n = 0, i;
  cmsHANDLE dict = NULL;
  cmsContext contextID = cmsCreateContext( NULL,NULL );
  wchar_t * wchar_key = NULL, * wchar_val = NULL;

  if(key_value)
    while( key_value[n] ) ++n;

  if(n)
    dict = cmsDictAlloc( contextID );
  else
    lcm2msg_p( 300, NULL, "nothing to write %s", __func__ );

  if(!dict)
  {
    /* fix: the created lcms context was leaked on this early return */
    if(contextID) cmsDeleteContext( contextID );
    return;
  }

  /* the "prefix" entry groups the keys, e.g. "EDID_,EXIF_" */
  if(prefixes)
  {
    wchar_key = lcm2Utf8ToWchar( "prefix" );
    wchar_val = lcm2Utf8ToWchar( prefixes );
  }
  if(wchar_key && wchar_val)
    cmsDictAddEntry( dict, wchar_key, wchar_val, NULL,NULL );
  lcm2Free_m( wchar_key );
  lcm2Free_m( wchar_val );

  for( i = 0; i < n; i += 2 )
  {
    const char * key = key_value[i+0],
               * val = key_value[i+1];
    wchar_key = lcm2Utf8ToWchar( key );
    wchar_val = lcm2Utf8ToWchar( val );
    if(!wchar_key || !wchar_val)
    {
      lcm2Free_m( wchar_key );
      lcm2Free_m( wchar_val );
      continue;
    }
    cmsDictAddEntry( dict, wchar_key, wchar_val, NULL,NULL );
    lcm2Free_m( wchar_key );
    lcm2Free_m( wchar_val );
  }

  cmsWriteTag( profile, tag_sig, dict );
  cmsDictFree( dict );
  /* fix: release the private lcms context created above (was leaked) */
  if(contextID) cmsDeleteContext( contextID );
}
/** Function lcm2CreateICCMatrixProfile2
* @brief Create a profile from primaries, white point and one gamma value
*
* Used for ICC from EDID, Camera RAW etc. Marti calls these matrix/shaper.
* @code
// create linear space with REC.709/sRGB primaries and D65 white point
cmsHPROFILE h_my_space = lcm2CreateICCMatrixProfile2( 1.0, 0.64,0.33, 0.30,0.60, 0.15,0.06, 0.3127,0.329 );
@endcode
*
* @version Oyranos: 0.9.6
* @date 2016/03/04
* @since 2009/10/24 (Oyranos: 0.1.10)
*/
cmsHPROFILE lcm2CreateICCMatrixProfile2 (
float gamma,
float rx, float ry,
float gx, float gy,
float bx, float by,
float wx, float wy )
{
cmsCIExyYTRIPLE p;
cmsToneCurve * g[3] = {0,0,0};
/* 0.31271, 0.32902 D65 */
cmsCIExyY wtpt_xyY;
cmsHPROFILE lp = 0;
p.Red.x = rx;
p.Red.y = ry;
p.Red.Y = 1.0;
p.Green.x = gx;
p.Green.y = gy;
p.Green.Y = 1.0;
p.Blue.x = bx;
p.Blue.y = by;
p.Blue.Y = 1.0;
wtpt_xyY.x = wx;
wtpt_xyY.y = wy;
wtpt_xyY.Y = 1.0;
g[0] = g[1] = g[2] = cmsBuildGamma(0, (double)gamma);
if(!g[0]) return NULL;
lp = cmsCreateRGBProfile( &wtpt_xyY, &p, g);
cmsFreeToneCurve( g[0] );
return lp;
}
/** Function lcm2MessageFunc
* @brief default message function to console
*
* The default message function is used as a message printer to the console
* from library start.
*
* @param code a message code understood be your message
* handler or openiccMSG_e
* @param context_object a openicc object is expected
* @param format the text format string for following args
* @param ... the variable args fitting to format
* @return 0 - success; 1 - error
*
* @version OpenICC: 0.1.0
* @date 2009/07/20
* @since 2008/04/03 (OpenICC: 0.1.0)
*/
int  lcm2MessageFunc                 ( int/*openiccMSG_e*/ code OY_UNUSED,
                                       const void        * context_object OY_UNUSED,
                                       const char        * format,
                                       ... )
{
  /* Default message handler: format the variadic arguments and print the
   * result to stderr.  Returns 0 on success, 1 on error. */
  char * text = NULL;
  int error = 0;
  va_list list;
  int len = 0;

  /* first pass: measure the needed length (C99 vsnprintf with size 0) */
  va_start( list, format);
  len = vsnprintf( NULL, 0, format, list);
  va_end  ( list );

  /* fix: a negative vsnprintf result (encoding error) previously flowed
   * into calloc(len+2) as a huge unsigned size */
  if(len < 0)
    return 1;

  text = calloc( len+2, sizeof(char) );
  if(!text)
  {
    /* fix: the message claimed a fixed "256 byte" regardless of size */
    fprintf(stderr, "Could not allocate %d byte of memory.\n", len+2);
    return 1;
  }

  /* second pass: actually format into the buffer */
  va_start( list, format);
  vsnprintf( text, len+1, format, list);
  va_end  ( list );

  fprintf( stderr, "%s\n", text );
  lcm2Free_m( text );

  return error;
}
/* Active message handler; lcm2MessageFuncSet() replaces or restores it. */
lcm2Message_f lcm2msg_p = lcm2MessageFunc;
/** @brief set a custom message function
*
* Use to connect to user message system.
*/
int            lcm2MessageFuncSet    ( lcm2Message_f       message_func )
{
  /* Install the given handler, or restore the built-in console printer
   * when NULL is passed.  Always returns 1. */
  lcm2msg_p = message_func ? message_func : lcm2MessageFunc;
  return 1;
}
/** @brief run time API version
 *
 *  Returns the LCM2PROFILER_API constant this library was compiled with,
 *  so callers can compare it against their compile-time value.
 */
int            lcm2Version           ( )
{
  return LCM2PROFILER_API;
}
// Compute chromatic adaptation matrix using Chad as cone matrix.
// Follows the classic von Kries scheme: transform both white points into
// cone response space, scale per channel, transform back.
// Returns FALSE when the cone matrix is singular.
static cmsBool ComputeChromaticAdaptation(lcm2MAT3* Conversion,
                                          const cmsCIEXYZ* SourceWhitePoint,
                                          const cmsCIEXYZ* DestWhitePoint,
                                          const lcm2MAT3* Chad)
{
    lcm2MAT3 Chad_Inv;
    lcm2VEC3 ConeSourceXYZ, ConeSourceRGB;
    lcm2VEC3 ConeDestXYZ, ConeDestRGB;
    lcm2MAT3 Cone, Tmp;

    Tmp = *Chad;
    if (!lcm2MAT3inverse(&Tmp, &Chad_Inv)) return FALSE;

    lcm2VEC3init(&ConeSourceXYZ, SourceWhitePoint -> X,
                             SourceWhitePoint -> Y,
                             SourceWhitePoint -> Z);

    lcm2VEC3init(&ConeDestXYZ,   DestWhitePoint -> X,
                             DestWhitePoint -> Y,
                             DestWhitePoint -> Z);

    /* project both white points into cone response space */
    lcm2MAT3eval(&ConeSourceRGB, Chad, &ConeSourceXYZ);
    lcm2MAT3eval(&ConeDestRGB,   Chad, &ConeDestXYZ);

    // Build matrix
    /* NOTE(review): a zero source cone response would divide by zero here;
     * confirm callers always pass physically valid white points */
    lcm2VEC3init(&Cone.v[0], ConeDestRGB.n[0]/ConeSourceRGB.n[0],    0.0,  0.0);
    lcm2VEC3init(&Cone.v[1], 0.0,   ConeDestRGB.n[1]/ConeSourceRGB.n[1],   0.0);
    lcm2VEC3init(&Cone.v[2], 0.0,   0.0,   ConeDestRGB.n[2]/ConeSourceRGB.n[2]);

    // Normalize
    lcm2MAT3per(&Tmp, &Cone, Chad);
    lcm2MAT3per(Conversion, &Chad_Inv, &Tmp);

    return TRUE;
}
/** Returns the final chromatic adaptation from illuminant FromIll to
 *  illuminant ToIll. The cone matrix can be specified in ConeMatrix.
 *  If NULL, Bradford is assumed.
 */
cmsBool lcm2AdaptationMatrix(lcm2MAT3* r, const lcm2MAT3* ConeMatrix, const cmsCIEXYZ* FromIll, const cmsCIEXYZ* ToIll)
{
    /* Bradford cone response matrix */
    static const lcm2MAT3 LamRigg = {{
        {{  0.8951,  0.2664, -0.1614 }},
        {{ -0.7502,  1.7135,  0.0367 }},
        {{  0.0389, -0.0685,  1.0296 }}
    }};

    const lcm2MAT3* cone = ConeMatrix ? ConeMatrix : &LamRigg;
    return ComputeChromaticAdaptation(r, FromIll, ToIll, cone);
}
/* The lcm2VEC3, lcm2MAT3, lcm2MAT3inverse, lcm2VEC3init and lcm2MAT3per definitions
* origin from lcms2' cmsmtrx.c written by Marti Maria www.littlecms.com
* and is MIT licensed there
* Vectors
*/
#define MATRIX_DET_TOLERANCE 0.0001
/** Inverse of a matrix, b = a^(-1).
 *  Returns 0 when the matrix is singular (|det| below tolerance), 1 on success.
 */
int lcm2MAT3inverse(const lcm2MAT3* a, lcm2MAT3* b)
{
    /* Cofactors of the first column, used for the determinant expansion. */
    double cof00 =  a->v[1].n[1]*a->v[2].n[2] - a->v[1].n[2]*a->v[2].n[1];
    double cof10 = -a->v[1].n[0]*a->v[2].n[2] + a->v[1].n[2]*a->v[2].n[0];
    double cof20 =  a->v[1].n[0]*a->v[2].n[1] - a->v[1].n[1]*a->v[2].n[0];

    double det = a->v[0].n[0]*cof00 + a->v[0].n[1]*cof10 + a->v[0].n[2]*cof20;

    if (fabs(det) < MATRIX_DET_TOLERANCE)
        return 0;                       /* singular matrix; can't invert */

    /* Adjugate divided by the determinant. */
    b->v[0].n[0] = cof00/det;
    b->v[0].n[1] = (a->v[0].n[2]*a->v[2].n[1] - a->v[0].n[1]*a->v[2].n[2])/det;
    b->v[0].n[2] = (a->v[0].n[1]*a->v[1].n[2] - a->v[0].n[2]*a->v[1].n[1])/det;
    b->v[1].n[0] = cof10/det;
    b->v[1].n[1] = (a->v[0].n[0]*a->v[2].n[2] - a->v[0].n[2]*a->v[2].n[0])/det;
    b->v[1].n[2] = (a->v[0].n[2]*a->v[1].n[0] - a->v[0].n[0]*a->v[1].n[2])/det;
    b->v[2].n[0] = cof20/det;
    b->v[2].n[1] = (a->v[0].n[1]*a->v[2].n[0] - a->v[0].n[0]*a->v[2].n[1])/det;
    b->v[2].n[2] = (a->v[0].n[0]*a->v[1].n[1] - a->v[0].n[1]*a->v[1].n[0])/det;

    return 1;
}
/* Axis of the matrix/array. No specific meaning at all. */
#define VX 0
#define VY 1
#define VZ 2
/** Initialize a 3-component vector from the given coordinates. */
void lcm2VEC3init(lcm2VEC3* r, double x, double y, double z)
{
    r->n[VX] = x;
    r->n[VY] = y;
    r->n[VZ] = z;
}
/** Multiply two matrices, r = a * b.
 *  Each row of r is fully computed before it is stored, matching the
 *  evaluation order of the original macro expansion.
 */
void lcm2MAT3per(lcm2MAT3* r, const lcm2MAT3* a, const lcm2MAT3* b)
{
    int row;
    for(row = 0; row < 3; ++row)
    {
        double x = a->v[row].n[0]*b->v[0].n[0] + a->v[row].n[1]*b->v[1].n[0] + a->v[row].n[2]*b->v[2].n[0];
        double y = a->v[row].n[0]*b->v[0].n[1] + a->v[row].n[1]*b->v[1].n[1] + a->v[row].n[2]*b->v[2].n[1];
        double z = a->v[row].n[0]*b->v[0].n[2] + a->v[row].n[1]*b->v[1].n[2] + a->v[row].n[2]*b->v[2].n[2];
        lcm2VEC3init(&r->v[row], x, y, z);
    }
}
/** Evaluate a vector across a matrix: r = a * v (row-by-row dot products). */
void CMSEXPORT lcm2MAT3eval(lcm2VEC3* r, const lcm2MAT3* a, const lcm2VEC3* v)
{
    int row;
    for(row = 0; row < 3; ++row)
        r->n[row] = a->v[row].n[VX]*v->n[VX]
                  + a->v[row].n[VY]*v->n[VY]
                  + a->v[row].n[VZ]*v->n[VZ];
}
/* end of lcms code */
/** Convert a matrix to a CIE xy triple.
 *
 *  Each matrix row is normalized into chromaticity coordinates
 *  x = n[0]/sum, y = n[1]/sum with sum = n[0]+n[1]+n[2].
 *
 *  @return 1 (fail flag) when any matrix element is zero or a row sum
 *          is zero; 0 otherwise. The output is filled in either case.
 */
int lcm2MAT3toCIExyYTriple ( const lcm2MAT3* a, lcm2CIExyYTriple * triple )
{
    int i, j,
        fail = 0;
    double sum;
    for(i = 0; i < 3; ++i)
    {
        /* Flag rows containing a zero element as suspect.
         * (Removed the redundant 'i < 3' test: always true inside the loop.) */
        for(j = 0; j < 3; ++j)
        {
            if(a->v[i].n[j] == 0)
                fail = 1;
        }
        sum = a->v[i].n[0] + a->v[i].n[1] + a->v[i].n[2];
        if(sum != 0)
        {
            triple->v[i].xy[0] = a->v[i].n[0]/sum;
            triple->v[i].xy[1] = a->v[i].n[1]/sum;
        } else
        {
            /* Degenerate row: fall back to a neutral placeholder. */
            triple->v[i].xy[0] = 1;
            triple->v[i].xy[1] = 1;
        }
    }
    return fail;
}
/** Render a 3x3 matrix as text, one row per line.
 *  NOTE: returns a lazily allocated static buffer shared between calls
 *  (not thread safe, not to be freed by the caller).
 *  Returns NULL if the one-time buffer allocation fails.
 */
const char * lcm2MAT3show ( const lcm2MAT3* a )
{
    static char * t = NULL;
    int i, j;
    if(!t) t = (char*) malloc(1024);
    if(!t) return NULL;                 /* allocation failed */
    t[0] = 0;
    for(i = 0; i < 3; ++i)
    {
        for(j = 0; j < 3; ++j)
            sprintf( &t[strlen(t)], " %g", a->v[i].n[j]);
        sprintf( &t[strlen(t)], "\n" );
    }
    return t;
}
/** Render a 3x4 float matrix as text, one row per line.
 *  NOTE: returns a lazily allocated static buffer shared between calls
 *  (not thread safe, not to be freed by the caller).
 *  Returns NULL if the one-time buffer allocation fails.
 */
const char * lcm2Mat34show ( const float a[3][4] )
{
    static char * t = NULL;
    int i, j;
    if(!t) t = (char*) malloc(1024);
    if(!t) return NULL;                 /* allocation failed */
    t[0] = 0;
    for(i = 0; i < 3; ++i)
    {
        for(j = 0; j < 4; ++j)
            sprintf( &t[strlen(t)], " %g", a[i][j]);
        sprintf( &t[strlen(t)], "\n" );
    }
    return t;
}
/** Render a 4-component float vector as a single text line.
 *  NOTE: returns a lazily allocated static buffer shared between calls
 *  (not thread safe, not to be freed by the caller).
 *  Returns NULL if the one-time buffer allocation fails.
 */
const char * lcm2Mat4show ( const float a[4] )
{
    static char * t = NULL;
    int i;
    if(!t) t = (char*) malloc(1024);
    if(!t) return NULL;                 /* allocation failed */
    t[0] = 0;
    for(i = 0; i < 4; ++i)
        sprintf( &t[strlen(t)], " %g", a[i]);
    sprintf( &t[strlen(t)], "\n" );
    return t;
}
/** Render a 4x3 float matrix as text, one row per line.
 *  NOTE: returns a lazily allocated static buffer shared between calls
 *  (not thread safe, not to be freed by the caller).
 *  Returns NULL if the one-time buffer allocation fails.
 */
const char * lcm2Mat43show ( const float a[4][3] )
{
    static char * t = NULL;
    int i, j;
    if(!t) t = (char*) malloc(1024);
    if(!t) return NULL;                 /* allocation failed */
    t[0] = 0;
    for(i = 0; i < 4; ++i)
    {
        for(j = 0; j < 3; ++j)
            sprintf( &t[strlen(t)], " %g", a[i][j]);
        sprintf( &t[strlen(t)], "\n" );
    }
    return t;
}
/** Render a CIE xy triple as text, one "x:<x> y:<y>" pair per line.
 *  NOTE: returns a lazily allocated static buffer shared between calls
 *  (not thread safe, not to be freed by the caller).
 *  Returns NULL if the one-time buffer allocation fails.
 */
const char * lcm2CIExyYTriple_Show( lcm2CIExyYTriple * triple )
{
    static char * t = NULL;
    int i;
    if(!t) t = (char*) malloc(1024);
    if(!t) return NULL;                 /* allocation failed */
    t[0] = 0;
    for(i = 0; i < 3; ++i)
    {
        sprintf( &t[strlen(t)], " x:%g y:%g", triple->v[i].xy[0],
                                              triple->v[i].xy[1]);
        sprintf( &t[strlen(t)], "\n" );
    }
    return t;
}
/** @} */ /* profiler */
/** \addtogroup profiler
*
 * Oyranos ICC Profiler API provides a platform-independent C interface to generate
 * ICC profiles. Its main purpose is to generate ICC Profiles in a programmatic way.
* The only dependency is littleCMS 2
* <a href="http://www.littlecms.com">www.littlecms.com</a>.
* It reduces the need of many of the lcms2
* boilerplate for format independent sampling, multi localised strings from UTF8
* and more. The sampler collection contains effects and color space converters.
* The code consists of one source file and a header. So it can easily
* be placed inside your project.
*
*
* @section api API Documentation
* The Oyranos ICC Profiler API is contained in the lcm2_profiler.h header file.
*
* The high level API takes few arguments and generates a profile in
* one go.
* Effect profiles can be created in one call
* by lcm2CreateAbstractProfile(). It needs a @ref samplers function, which
* fills the Look Up Table (LUT). Three APIs exist to generate white point
* effects, lcm2CreateAbstractTemperatureProfile() and
* lcm2CreateAbstractWhitePointProfileLab() or
* lcm2CreateAbstractWhitePointProfileBradford(). These above high level APIs allow to
* write the profile to disc in one go.
*
* The lower level APIs can be used to customise the profile generation.
* Basic matrix/shaper profiles can be created with
* lcm2CreateICCMatrixProfile2() and filled with custom texts in
* lcm2CreateProfileFragment(). LUT elements can be added with,
* lcm2CreateProfileLutByMatrixAndCurves() and
* lcm2CreateProfileLutByFuncAndCurves(). The later expects a ::lcm2Sampler_f
* function to fill in the LUT.
*
* The following low level code sample comes from @ref lcm2_profiler.c.
* The code sets up a basic profile description and color spaces:
* @dontinclude lcm2_profiler.c
* @code
* // prepare some variables
* double icc_profile_version = 2.3;
* double icc_ab[2] = {0.0, 0.0};
* cmsHPROFILE profile;
 * const char * kelvin_name = "5000 K";
* int error;
* int grid_size = 17;
* cmsToneCurve * i_curve[3] = {NULL,NULL,NULL}, * o_curve[3] = {NULL,NULL,NULL};
i_curve[0] = o_curve[0] = cmsBuildGamma(0, 1.0);
for(i = 1; i < 3; ++i) { i_curve[i] = o_curve[i] = i_curve[0]; }
* @endcode
* @skip fragment
@until cmsSigAToB0Tag
*
* Profile i/o happens with lcm2OpenProfileFile(), which takes file names and
* a few wildcards as arguments. lcm2WriteProfileToFile() helps writing of
* canonical profile names. lcm2WriteProfileToMem() writes a profile to a
* custom memory allocator.
*
* Most of the functions come with examples.
*
*/
|
GB_binop__rminus_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__rminus_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__rminus_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_int8)
// A*D function (colscale): GB (_AxD__rminus_int8)
// D*A function (rowscale): GB (_DxB__rminus_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_int8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_int8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_int8)
// C=scalar+B GB (_bind1st__rminus_int8)
// C=scalar+B' GB (_bind1st_tran__rminus_int8)
// C=A+scalar GB (_bind2nd__rminus_int8)
// C=A'+scalar GB (_bind2nd_tran__rminus_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (y - x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_INT8 || GxB_NO_RMINUS_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rminus_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// All three matrices dense: C(i,j) += B(i,j) - A(i,j) via the shared template.
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__rminus_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// All three matrices dense: C(i,j) = B(i,j) - A(i,j) via the shared template.
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rminus_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// Apply C(i,j) += B(i,j) (with the rminus op) for each entry present in B.
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rminus_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(generated): unreachable; the block above already returned.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rminus_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has the same pattern as A; only its values are written here.
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rminus_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has the same pattern as B; only its values are written here.
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rminus_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// For eWiseUnion, alpha/beta substitute for entries missing in A or B.
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rminus_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// General sparse/hyper eWiseMult; all work delegated to the shared template.
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rminus_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
// (rminus is minus with arguments pre-flipped, so GB_BINOP_FLIP is 0.)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rminus_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Work is sliced along the sparse/hyper mask M.
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rminus_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Bitmap-result variant; all work delegated to the shared template.
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rminus_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in the bitmap Bb (NULL Bb means all present)
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
// rminus with x bound first: cij = bij - x
Cx [p] = (bij - x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rminus_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap Ab (NULL Ab means all present)
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
// rminus with y bound second: cij = y - aij
Cx [p] = (y - aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - x) ; \
}
GrB_Info GB (_bind1st_tran__rminus_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y - aij) ; \
}
GrB_Info GB (_bind2nd_tran__rminus_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_1x1_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// author:BUG1989 (https://github.com/BUG1989/) Long-term support.
// author:FuGuangping (https://github.com/fu1899) Implemented the first version of INT8 quantization on ARMv7.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static inline signed char float2int8(float v)
{
int int32 = round(v);
if (int32 > 127) return 127;
if (int32 < -127) return -127;
return (signed char)int32;
}
#if __aarch64__
#if 1
#include "gemm_symm_int8.h"
static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
kernel_tm.create(outch, inch, (size_t)1u);
const int8_t *a = _kernel;
int8_t *sa = kernel_tm;
reorder_a((int8_t*)a, sa, outch, inch, inch);
}
// 1x1 stride-1 convolution as an int8 GEMM: top = kernel (m x k) * bottom (k x n).
// Output values are raw int32 accumulators (no requantization here).
static void conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
const size_t n = bottom_blob.w * bottom_blob.h;  // spatial size = GEMM columns
const size_t k = bottom_blob.c;                  // input channels = inner dim
const size_t m = top_blob.c;                     // output channels = GEMM rows
// Repack the input into the "B" operand layout for the int8 GEMM kernel.
ncnn::Mat bottom_tm(k * n, (size_t)1u, opt.workspace_allocator);
{
const int8_t* pData = bottom_blob;
int8_t *pReorder = bottom_tm;
reorder_b(pData, pReorder, k, n, bottom_blob.cstep);
}
// GEMM
int32_t *pc = top_blob;
const int8_t *pa = kernel;
const int8_t *pb = bottom_tm;
const size_t ldc = top_blob.cstep;
// NULL scale/bias pointers select the int32-output (non-requantizing) path.
int8kernel((void*)pc, pa, pb, m, k, n, ldc, nullptr, nullptr, opt);
}
// 1x1 stride-1 convolution as an int8 GEMM with fused requantization:
// the GEMM kernel applies per-channel scale and bias and emits int8 output.
// scales_requant holds pairs [scale_requant_in, scale_requant_out] per channel.
static void conv1x1s1_sgemm_int8_requant_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &kernel, const Mat &_bias, std::vector<float> scales_requant, const Option& opt)
{
const size_t n = bottom_blob.w * bottom_blob.h;  // spatial size = GEMM columns
const size_t k = bottom_blob.c;                  // input channels = inner dim
const size_t m = top_blob.c;                     // output channels = GEMM rows
ncnn::Mat scales_tm(m);
ncnn::Mat bias_tm(m);
float* scales = scales_tm;
const float* bias = _bias;
// outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out);
// the equation could convert to:
// out = float2int8( (float)sum * (scale_requant_in * scale_requant_out) + (bias * scale_requant_out) )
// prebuild the list of (scales_requant_in*scale_requant_out)
for (size_t i = 0; i < m; ++i)
{
scales_tm[i] = scales_requant[2*i] * scales_requant[2*i + 1];
}
if (!_bias.empty())
{
// Pre-scale the bias by scale_requant_out so the kernel only does fma.
for (size_t i = 0; i < m; ++i)
{
bias_tm[i] = bias[i] * scales_requant[2*i + 1];
}
bias = bias_tm;
}
// Repack the input into the "B" operand layout for the int8 GEMM kernel.
ncnn::Mat bottom_tm(k * n, (size_t)1u, opt.workspace_allocator);
{
const int8_t *pData = bottom_blob;
int8_t *pReorder = bottom_tm;
reorder_b(pData, pReorder, k, n, bottom_blob.cstep);
}
// GEMM
int8_t *pc = top_blob;  // int8 output: requantization happens inside the kernel
const int8_t *pa = kernel;
const int8_t *pb = bottom_tm;
const size_t ldc = top_blob.cstep;
int8kernel((void*)pc, pa, pb, m, k, n, ldc, scales, (float*)bias, opt);
}
#else
// Repack the 1x1 kernel for the hand-written 4x4 micro-kernel below:
// output channels are processed 4 at a time, input channels 2 at a time,
// interleaving [k0,k0+1, k1,k1+1, k2,k2+1, k3,k3+1] per step.
static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
const signed char* kernel = _kernel;
// kernel memory packed 4 x 4
kernel_tm.create(4*4, inch/4 + inch%4, outch/4 + outch%4, (size_t)1u);
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 2;
remain_outch_start = nn_outch << 2;
// Blocks of 4 output channels.
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 4;
const signed char* k0 = kernel + (p+0)*inch;
const signed char* k1 = kernel + (p+1)*inch;
const signed char* k2 = kernel + (p+2)*inch;
const signed char* k3 = kernel + (p+3)*inch;
signed char* ktmp = kernel_tm.channel(p/4);
int q=0;
// Interleave pairs of input channels across the 4 output channels.
for (; q+1<inch; q+=2)
{
ktmp[0] = k0[0];
ktmp[1] = k0[1];
ktmp[2] = k1[0];
ktmp[3] = k1[1];
ktmp[4] = k2[0];
ktmp[5] = k2[1];
ktmp[6] = k3[0];
ktmp[7] = k3[1];
ktmp += 8;
k0 += 2;
k1 += 2;
k2 += 2;
k3 += 2;
}
// Odd trailing input channel.
for (; q<inch; q++)
{
ktmp[0] = k0[0];
ktmp[1] = k1[0];
ktmp[2] = k2[0];
ktmp[3] = k3[0];
ktmp += 4;
k0 += 1;
k1 += 1;
k2 += 1;
k3 += 1;
}
}
// Remaining output channels, one at a time.
for (int p=remain_outch_start; p<outch; p++)
{
const signed char* k0 = kernel + (p+0)*inch;
signed char* ktmp = kernel_tm.channel(p/4 + p%4);
int q=0;
for (; q+1<inch; q=q+2)
{
ktmp[0] = k0[0];
ktmp[1] = k0[1];
ktmp += 2;
k0 += 2;
}
for (; q<inch; q++)
{
ktmp[0] = k0[0];
ktmp++;
k0++;
}
}
}
static void conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
const int size = w * h;
// bottom_tm memory packed 4 x 4
ncnn::Mat bottom_tm(4, inch, size/4 + size%4, (size_t)1u, opt.workspace_allocator);
{
int nn_size = size >> 2;
int remain_size_start = nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = ii * 4;
const signed char* img0 = bottom_blob.channel(0);
const signed char* img1 = bottom_blob.channel(1);
img0 += i;
img1 += i;
signed char* tmpptr = bottom_tm.channel(i/4);
int q = 0;
for (; q+1<inch; q=q+2)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img0[1];
tmpptr[3] = img1[1];
tmpptr[4] = img0[2];
tmpptr[5] = img1[2];
tmpptr[6] = img0[3];
tmpptr[7] = img1[3];
tmpptr += 8;
img0 += bottom_blob.cstep;
img0 += bottom_blob.cstep;
img1 += bottom_blob.cstep;
img1 += bottom_blob.cstep;
}
for (; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr += 4;
img0 += bottom_blob.cstep;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_size_start; i<size; i++)
{
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = bottom_tm.channel(i/4 + i%4);
for (int q=0; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += bottom_blob.cstep;
}
}
}
// sgemm process
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 2;
remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 4;
int* outptr0 = top_blob.channel(p);
int* outptr1 = top_blob.channel(p+1);
int* outptr2 = top_blob.channel(p+2);
int* outptr3 = top_blob.channel(p+3);
int i = 0;
for (; i+3<size; i+=4)
{
signed char* tmpptr = bottom_tm.channel(i/4);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
"prfm pldl1keep, [%4, #128] \n"
"prfm pldl1keep, [%5, #128] \n"
"eor v16.16b, v16.16b, v16.16b \n" // sum0
"eor v17.16b, v17.16b, v17.16b \n" // sum1
"eor v18.16b, v18.16b, v18.16b \n" // sum2
"eor v19.16b, v19.16b, v19.16b \n" // sum3
"lsr w4, %w12, #2 \n"// r4 = nn = L >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"// for (; k+3<L; k=k+4)
"ld1 {v0.16b}, [%4] \n"// i0, i1, i2, i3
"ld1 {v4.16b}, [%5] \n"// k0, k1, k2, k3
"add %4, %4, #16 \n"
"add %5, %5, #16 \n"
"rev32 v1.8h, v0.8h \n"// i1, i0, i3, i2
"rev64 v2.4s, v0.4s \n"// i2, i3, i0, i1
"rev64 v3.8h, v0.8h \n"// i3, i2, i1, i0
"smull v8.8h, v4.8b, v0.8b \n"
"smull v9.8h, v4.8b, v1.8b \n"
"smull v10.8h, v4.8b, v2.8b \n"
"smull v11.8h, v4.8b, v3.8b \n"
"prfm pldl1keep, [%4, #1024] \n"
"prfm pldl1keep, [%5, #1024] \n"
"smlal2 v8.8h, v4.16b, v0.16b \n"
"smlal2 v9.8h, v4.16b, v1.16b \n"
"smlal2 v10.8h, v4.16b, v2.16b \n"
"smlal2 v11.8h, v4.16b, v3.16b \n"
"sadalp v16.4s, v8.8h \n"// i0k0, i1k1, i2k2, i3k3
"sadalp v17.4s, v9.8h \n"// i1k0, i0k1, i3k2, i2k3
"sadalp v18.4s, v10.8h \n"// i2k0, i3k1, i0k2, i1k3
"sadalp v19.4s, v11.8h \n"// i3k0, i2k1, i1k2, i0k3
"subs w4, w4, #1 \n"
"bne 0b \n"
"1: \n"// for (; k+1<L; k=k+2)
// remain loop
"and w4, %w12, #3 \n"// w4 = remain = K & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"lsr w4, w4, #1 \n"// r4 = nn = L >> 1
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"// for (; k+1<L; k=k+2)
"ld1 {v0.8b}, [%4] \n"// i0, i1, i2, i3
"ld1 {v4.8b}, [%5] \n"// k0, k1, k2, k3
"add %4, %4, #8 \n"
"add %5, %5, #8 \n"
"rev32 v1.4h, v0.4h \n"// i2, i3, i0, i1
"rev64 v2.2s, v0.2s \n"// i1, i0, i3, i2
"rev64 v3.4h, v0.4h \n"// i0, i1, i2, i3
"smull v8.8h, v4.8b, v0.8b \n"
"smull v9.8h, v4.8b, v1.8b \n"
"smull v10.8h, v4.8b, v2.8b \n"
"smull v11.8h, v4.8b, v3.8b \n"
"sadalp v16.4s, v8.8h \n"
"sadalp v17.4s, v9.8h \n"
"sadalp v18.4s,v10.8h \n"
"sadalp v19.4s,v11.8h \n"
"subs w4, w4, #1 \n"
"bne 2b \n"
"3: \n"// realloc
"mov v20.s[0], v16.s[0] \n"
"mov v20.s[1], v17.s[0] \n"
"mov v20.s[2], v18.s[0] \n"
"mov v20.s[3], v19.s[0] \n"
"mov v21.s[0], v17.s[1] \n"
"mov v21.s[1], v16.s[1] \n"
"mov v21.s[2], v19.s[1] \n"
"mov v21.s[3], v18.s[1] \n"
"mov v22.s[0], v18.s[2] \n"
"mov v22.s[1], v19.s[2] \n"
"mov v22.s[2], v16.s[2] \n"
"mov v22.s[3], v17.s[2] \n"
"mov v23.s[0], v19.s[3] \n"
"mov v23.s[1], v18.s[3] \n"
"mov v23.s[2], v17.s[3] \n"
"mov v23.s[3], v16.s[3] \n"
"and w4, %w12, #1 \n"// w4 = remain = K & 1;
"cmp w4, #0 \n"
"beq 5f \n"
"4: \n"
"ld1 {v0.8b}, [%4] \n"
"ld1 {v1.8b}, [%5] \n"
"add %4, %4, #4 \n"
"add %5, %5, #4 \n"
"sshll v0.8h, v0.8b, #0 \n"// i0[0], i1[0], i2[0], i3[0]
"sshll v1.8h, v1.8b, #0 \n"// k0[0], k1[0], k2[0], k3[0]
"smlal v20.4s, v0.4h, v1.h[0] \n"// i0k0, i1k0, i2k0, i3k0
"smlal v21.4s, v0.4h, v1.h[1] \n"// i0k1, i1k1, i2k1, i3k1
"smlal v22.4s, v0.4h, v1.h[2] \n"// i0k2, i1k2, i2k2, i3k2
"smlal v23.4s, v0.4h, v1.h[3] \n"// i0k3, i1k3, i2k3, i3k3
"subs w4, w4, #1 \n"
"bne 2b \n"
"5: \n"
"st1 {v20.4s}, [%0] \n"
"st1 {v21.4s}, [%1] \n"
"st1 {v22.4s}, [%2] \n"
"st1 {v23.4s}, [%3] \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
#else
int sum0_0 = 0;
int sum0_1 = 0;
int sum0_2 = 0;
int sum0_3 = 0;
int sum1_0 = 0;
int sum1_1 = 0;
int sum1_2 = 0;
int sum1_3 = 0;
int sum2_0 = 0;
int sum2_1 = 0;
int sum2_2 = 0;
int sum2_3 = 0;
int sum3_0 = 0;
int sum3_1 = 0;
int sum3_2 = 0;
int sum3_3 = 0;
int q=0;
for (; q+1<inch; q=q+2)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_0 += tmpptr[1] * kptr[1];
sum0_1 += tmpptr[2] * kptr[0];
sum0_1 += tmpptr[3] * kptr[1];
sum0_2 += tmpptr[4] * kptr[0];
sum0_2 += tmpptr[5] * kptr[1];
sum0_3 += tmpptr[6] * kptr[0];
sum0_3 += tmpptr[7] * kptr[1];
sum1_0 += tmpptr[0] * kptr[2];
sum1_0 += tmpptr[1] * kptr[3];
sum1_1 += tmpptr[2] * kptr[2];
sum1_1 += tmpptr[3] * kptr[3];
sum1_2 += tmpptr[4] * kptr[2];
sum1_2 += tmpptr[5] * kptr[3];
sum1_3 += tmpptr[6] * kptr[2];
sum1_3 += tmpptr[7] * kptr[3];
sum2_0 += tmpptr[0] * kptr[4];
sum2_0 += tmpptr[1] * kptr[5];
sum2_1 += tmpptr[2] * kptr[4];
sum2_1 += tmpptr[3] * kptr[5];
sum2_2 += tmpptr[4] * kptr[4];
sum2_2 += tmpptr[5] * kptr[5];
sum2_3 += tmpptr[6] * kptr[4];
sum2_3 += tmpptr[7] * kptr[5];
sum3_0 += tmpptr[0] * kptr[6];
sum3_0 += tmpptr[1] * kptr[7];
sum3_1 += tmpptr[2] * kptr[6];
sum3_1 += tmpptr[3] * kptr[7];
sum3_2 += tmpptr[4] * kptr[6];
sum3_2 += tmpptr[5] * kptr[7];
sum3_3 += tmpptr[6] * kptr[6];
sum3_3 += tmpptr[7] * kptr[7];
tmpptr += 8;
kptr += 8;
}
for (; q<inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
tmpptr += 4;
kptr += 4;
}
outptr0[0] = sum0_0;
outptr0[1] = sum0_1;
outptr0[2] = sum0_2;
outptr0[3] = sum0_3;
outptr1[0] = sum1_0;
outptr1[1] = sum1_1;
outptr1[2] = sum1_2;
outptr1[3] = sum1_3;
outptr2[0] = sum2_0;
outptr2[1] = sum2_1;
outptr2[2] = sum2_2;
outptr2[3] = sum2_3;
outptr3[0] = sum3_0;
outptr3[1] = sum3_1;
outptr3[2] = sum3_2;
outptr3[3] = sum3_3;
#endif
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
}
for (; i<size; i++)
{
signed char* tmpptr = bottom_tm.channel(i/4 + i%4);
const signed char* kptr = kernel.channel(p/4);
#if 0//__ARM_NEON
int32x4_t _sum = vdupq_n_s32(0);
int q=0;
for (; q+3<inch; q=q+4)
{
int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3]
int8x8x2_t _k = vld2_s8(kptr); // k0[0-1], k1[0-1], k2[0-1], k3[0-1];k0[2-3], k1[2-3], k2[2-3], k3[2-3]
int16x8_t _r0_s16 = vmovl_s8(_r0); // i0[0],i0[1],i0[2],i0[3]
int16x8_t _k02_s16 = vmovl_s8(_k.val[0]); // k0[0],k1[0],k2[0],k3[0],k0[2],k1[2],k2[2],k3[2]
int16x8_t _k13_s16 = vmovl_s8(_k.val[1]); // k0[1],k1[1],k2[1],k3[1],k0[3],k1[3],k2[3],k3[3]
_sum = vmlal_lane_s16(_sum, vget_low_s16(_k02_s16), vget_low_s16(_r0_s16), 0); // i0[0]*k[0-3][0]
_sum = vmlal_lane_s16(_sum, vget_low_s16(_k13_s16), vget_low_s16(_r0_s16), 1); // i0[1]*k[0-3][1]
_sum = vmlal_lane_s16(_sum, vget_high_s16(_k02_s16), vget_low_s16(_r0_s16), 2); // i0[2]*k[0-3][2]
_sum = vmlal_lane_s16(_sum, vget_high_s16(_k13_s16), vget_low_s16(_r0_s16), 3); // i0[3]*k[0-3][3]
tmpptr += 4;
kptr += 16;
}
for (; q+1<inch; q=q+2)
{
int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3]
int8x8_t _k = vld1_s8(kptr); // k0[0-1], k1[0-1], k2[0-1], k3[0-1]
_r0[2] = _r0[0];
_r0[3] = _r0[1];
_r0[4] = _r0[0];
_r0[5] = _r0[1];
_r0[6] = _r0[0];
_r0[7] = _r0[1];
int16x8_t _tp0 = vmull_s8(_k, _r0);
_sum = vpadalq_s16(_sum, _tp0);
tmpptr += 2;
kptr += 8;
}
for (; q<inch; q++)
{
int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3]
int8x8_t _k = vld1_s8(kptr); // k[0-3][0]
int16x8_t _tp0 = vmull_s8(_k, _r0);
_sum = vaddw_s16(_sum, vget_low_s16(_tp0));
tmpptr += 1;
kptr += 4;
}
vst1q_lane_s32(outptr0, _sum, 0);
vst1q_lane_s32(outptr1, _sum, 1);
vst1q_lane_s32(outptr2, _sum, 2);
vst1q_lane_s32(outptr3, _sum, 3);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int q=0;
for (; q+1<inch; q=q+2)
{
sum0 += tmpptr[0] * kptr[0];
sum0 += tmpptr[1] * kptr[1];
sum1 += tmpptr[0] * kptr[2];
sum1 += tmpptr[1] * kptr[3];
sum2 += tmpptr[0] * kptr[4];
sum2 += tmpptr[1] * kptr[5];
sum3 += tmpptr[0] * kptr[6];
sum3 += tmpptr[1] * kptr[7];
tmpptr += 2;
kptr += 8;
}
for (; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[0] * kptr[1];
sum2 += tmpptr[0] * kptr[2];
sum3 += tmpptr[0] * kptr[3];
tmpptr += 1;
kptr += 4;
}
outptr0[0] = sum0;
outptr1[0] = sum1;
outptr2[0] = sum2;
outptr3[0] = sum3;
#endif
outptr0++;
outptr1++;
outptr2++;
outptr3++;
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
int* outptr0 = out0;
int i = 0;
for (; i+3<size; i+=4)
{
signed char* tmpptr = bottom_tm.channel(i/4);
const signed char* kptr = kernel.channel(p/4 + p%4);
#if __ARM_NEON
int32x4_t _sum = vdupq_n_s32(0);
int q=0;
for (; q+1<inch; q=q+2)
{
int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-1], i1[0-1], i2[0-1], i3[0-1]
int8x8_t _k = vld1_s8(kptr); // k0[0-1]
_k[2] = _k[0];
_k[3] = _k[1];
_k[4] = _k[0];
_k[5] = _k[1];
_k[6] = _k[0];
_k[7] = _k[1];
int16x8_t _tp0 = vmull_s8(_k, _r0);
_sum = vpadalq_s16(_sum, _tp0);
tmpptr += 8;
kptr += 2;
}
for (; q<inch; q++)
{
int8x8_t _r0 = vld1_s8(tmpptr); // i0[0], i1[0], i2[0], i3[0]
int8x8_t _k = vld1_s8(kptr); // k[0][0]
int16x8_t _r0_s16 = vmovl_s8(_r0);
int16x8_t _k_s16 = vmovl_s8(_k);
_sum = vmlal_lane_s16(_sum, vget_low_s16(_r0_s16), vget_low_s16(_k_s16), 0); // i0k0, i1k0, i2k0, i3k0
tmpptr += 4;
kptr += 1;
}
vst1q_s32(outptr0, _sum);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int q=0;
for (; q+1<inch; q=q+2)
{
sum0 += tmpptr[0] * kptr[0];
sum0 += tmpptr[1] * kptr[1];
sum1 += tmpptr[2] * kptr[0];
sum1 += tmpptr[3] * kptr[1];
sum2 += tmpptr[4] * kptr[0];
sum2 += tmpptr[5] * kptr[1];
sum3 += tmpptr[6] * kptr[0];
sum3 += tmpptr[7] * kptr[1];
tmpptr += 8;
kptr += 2;
}
for (; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
tmpptr += 4;
kptr++;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
#endif
outptr0 += 4;
}
for (; i<size; i++)
{
signed char* tmpptr = bottom_tm.channel(i/4 + i%4);
const signed char* kptr = kernel.channel(p/4 + p%4);
int q = 0;
int sum0 = 0;
for (; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = sum0;
outptr0++;
}
}
}
// Convolution 1x1 stride 1, int8 sgemm with fused requantization (aarch64).
// Computes top = int8(((int32 gemm result) * scale_in + bias) * scale_out),
// writing int8 output directly. bottom_blob is repacked 4-pixels-wide per
// input channel into bottom_tm, then multiplied against the interleaved
// kernel (see conv1x1s1_sgemm_transform_kernel_int8_neon).
static void conv1x1s1_sgemm_int8_requant_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &kernel, const Mat &_bias, std::vector<float> scales_requant, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    int outch = top_blob.c;

    const int size = w * h;

    const float* bias = _bias;

    // bottom_tm memory packed 4 x 4:
    // for each group of 4 pixels, channel pairs are interleaved as
    // p0c0,p0c1, p1c0,p1c1, p2c0,p2c1, p3c0,p3c1 (8 bytes per channel pair).
    ncnn::Mat bottom_tm(4, inch, size/4 + size%4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = size >> 2;
        int remain_size_start = nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii=0; ii<nn_size; ii++)
        {
            int i = ii * 4;

            const signed char* img0 = bottom_blob.channel(0);
            const signed char* img1 = bottom_blob.channel(1);
            img0 += i;
            img1 += i;

            signed char* tmpptr = bottom_tm.channel(i/4);

            int q = 0;
            for (; q+1<inch; q=q+2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img0[1];
                tmpptr[3] = img1[1];
                tmpptr[4] = img0[2];
                tmpptr[5] = img1[2];
                tmpptr[6] = img0[3];
                tmpptr[7] = img1[3];

                tmpptr += 8;
                // img0/img1 track channels q and q+1: advance both by two channels
                img0 += bottom_blob.cstep;
                img0 += bottom_blob.cstep;
                img1 += bottom_blob.cstep;
                img1 += bottom_blob.cstep;
            }

            for (; q<inch; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img0[2];
                tmpptr[3] = img0[3];

                tmpptr += 4;
                img0 += bottom_blob.cstep;
            }
        }

        // leftover pixels (size % 4): one pixel per bottom_tm channel
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_size_start; i<size; i++)
        {
            const signed char* img0 = bottom_blob.channel(0);
            img0 += i;

            signed char* tmpptr = bottom_tm.channel(i/4 + i%4);

            for (int q=0; q<inch; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr += 1;
                img0 += bottom_blob.cstep;
            }
        }
    }

    // sgemm process: 4 output channels at a time
    int nn_outch = 0;
    int remain_outch_start = 0;

    nn_outch = outch >> 2;
    remain_outch_start = nn_outch << 2;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int pp=0; pp<nn_outch; pp++)
    {
        int p = pp * 4;

        signed char* outptr0 = top_blob.channel(p);
        signed char* outptr1 = top_blob.channel(p+1);
        signed char* outptr2 = top_blob.channel(p+2);
        signed char* outptr3 = top_blob.channel(p+3);

        const float bias0 = bias ? bias[p] : 0.f;
        const float bias1 = bias ? bias[p+1] : 0.f;
        const float bias2 = bias ? bias[p+2] : 0.f;
        const float bias3 = bias ? bias[p+3] : 0.f;

        // scales_requant stores (scale_in, scale_out) pairs per output channel
        const float scale_requant_in0 = scales_requant[2*p];
        const float scale_requant_out0 = scales_requant[2*p+1];
        const float scale_requant_in1 = scales_requant[2*(p+1)];
        const float scale_requant_out1 = scales_requant[2*(p+1)+1];
        const float scale_requant_in2 = scales_requant[2*(p+2)];
        const float scale_requant_out2 = scales_requant[2*(p+2)+1];
        const float scale_requant_in3 = scales_requant[2*(p+3)];
        const float scale_requant_out3 = scales_requant[2*(p+3)+1];

        // per-lane (one lane per output channel) bias/scale vectors for the
        // single-pixel tail; the 4-pixel asm path uses the broadcast _bias0..3
        float32x4_t _bias03, _scale_in03, _scale_out03;
        float32x4_t _bias0 = vdupq_n_f32(bias0);
        float32x4_t _bias1 = vdupq_n_f32(bias1);
        float32x4_t _bias2 = vdupq_n_f32(bias2);
        float32x4_t _bias3 = vdupq_n_f32(bias3);
        _bias03[0] = bias0;
        _bias03[1] = bias1;
        _bias03[2] = bias2;
        _bias03[3] = bias3;
        _scale_in03[0] = scale_requant_in0;
        _scale_in03[1] = scale_requant_in1;
        _scale_in03[2] = scale_requant_in2;
        _scale_in03[3] = scale_requant_in3;
        _scale_out03[0] = scale_requant_out0;
        _scale_out03[1] = scale_requant_out1;
        _scale_out03[2] = scale_requant_out2;
        _scale_out03[3] = scale_requant_out3;

        int i = 0;
        for (; i+3<size; i+=4)
        {
            signed char* tmpptr = bottom_tm.channel(i/4);
            const signed char* kptr = kernel.channel(p/4);
#if 1 //__ARM_NEON
            asm volatile(
                "prfm   pldl1keep, [%4, #128]       \n"
                "prfm   pldl1keep, [%5, #128]       \n"

                "eor    v16.16b, v16.16b, v16.16b   \n" // sum0
                "eor    v17.16b, v17.16b, v17.16b   \n" // sum1
                "eor    v18.16b, v18.16b, v18.16b   \n" // sum2
                "eor    v19.16b, v19.16b, v19.16b   \n" // sum3

                "lsr    w4, %w12, #2                \n"// w4 = nn = inch >> 2 (4 channels / 16 bytes per iter)
                "cmp    w4, #0                      \n"
                "beq    1f                          \n"

                "0:                                 \n"// for (; k+3<L; k=k+4)
                "ld1    {v0.16b}, [%4]              \n"// i0, i1, i2, i3
                "ld1    {v4.16b}, [%5]              \n"// k0, k1, k2, k3
                "add    %4, %4, #16                 \n"
                "add    %5, %5, #16                 \n"

                "rev32  v1.8h, v0.8h                \n"// i1, i0, i3, i2
                "rev64  v2.4s, v0.4s                \n"// i2, i3, i0, i1
                "rev64  v3.8h, v0.8h                \n"// i3, i2, i1, i0

                "smull  v8.8h, v4.8b, v0.8b         \n"
                "smull  v9.8h, v4.8b, v1.8b         \n"
                "smull  v10.8h, v4.8b, v2.8b        \n"
                "smull  v11.8h, v4.8b, v3.8b        \n"

                "prfm   pldl1keep, [%4, #1024]      \n"
                "prfm   pldl1keep, [%5, #1024]      \n"

                "smlal2 v8.8h, v4.16b, v0.16b       \n"
                "smlal2 v9.8h, v4.16b, v1.16b       \n"
                "smlal2 v10.8h, v4.16b, v2.16b      \n"
                "smlal2 v11.8h, v4.16b, v3.16b      \n"

                "sadalp v16.4s, v8.8h               \n"// i0k0, i1k1, i2k2, i3k3
                "sadalp v17.4s, v9.8h               \n"// i1k0, i0k1, i3k2, i2k3
                "sadalp v18.4s, v10.8h              \n"// i2k0, i3k1, i0k2, i1k3
                "sadalp v19.4s, v11.8h              \n"// i3k0, i2k1, i1k2, i0k3

                "subs   w4, w4, #1                  \n"
                "bne    0b                          \n"

                "1:                                 \n"// for (; k+1<L; k=k+2)
                // remain loop
                "and    w4, %w12, #3                \n"// w4 = remain = K & 3;
                "cmp    w4, #0                      \n"
                "beq    3f                          \n"
                "lsr    w4, w4, #1                  \n"// w4 = nn = remain >> 1
                "cmp    w4, #0                      \n"
                "beq    3f                          \n"

                "2:                                 \n"// for (; k+1<L; k=k+2)
                "ld1    {v0.8b}, [%4]               \n"// i0, i1, i2, i3
                "ld1    {v4.8b}, [%5]               \n"// k0, k1, k2, k3
                "add    %4, %4, #8                  \n"
                "add    %5, %5, #8                  \n"

                "rev32  v1.4h, v0.4h                \n"// i1, i0, i3, i2
                "rev64  v2.2s, v0.2s                \n"// i2, i3, i0, i1
                "rev64  v3.4h, v0.4h                \n"// i3, i2, i1, i0

                "smull  v8.8h, v4.8b, v0.8b         \n"
                "smull  v9.8h, v4.8b, v1.8b         \n"
                "smull  v10.8h, v4.8b, v2.8b        \n"
                "smull  v11.8h, v4.8b, v3.8b        \n"

                "sadalp v16.4s, v8.8h               \n"
                "sadalp v17.4s, v9.8h               \n"
                "sadalp v18.4s, v10.8h              \n"
                "sadalp v19.4s, v11.8h              \n"

                "subs   w4, w4, #1                  \n"
                "bne    2b                          \n"

                "3:                                 \n"// reorder lanes: gather each pixel's 4-channel partial sums
                "mov    v20.s[0], v16.s[0]          \n"
                "mov    v20.s[1], v17.s[0]          \n"
                "mov    v20.s[2], v18.s[0]          \n"
                "mov    v20.s[3], v19.s[0]          \n"

                "mov    v21.s[0], v17.s[1]          \n"
                "mov    v21.s[1], v16.s[1]          \n"
                "mov    v21.s[2], v19.s[1]          \n"
                "mov    v21.s[3], v18.s[1]          \n"

                "mov    v22.s[0], v18.s[2]          \n"
                "mov    v22.s[1], v19.s[2]          \n"
                "mov    v22.s[2], v16.s[2]          \n"
                "mov    v22.s[3], v17.s[2]          \n"

                "mov    v23.s[0], v19.s[3]          \n"
                "mov    v23.s[1], v18.s[3]          \n"
                "mov    v23.s[2], v17.s[3]          \n"
                "mov    v23.s[3], v16.s[3]          \n"

                "and    w4, %w12, #1                \n"// w4 = remain = K & 1;
                "cmp    w4, #0                      \n"
                "beq    5f                          \n"

                "4:                                 \n"
                "ld1    {v0.8b}, [%4]               \n"
                "ld1    {v1.8b}, [%5]               \n"
                "add    %4, %4, #4                  \n"
                "add    %5, %5, #4                  \n"

                "sshll  v0.8h, v0.8b, #0            \n"// i0[0], i1[0], i2[0], i3[0]
                "sshll  v1.8h, v1.8b, #0            \n"// k0[0], k1[0], k2[0], k3[0]

                "smlal  v20.4s, v0.4h, v1.h[0]      \n"// i0k0, i1k0, i2k0, i3k0
                "smlal  v21.4s, v0.4h, v1.h[1]      \n"// i0k1, i1k1, i2k1, i3k1
                "smlal  v22.4s, v0.4h, v1.h[2]      \n"// i0k2, i1k2, i2k2, i3k2
                "smlal  v23.4s, v0.4h, v1.h[3]      \n"// i0k3, i1k3, i2k3, i3k3

                "subs   w4, w4, #1                  \n"
                "bne    4b                          \n"// FIX: was "bne 2b" (wrong label); latent since K&1 <= 1

                "5:                                 \n"
                // top_s32 -> top_f32
                "scvtf  v20.4s, v20.4s              \n"
                "scvtf  v21.4s, v21.4s              \n"
                "scvtf  v22.4s, v22.4s              \n"
                "scvtf  v23.4s, v23.4s              \n"
                // top_f32 = top_f32 * scale_in
                "fmul   v20.4s, v20.4s, %17.s[0]    \n"
                "fmul   v21.4s, v21.4s, %17.s[1]    \n"
                "fmul   v22.4s, v22.4s, %17.s[2]    \n"
                "fmul   v23.4s, v23.4s, %17.s[3]    \n"
                // top_f32 = top_f32 + bias
                "fadd   v20.4s, v20.4s, %13.4s      \n"
                "fadd   v21.4s, v21.4s, %14.4s      \n"
                "fadd   v22.4s, v22.4s, %15.4s      \n"
                "fadd   v23.4s, v23.4s, %16.4s      \n"
                // top_f32 = top_f32 * scale_out
                "fmul   v20.4s, v20.4s, %18.s[0]    \n"
                "fmul   v21.4s, v21.4s, %18.s[1]    \n"
                "fmul   v22.4s, v22.4s, %18.s[2]    \n"
                "fmul   v23.4s, v23.4s, %18.s[3]    \n"
                // top_f32 -> top_s32 (round to nearest, ties away)
                "fcvtas v20.4s, v20.4s              \n"
                "fcvtas v21.4s, v21.4s              \n"
                "fcvtas v22.4s, v22.4s              \n"
                "fcvtas v23.4s, v23.4s              \n"
                // top_s32 -> top_s16 (saturating)
                "sqxtn  v7.4h, v20.4s               \n"
                "sqxtn2 v7.8h, v21.4s               \n"
                "sqxtn  v8.4h, v22.4s               \n"
                "sqxtn2 v8.8h, v23.4s               \n"
                // top_s16 -> top_s8 (saturating)
                "sqxtn  v0.8b, v7.8h                \n"
                "sqxtn  v1.8b, v8.8h                \n"
                // save top_s8: 4 bytes (4 pixels) per output channel
                "st1    {v0.s}[0], [%0]             \n"
                "st1    {v0.s}[1], [%1]             \n"
                "st1    {v1.s}[0], [%2]             \n"
                "st1    {v1.s}[1], [%3]             \n"
                : "=r"(outptr0), // %0
                  "=r"(outptr1), // %1
                  "=r"(outptr2), // %2
                  "=r"(outptr3), // %3
                  "=r"(tmpptr),  // %4
                  "=r"(kptr)     // %5
                : "0"(outptr0),
                  "1"(outptr1),
                  "2"(outptr2),
                  "3"(outptr3),
                  "4"(tmpptr),
                  "5"(kptr),
                  "r"(inch),          // %12
                  "w"(_bias0),        // %13
                  "w"(_bias1),        // %14
                  "w"(_bias2),        // %15
                  "w"(_bias3),        // %16
                  "w"(_scale_in03),   // %17
                  "w"(_scale_out03)   // %18
                : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
            );
#else
            int sum0_0 = 0;
            int sum0_1 = 0;
            int sum0_2 = 0;
            int sum0_3 = 0;

            int sum1_0 = 0;
            int sum1_1 = 0;
            int sum1_2 = 0;
            int sum1_3 = 0;

            int sum2_0 = 0;
            int sum2_1 = 0;
            int sum2_2 = 0;
            int sum2_3 = 0;

            int sum3_0 = 0;
            int sum3_1 = 0;
            int sum3_2 = 0;
            int sum3_3 = 0;

            int q=0;
            for (; q+1<inch; q=q+2)
            {
                sum0_0 += tmpptr[0] * kptr[0];
                sum0_0 += tmpptr[1] * kptr[1];
                sum0_1 += tmpptr[2] * kptr[0];
                sum0_1 += tmpptr[3] * kptr[1];
                sum0_2 += tmpptr[4] * kptr[0];
                sum0_2 += tmpptr[5] * kptr[1];
                sum0_3 += tmpptr[6] * kptr[0];
                sum0_3 += tmpptr[7] * kptr[1];

                sum1_0 += tmpptr[0] * kptr[2];
                sum1_0 += tmpptr[1] * kptr[3];
                sum1_1 += tmpptr[2] * kptr[2];
                sum1_1 += tmpptr[3] * kptr[3];
                sum1_2 += tmpptr[4] * kptr[2];
                sum1_2 += tmpptr[5] * kptr[3];
                sum1_3 += tmpptr[6] * kptr[2];
                sum1_3 += tmpptr[7] * kptr[3];

                sum2_0 += tmpptr[0] * kptr[4];
                sum2_0 += tmpptr[1] * kptr[5];
                sum2_1 += tmpptr[2] * kptr[4];
                sum2_1 += tmpptr[3] * kptr[5];
                sum2_2 += tmpptr[4] * kptr[4];
                sum2_2 += tmpptr[5] * kptr[5];
                sum2_3 += tmpptr[6] * kptr[4];
                sum2_3 += tmpptr[7] * kptr[5];

                sum3_0 += tmpptr[0] * kptr[6];
                sum3_0 += tmpptr[1] * kptr[7];
                sum3_1 += tmpptr[2] * kptr[6];
                sum3_1 += tmpptr[3] * kptr[7];
                sum3_2 += tmpptr[4] * kptr[6];
                sum3_2 += tmpptr[5] * kptr[7];
                sum3_3 += tmpptr[6] * kptr[6];
                sum3_3 += tmpptr[7] * kptr[7];

                tmpptr += 8;
                kptr += 8;
            }

            for (; q<inch; q++)
            {
                sum0_0 += tmpptr[0] * kptr[0];
                sum0_1 += tmpptr[1] * kptr[0];
                sum0_2 += tmpptr[2] * kptr[0];
                sum0_3 += tmpptr[3] * kptr[0];

                sum1_0 += tmpptr[0] * kptr[1];
                sum1_1 += tmpptr[1] * kptr[1];
                sum1_2 += tmpptr[2] * kptr[1];
                sum1_3 += tmpptr[3] * kptr[1];

                sum2_0 += tmpptr[0] * kptr[2];
                sum2_1 += tmpptr[1] * kptr[2];
                sum2_2 += tmpptr[2] * kptr[2];
                sum2_3 += tmpptr[3] * kptr[2];

                sum3_0 += tmpptr[0] * kptr[3];
                sum3_1 += tmpptr[1] * kptr[3];
                sum3_2 += tmpptr[2] * kptr[3];
                sum3_3 += tmpptr[3] * kptr[3];

                tmpptr += 4;
                kptr += 4;
            }

            outptr0[0] = float2int8(((float)sum0_0 * scale_requant_in0 + bias0) * scale_requant_out0);
            outptr0[1] = float2int8(((float)sum0_1 * scale_requant_in0 + bias0) * scale_requant_out0);
            outptr0[2] = float2int8(((float)sum0_2 * scale_requant_in0 + bias0) * scale_requant_out0);
            outptr0[3] = float2int8(((float)sum0_3 * scale_requant_in0 + bias0) * scale_requant_out0);
            outptr1[0] = float2int8(((float)sum1_0 * scale_requant_in1 + bias1) * scale_requant_out1);
            outptr1[1] = float2int8(((float)sum1_1 * scale_requant_in1 + bias1) * scale_requant_out1);
            outptr1[2] = float2int8(((float)sum1_2 * scale_requant_in1 + bias1) * scale_requant_out1);
            outptr1[3] = float2int8(((float)sum1_3 * scale_requant_in1 + bias1) * scale_requant_out1);
            outptr2[0] = float2int8(((float)sum2_0 * scale_requant_in2 + bias2) * scale_requant_out2);
            outptr2[1] = float2int8(((float)sum2_1 * scale_requant_in2 + bias2) * scale_requant_out2);
            outptr2[2] = float2int8(((float)sum2_2 * scale_requant_in2 + bias2) * scale_requant_out2);
            outptr2[3] = float2int8(((float)sum2_3 * scale_requant_in2 + bias2) * scale_requant_out2);
            outptr3[0] = float2int8(((float)sum3_0 * scale_requant_in3 + bias3) * scale_requant_out3);
            outptr3[1] = float2int8(((float)sum3_1 * scale_requant_in3 + bias3) * scale_requant_out3);
            outptr3[2] = float2int8(((float)sum3_2 * scale_requant_in3 + bias3) * scale_requant_out3);
            outptr3[3] = float2int8(((float)sum3_3 * scale_requant_in3 + bias3) * scale_requant_out3);
#endif
            outptr0 += 4;
            outptr1 += 4;
            outptr2 += 4;
            outptr3 += 4;
        }

        for (; i<size; i++)
        {
            signed char* tmpptr = bottom_tm.channel(i/4 + i%4);
            const signed char* kptr = kernel.channel(p/4);
#if 1 //__ARM_NEON
            int32x4_t _sum = vdupq_n_s32(0);

            int q=0;
            for (; q+3<inch; q=q+4)
            {
                // NOTE(review): vld1_s8 reads 8 bytes while only 4 are consumed;
                // relies on Mat allocation padding -- confirm
                int8x8_t _r0 = vld1_s8(tmpptr);   // i0[0-3]
                int8x8x2_t _k = vld2_s8(kptr);    // k0[0-1], k1[0-1], k2[0-1], k3[0-1];k0[2-3], k1[2-3], k2[2-3], k3[2-3]

                int16x8_t _r0_s16 = vmovl_s8(_r0);        // i0[0],i0[1],i0[2],i0[3]
                int16x8_t _k02_s16 = vmovl_s8(_k.val[0]); // k0[0],k1[0],k2[0],k3[0],k0[2],k1[2],k2[2],k3[2]
                int16x8_t _k13_s16 = vmovl_s8(_k.val[1]); // k0[1],k1[1],k2[1],k3[1],k0[3],k1[3],k2[3],k3[3]

                _sum = vmlal_lane_s16(_sum, vget_low_s16(_k02_s16), vget_low_s16(_r0_s16), 0);  // i0[0]*k[0-3][0]
                _sum = vmlal_lane_s16(_sum, vget_low_s16(_k13_s16), vget_low_s16(_r0_s16), 1);  // i0[1]*k[0-3][1]
                _sum = vmlal_lane_s16(_sum, vget_high_s16(_k02_s16), vget_low_s16(_r0_s16), 2); // i0[2]*k[0-3][2]
                _sum = vmlal_lane_s16(_sum, vget_high_s16(_k13_s16), vget_low_s16(_r0_s16), 3); // i0[3]*k[0-3][3]

                tmpptr += 4;
                kptr += 16;
            }

            for (; q+1<inch; q=q+2)
            {
                int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3]
                int8x8_t _k = vld1_s8(kptr);    // k0[0-1], k1[0-1], k2[0-1], k3[0-1]

                // broadcast the 2 input bytes across all 4 kernel pairs
                _r0[2] = _r0[0];
                _r0[3] = _r0[1];
                _r0[4] = _r0[0];
                _r0[5] = _r0[1];
                _r0[6] = _r0[0];
                _r0[7] = _r0[1];

                int16x8_t _tp0 = vmull_s8(_k, _r0);
                _sum = vpadalq_s16(_sum, _tp0);

                tmpptr += 2;
                kptr += 8;
            }

            for (; q<inch; q++)
            {
                int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-3]
                int8x8_t _k = vld1_s8(kptr);    // k[0-3][0]

                int16x8_t _tp0 = vmull_s8(_k, _r0);
                _sum = vaddw_s16(_sum, vget_low_s16(_tp0));

                tmpptr += 1;
                kptr += 4;
            }

            // top_s32 -> top_f32
            float32x4_t _sum_f32 = vcvtq_f32_s32(_sum);
            // top_f32 = top_f32 * scale_in
            _sum_f32 = vmulq_f32(_sum_f32, _scale_in03);
            // top_f32 = top_f32 + bias
            _sum_f32 = vaddq_f32(_sum_f32, _bias03);
            // top_f32 = top_f32 * scale_out
            _sum_f32 = vmulq_f32(_sum_f32, _scale_out03);
            // top_f32 -> top_s32
            _sum = vcvtaq_s32_f32(_sum_f32);
            // top_s32 -> top_s16
            int16x4_t _sum_s16 = vqmovn_s32(_sum);
            int16x8_t _sum_s16_tp = vcombine_s16(_sum_s16, _sum_s16);
            // top_s16 -> top_s8
            int8x8_t _sum_s8 = vqmovn_s16(_sum_s16_tp);
            // save top_s8: one pixel for each of the 4 output channels
            vst1_lane_s8(outptr0, _sum_s8, 0);
            vst1_lane_s8(outptr1, _sum_s8, 1);
            vst1_lane_s8(outptr2, _sum_s8, 2);
            vst1_lane_s8(outptr3, _sum_s8, 3);
#else
            int sum0 = 0;
            int sum1 = 0;
            int sum2 = 0;
            int sum3 = 0;

            int q=0;
            for (; q+1<inch; q=q+2)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum0 += tmpptr[1] * kptr[1];

                sum1 += tmpptr[0] * kptr[2];
                sum1 += tmpptr[1] * kptr[3];

                sum2 += tmpptr[0] * kptr[4];
                sum2 += tmpptr[1] * kptr[5];

                sum3 += tmpptr[0] * kptr[6];
                sum3 += tmpptr[1] * kptr[7];

                tmpptr += 2;
                kptr += 8;
            }

            for (; q<inch; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum1 += tmpptr[0] * kptr[1];
                sum2 += tmpptr[0] * kptr[2];
                sum3 += tmpptr[0] * kptr[3];

                tmpptr += 1;
                kptr += 4;
            }

            outptr0[0] = float2int8(((float)sum0 * scale_requant_in0 + bias0) * scale_requant_out0);
            outptr1[0] = float2int8(((float)sum1 * scale_requant_in1 + bias1) * scale_requant_out1);
            outptr2[0] = float2int8(((float)sum2 * scale_requant_in2 + bias2) * scale_requant_out2);
            outptr3[0] = float2int8(((float)sum3 * scale_requant_in3 + bias3) * scale_requant_out3);
#endif
            outptr0++;
            outptr1++;
            outptr2++;
            outptr3++;
        }
    }

    // remaining output channels, one at a time
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p=remain_outch_start; p<outch; p++)
    {
        Mat out0 = top_blob.channel(p);

        signed char* outptr0 = out0;

        const float bias0 = bias ? bias[p] : 0.f;
        const float scale_requant_in = scales_requant[2*p];
        const float scale_requant_out = scales_requant[2*p+1];

        float32x4_t _bias0 = vdupq_n_f32(bias0);
        float32x4_t _scale_in = vdupq_n_f32(scale_requant_in);
        float32x4_t _scale_out = vdupq_n_f32(scale_requant_out);

        int i = 0;
        for (; i+3<size; i+=4)
        {
            signed char* tmpptr = bottom_tm.channel(i/4);
            const signed char* kptr = kernel.channel(p/4 + p%4);
#if 1 //__ARM_NEON
            int32x4_t _sum = vdupq_n_s32(0);

            int q=0;
            for (; q+1<inch; q=q+2)
            {
                int8x8_t _r0 = vld1_s8(tmpptr); // i0[0-1], i1[0-1], i2[0-1], i3[0-1]
                int8x8_t _k = vld1_s8(kptr);    // k0[0-1]

                // broadcast the 2 kernel bytes across all 4 pixel pairs
                _k[2] = _k[0];
                _k[3] = _k[1];
                _k[4] = _k[0];
                _k[5] = _k[1];
                _k[6] = _k[0];
                _k[7] = _k[1];

                int16x8_t _tp0 = vmull_s8(_k, _r0);
                _sum = vpadalq_s16(_sum, _tp0);

                tmpptr += 8;
                kptr += 2;
            }

            for (; q<inch; q++)
            {
                int8x8_t _r0 = vld1_s8(tmpptr); // i0[0], i1[0], i2[0], i3[0]
                int8x8_t _k = vld1_s8(kptr);    // k[0][0]

                int16x8_t _r0_s16 = vmovl_s8(_r0);
                int16x8_t _k_s16 = vmovl_s8(_k);

                _sum = vmlal_lane_s16(_sum, vget_low_s16(_r0_s16), vget_low_s16(_k_s16), 0); // i0k0, i1k0, i2k0, i3k0

                tmpptr += 4;
                kptr += 1;
            }

            // top_s32 -> top_f32
            float32x4_t _sum_f32 = vcvtq_f32_s32(_sum);
            // top_f32 = top_f32 * scale_in
            _sum_f32 = vmulq_f32(_sum_f32, _scale_in);
            // top_f32 = top_f32 + bias
            _sum_f32 = vaddq_f32(_sum_f32, _bias0);
            // top_f32 = top_f32 * scale_out
            _sum_f32 = vmulq_f32(_sum_f32, _scale_out);
            // top_f32 -> top_s32
            _sum = vcvtaq_s32_f32(_sum_f32);
            // top_s32 -> top_s16
            int16x4_t _sum_s16 = vqmovn_s32(_sum);
            int16x8_t _sum_s16_tp = vcombine_s16(_sum_s16, _sum_s16);
            // top_s16 -> top_s8
            int8x8_t _sum_s8 = vqmovn_s16(_sum_s16_tp);
            // save top_s8: 4 pixels (only the low 4 bytes are meaningful,
            // but the next iteration / tail overwrites the rest)
            vst1_s8(outptr0, _sum_s8);
#else
            int sum0 = 0;
            int sum1 = 0;
            int sum2 = 0;
            int sum3 = 0;

            int q=0;
            for (; q+1<inch; q=q+2)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum0 += tmpptr[1] * kptr[1];
                sum1 += tmpptr[2] * kptr[0];
                sum1 += tmpptr[3] * kptr[1];
                sum2 += tmpptr[4] * kptr[0];
                sum2 += tmpptr[5] * kptr[1];
                sum3 += tmpptr[6] * kptr[0];
                sum3 += tmpptr[7] * kptr[1];

                tmpptr += 8;
                kptr += 2;
            }

            for (; q<inch; q++)
            {
                sum0 += tmpptr[0] * kptr[0];
                sum1 += tmpptr[1] * kptr[0];
                sum2 += tmpptr[2] * kptr[0];
                sum3 += tmpptr[3] * kptr[0];

                tmpptr += 4;
                kptr++;
            }

            outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out);
            outptr0[1] = float2int8(((float)sum1 * scale_requant_in + bias0) * scale_requant_out);
            outptr0[2] = float2int8(((float)sum2 * scale_requant_in + bias0) * scale_requant_out);
            outptr0[3] = float2int8(((float)sum3 * scale_requant_in + bias0) * scale_requant_out);
#endif
            outptr0 += 4;
        }

        for (; i<size; i++)
        {
            signed char* tmpptr = bottom_tm.channel(i/4 + i%4);
            const signed char* kptr = kernel.channel(p/4 + p%4);

            int q = 0;
            int sum0 = 0;

            for (; q<inch; q++)
            {
                sum0 += tmpptr[0] * kptr[0];

                tmpptr++;
                kptr++;
            }

            outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out);

            outptr0++;
        }
    }
}
#endif
#else
// Interleave the 1x1 int8 kernel for the sgemm kernels: output channels are
// grouped 4 at a time, and within a group the weights of the 4 channels are
// stored adjacently for each input channel (k0[q],k1[q],k2[q],k3[q], ...).
// Leftover output channels keep their natural inch-contiguous layout, each
// in its own kernel_tm channel.
static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    const signed char* kernel = _kernel;

    kernel_tm.create(4*4, inch/4 + inch%4, outch/4 + outch%4, (size_t)1u);

    int p = 0;
    for (; p+3 < outch; p += 4)
    {
        // weight rows for the 4 output channels of this group
        const signed char* k0 = kernel + (p+0)*inch;
        const signed char* k1 = kernel + (p+1)*inch;
        const signed char* k2 = kernel + (p+2)*inch;
        const signed char* k3 = kernel + (p+3)*inch;

        signed char* ktmp = kernel_tm.channel(p/4);

        for (int q = 0; q < inch; q++)
        {
            // interleave one input-channel column across the 4 output channels
            ktmp[0] = k0[q];
            ktmp[1] = k1[q];
            ktmp[2] = k2[q];
            ktmp[3] = k3[q];
            ktmp += 4;
        }
    }

    for (; p < outch; p++)
    {
        // leftover output channel: plain copy of its inch weights
        const signed char* k0 = kernel + p*inch;
        signed char* ktmp = kernel_tm.channel(p/4 + p%4);

        for (int q = 0; q < inch; q++)
        {
            *ktmp++ = k0[q];
        }
    }
}
/*
* Convolution 1x1 quantized with sgemm int8
*/
static void conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
const int size = w * h;
// interleave
Mat tmp(8*4, inch/4+inch%4, size/8 + (size%8)/4 + size%4, 1u, opt.workspace_allocator);
{
int nn_size = size >> 3;
int remain_size_start = nn_size << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = ii * 8;
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8);
for (int q=0; q<inch; q++)
{
#if __ARM_NEON
asm volatile(
"pld [%0, #64] \n"
"vld1.s8 {d0}, [%0] \n"
"vst1.s8 {d0}, [%1]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "d0"
);
img0 += bottom_blob.cstep;
#else
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr[4] = img0[4];
tmpptr[5] = img0[5];
tmpptr[6] = img0[6];
tmpptr[7] = img0[7];
tmpptr += 8;
img0 += bottom_blob.cstep;
#endif // __ARM_NEON
}
}
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
for (int q=0; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr += 4;
img0 += bottom_blob.cstep;
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_size_start; i<size; i++)
{
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
for (int q=0; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr++;
img0 += bottom_blob.cstep;
}
}
}
// sgemm process
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
int* outptr0 = top_blob.channel(p);
int* outptr1 = top_blob.channel(p+1);
int* outptr2 = top_blob.channel(p+2);
int* outptr3 = top_blob.channel(p+3);
int i = 0;
for (; i+7<size; i+=8)
{
const signed char* tmpptr = tmp.channel(i/8);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"vmov.s32 q8, #0 \n"
"vmov.s32 q9, #0 \n"
"vmov.s32 q10, #0 \n"
"vmov.s32 q11, #0 \n"
"vmov.s32 q12, #0 \n"
"vmov.s32 q13, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d4-d7}, [%4]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q5, d7 \n"// a30-a37
"vmovl.s8 q4, d6 \n"// a20-a27
"vmovl.s8 q3, d5 \n"// a10-a17
"vmovl.s8 q2, d4 \n"// a00-a07
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d4, d0[0] \n"// sum0 = (a00-a07) * k00
"vmlal.s16 q7, d5, d0[0] \n"
"vmlal.s16 q8, d4, d0[1] \n"// sum1 = (a00-a07) * k10
"vmlal.s16 q9, d5, d0[1] \n"
"vmlal.s16 q10, d4, d0[2] \n"// sum2 = (a00-a07) * k20
"vmlal.s16 q11, d5, d0[2] \n"
"vmlal.s16 q12, d4, d0[3] \n"// sum3 = (a00-a07) * k30
"vmlal.s16 q13, d5, d0[3] \n"
"vmlal.s16 q6, d6, d1[0] \n"// sum0 += (a10-a17) * k01
"vmlal.s16 q7, d7, d1[0] \n"
"vmlal.s16 q8, d6, d1[1] \n"// sum1 += (a10-a17) * k11
"vmlal.s16 q9, d7, d1[1] \n"
"vmlal.s16 q10, d6, d1[2] \n"// sum2 += (a10-a17) * k21
"vmlal.s16 q11, d7, d1[2] \n"
"vmlal.s16 q12, d6, d1[3] \n"// sum3 += (a10-a17) * k31
"vmlal.s16 q13, d7, d1[3] \n"
"vmlal.s16 q6, d8, d2[0] \n"// sum0 += (a20-a27) * k02
"vmlal.s16 q7, d9, d2[0] \n"
"vmlal.s16 q8, d8, d2[1] \n"// sum1 += (a20-a27) * k12
"vmlal.s16 q9, d9, d2[1] \n"
"vmlal.s16 q10, d8, d2[2] \n"// sum2 += (a20-a27) * k22
"vmlal.s16 q11, d9, d2[2] \n"
"vmlal.s16 q12, d8, d2[3] \n"// sum3 += (a20-a27) * k32
"vmlal.s16 q13, d9, d2[3] \n"
"vmlal.s16 q6, d10, d3[0] \n"// sum0 += (a30-a37) * k03
"vmlal.s16 q7, d11, d3[0] \n"
"vmlal.s16 q8, d10, d3[1] \n"// sum1 += (a30-a37) * k13
"vmlal.s16 q9, d11, d3[1] \n"
"vmlal.s16 q10, d10, d3[2] \n"// sum2 += (a30-a37) * k23
"vmlal.s16 q11, d11, d3[2] \n"
"vmlal.s16 q12, d10, d3[3] \n"// sum3 += (a30-a37) * k33
"vmlal.s16 q13, d11, d3[3] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4]! \n"// tmpr a00-a07 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %5, #4 \n"
"vmlal.s16 q6, d2, d0[0] \n"// sum0 += (a00-a07) * k00
"vmlal.s16 q7, d3, d0[0] \n"
"vmlal.s16 q8, d2, d0[1] \n"// sum1 += (a00-a07) * k10
"vmlal.s16 q9, d3, d0[1] \n"
"vmlal.s16 q10, d2, d0[2] \n"// sum2 += (a00-a07) * k20
"vmlal.s16 q11, d3, d0[2] \n"
"vmlal.s16 q12, d2, d0[3] \n"// sum3 += (a00-a07) * k30
"vmlal.s16 q13, d3, d0[3] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d12-d15}, [%0]! \n"
"vst1.s32 {d16-d19}, [%1]! \n"
"vst1.s32 {d20-d23}, [%2]! \n"
"vst1.s32 {d24-d27}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#else
int sum0_0 = 0;
int sum0_1 = 0;
int sum0_2 = 0;
int sum0_3 = 0;
int sum0_4 = 0;
int sum0_5 = 0;
int sum0_6 = 0;
int sum0_7 = 0;
int sum1_0 = 0;
int sum1_1 = 0;
int sum1_2 = 0;
int sum1_3 = 0;
int sum1_4 = 0;
int sum1_5 = 0;
int sum1_6 = 0;
int sum1_7 = 0;
int sum2_0 = 0;
int sum2_1 = 0;
int sum2_2 = 0;
int sum2_3 = 0;
int sum2_4 = 0;
int sum2_5 = 0;
int sum2_6 = 0;
int sum2_7 = 0;
int sum3_0 = 0;
int sum3_1 = 0;
int sum3_2 = 0;
int sum3_3 = 0;
int sum3_4 = 0;
int sum3_5 = 0;
int sum3_6 = 0;
int sum3_7 = 0;
for (int q=0; q<inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum0_4 += tmpptr[4] * kptr[0];
sum0_5 += tmpptr[5] * kptr[0];
sum0_6 += tmpptr[6] * kptr[0];
sum0_7 += tmpptr[7] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum1_4 += tmpptr[4] * kptr[1];
sum1_5 += tmpptr[5] * kptr[1];
sum1_6 += tmpptr[6] * kptr[1];
sum1_7 += tmpptr[7] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum2_4 += tmpptr[4] * kptr[2];
sum2_5 += tmpptr[5] * kptr[2];
sum2_6 += tmpptr[6] * kptr[2];
sum2_7 += tmpptr[7] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
sum3_4 += tmpptr[4] * kptr[3];
sum3_5 += tmpptr[5] * kptr[3];
sum3_6 += tmpptr[6] * kptr[3];
sum3_7 += tmpptr[7] * kptr[3];
tmpptr += 8;
kptr += 4;
}
outptr0[0] = sum0_0;
outptr0[1] = sum0_1;
outptr0[2] = sum0_2;
outptr0[3] = sum0_3;
outptr0[4] = sum0_4;
outptr0[5] = sum0_5;
outptr0[6] = sum0_6;
outptr0[7] = sum0_7;
outptr1[0] = sum1_0;
outptr1[1] = sum1_1;
outptr1[2] = sum1_2;
outptr1[3] = sum1_3;
outptr1[4] = sum1_4;
outptr1[5] = sum1_5;
outptr1[6] = sum1_6;
outptr1[7] = sum1_7;
outptr2[0] = sum2_0;
outptr2[1] = sum2_1;
outptr2[2] = sum2_2;
outptr2[3] = sum2_3;
outptr2[4] = sum2_4;
outptr2[5] = sum2_5;
outptr2[6] = sum2_6;
outptr2[7] = sum2_7;
outptr3[0] = sum3_0;
outptr3[1] = sum3_1;
outptr3[2] = sum3_2;
outptr3[3] = sum3_3;
outptr3[4] = sum3_4;
outptr3[5] = sum3_5;
outptr3[6] = sum3_6;
outptr3[7] = sum3_7;
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
outptr3 += 8;
#endif // __ARM_NEON
}
for (; i+3<size; i+=4)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"vmov.s32 q8, #0 \n"
"vmov.s32 q9, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d4-d5}, [%4]! \n"// tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data)
"vmovl.s8 q3, d5 \n"// a20-a23,a30-a33
"vmovl.s8 q2, d4             \n"// a00-a03,a10-a13
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d4, d0[0] \n"// sum0 = (a00-a03) * k00
"vmlal.s16 q7, d4, d0[1] \n"// sum1 = (a00-a03) * k10
"vmlal.s16 q8, d4, d0[2] \n"// sum2 = (a00-a03) * k20
"vmlal.s16 q9, d4, d0[3] \n"// sum3 = (a00-a03) * k30
"vmlal.s16 q6, d5, d1[0] \n"// sum0 += (a10-a13) * k01
"vmlal.s16 q7, d5, d1[1] \n"// sum1 += (a10-a13) * k11
"vmlal.s16 q8, d5, d1[2] \n"// sum2 += (a10-a13) * k21
"vmlal.s16 q9, d5, d1[3] \n"// sum3 += (a10-a13) * k31
"vmlal.s16 q6, d6, d2[0] \n"// sum0 += (a20-a23) * k02
"vmlal.s16 q7, d6, d2[1] \n"// sum1 += (a20-a23) * k12
"vmlal.s16 q8, d6, d2[2] \n"// sum2 += (a20-a23) * k22
"vmlal.s16 q9, d6, d2[3] \n"// sum3 += (a20-a23) * k32
"vmlal.s16 q6, d7, d3[0] \n"// sum0 += (a30-a33) * k03
"vmlal.s16 q7, d7, d3[1] \n"// sum1 += (a30-a33) * k13
"vmlal.s16 q8, d7, d3[2] \n"// sum2 += (a30-a33) * k23
"vmlal.s16 q9, d7, d3[3] \n"// sum3 += (a30-a33) * k33
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4] \n"// tmpr a00-a03 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %4, #4 \n"
"add %5, #4 \n"
"vmlal.s16 q6, d2, d0[0] \n"// sum0 += (a00-a03) * k00
"vmlal.s16 q7, d2, d0[1] \n"// sum1 += (a00-a03) * k10
"vmlal.s16 q8, d2, d0[2] \n"// sum2 += (a00-a03) * k20
"vmlal.s16 q9, d2, d0[3] \n"// sum3 += (a00-a03) * k30
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d12-d13}, [%0]! \n"
"vst1.s32 {d14-d15}, [%1]! \n"
"vst1.s32 {d16-d17}, [%2]! \n"
"vst1.s32 {d18-d19}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#else
int sum0_0 = 0;
int sum0_1 = 0;
int sum0_2 = 0;
int sum0_3 = 0;
int sum1_0 = 0;
int sum1_1 = 0;
int sum1_2 = 0;
int sum1_3 = 0;
int sum2_0 = 0;
int sum2_1 = 0;
int sum2_2 = 0;
int sum2_3 = 0;
int sum3_0 = 0;
int sum3_1 = 0;
int sum3_2 = 0;
int sum3_3 = 0;
for (int q=0; q<inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
tmpptr += 4;
kptr += 4;
}
outptr0[0] = sum0_0;
outptr0[1] = sum0_1;
outptr0[2] = sum0_2;
outptr0[3] = sum0_3;
outptr1[0] = sum1_0;
outptr1[1] = sum1_1;
outptr1[2] = sum1_2;
outptr1[3] = sum1_3;
outptr2[0] = sum2_0;
outptr2[1] = sum2_1;
outptr2[2] = sum2_2;
outptr2[3] = sum2_3;
outptr3[0] = sum3_0;
outptr3[1] = sum3_1;
outptr3[2] = sum3_2;
outptr3[3] = sum3_3;
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
#endif // __ARM_NEON
}
for (; i<size; i++)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"veor q6, q6, q6 \n"
"veor q7, q7, q7 \n"
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"vmov.s32 q10, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d4}, [%4] \n"// tmpr a00,a10,a20,a30 a(inch)(data)
"add %4, #4 \n"
"vmovl.s8 q2, d4 \n"// a00,a10,a20,a30
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d0, d4[0] \n"// (k00-k30) * a00
"vmlal.s16 q7, d1, d4[1] \n"// (k01-k31) * a10
"vmlal.s16 q8, d2, d4[2] \n"// (k02-k32) * a20
"vmlal.s16 q9, d3, d4[3] \n"// (k03-k33) * a30
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"vadd.s32 q6, q6, q7 \n"
"vadd.s32 q9, q9, q8 \n"
"vadd.s32 q10, q6, q9 \n"
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4] \n"// tmpr a00 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %4, #1 \n"
"add %5, #4 \n"
"vmlal.s16 q10, d0, d2[0] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d20[0]}, [%0]! \n"
"vst1.s32 {d20[1]}, [%1]! \n"
"vst1.s32 {d21[0]}, [%2]! \n"
"vst1.s32 {d21[1]}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[0] * kptr[1];
sum2 += tmpptr[0] * kptr[2];
sum3 += tmpptr[0] * kptr[3];
tmpptr++;
kptr += 4;
}
outptr0[0] = sum0;
outptr1[0] = sum1;
outptr2[0] = sum2;
outptr3[0] = sum3;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
#endif // __ARM_NEON
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
int* outptr0 = out0;
int i = 0;
for (; i+7<size; i+=8)
{
const signed char* tmpptr = tmp.channel(i/8);
const signed char* kptr = kernel.channel(p/4 + p%4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"lsr r4, %6, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%1, #128] \n"
"vld1.s8 {d4-d7}, [%1]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q5, d7 \n"// a30-a37
"vmovl.s8 q4, d6 \n"// a20-a27
"vmovl.s8 q3, d5 \n"// a10-a17
"vmovl.s8 q2, d4 \n"// a00-a07
"vld1.s8 {d0}, [%2] \n"// kptr k00,k01,k02,k03 k(outch)(inch)
"vmovl.s8 q0, d0 \n"// k00,k01,k02,k03
"add %2, #4 \n"
"vmlal.s16 q6, d4, d0[0] \n"// (a00-a07) * k00
"vmlal.s16 q7, d5, d0[0] \n"
"vmlal.s16 q6, d6, d0[1] \n"// (a10-a17) * k01
"vmlal.s16 q7, d7, d0[1] \n"
"vmlal.s16 q6, d8, d0[2] \n"// (a20-a27) * k02
"vmlal.s16 q7, d9, d0[2] \n"
"vmlal.s16 q6, d10, d0[3] \n"// (a30-a37) * k03
"vmlal.s16 q7, d11, d0[3] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %6, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%1]! \n"// tmpr a00-a07 a(inch)(data)
"vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %2, #1 \n"
"vmlal.s16 q6, d2, d0[0] \n"// (a00-a07) * k00
"vmlal.s16 q7, d3, d0[0] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d12-d15}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int sum4 = 0;
int sum5 = 0;
int sum6 = 0;
int sum7 = 0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
sum4 += tmpptr[4] * kptr[0];
sum5 += tmpptr[5] * kptr[0];
sum6 += tmpptr[6] * kptr[0];
sum7 += tmpptr[7] * kptr[0];
tmpptr += 8;
kptr++;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0[4] = sum4;
outptr0[5] = sum5;
outptr0[6] = sum6;
outptr0[7] = sum7;
outptr0 += 8;
#endif // __ARM_NEON
}
for (; i+3<size; i+=4)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
const signed char* kptr = kernel.channel(p/4 + p%4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"lsr r4, %6, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%2, #128] \n"
"vld1.s8 {d4-d5}, [%1]! \n"// tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data)
"vmovl.s8 q3, d5 \n"// a20-a23,a30-a33
"vmovl.s8 q2, d4 \n"// a00-a03,a10-a13
"vld1.s8 {d0}, [%2] \n"// kptr k00,k01,k02,k03 k(outch)(inch)
"vmovl.s8 q0, d0 \n"// k00,k01,k02,k03
"add %2, #4 \n"
"vmlal.s16 q6, d4, d0[0] \n"// (a00-a03) * k00
"vmlal.s16 q6, d5, d0[1] \n"// (a10-a13) * k01
"vmlal.s16 q6, d6, d0[2] \n"// (a20-a23) * k02
"vmlal.s16 q6, d7, d0[3] \n"// (a30-a33) * k03
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %6, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%1] \n"// tmpr a00-a03 a(inch)(data)
"vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %1, #4 \n"
"add %2, #1 \n"
"vmlal.s16 q6, d2, d0[0] \n"// (a00-a03) * k00
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d12-d13}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
tmpptr += 4;
kptr++;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0 += 4;
#endif // __ARM_NEON
}
for (; i<size; i++)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
const signed char* kptr = kernel.channel(p/4 + p%4);
int q = 0;
int sum0 = 0;
for (; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = sum0;
outptr0++;
}
}
// // NOTE sgemm int8
// for (; p<outch; p++)
// {
// Mat out0 = top_blob.channel(p);
//
// int* outptr0 = out0;
//
// for (int i=0; i<size; i++)
// {
// int sum = 0;
//
// const signed char* kptr = _kernel.channel(p/8 + p%8);
//
// for (int q=0; q<inch; q++)
// {
// const signed char* img0 = bottom_blob.channel(q);
//
// sum += img0[i] * kptr[0];
// kptr ++;
// }
//
// outptr0[i] = sum;
// }
// }
}
static void conv1x1s1_sgemm_int8_requant_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &kernel, const Mat &_bias, std::vector<float> scales_requant, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
const int size = w * h;
const float* bias = _bias;
// interleave
Mat tmp(8*4, inch/4+inch%4, size/8 + (size%8)/4 + size%4, 1u, opt.workspace_allocator);
{
int nn_size = size >> 3;
int remain_size_start = nn_size << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = ii * 8;
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8);
for (int q=0; q<inch; q++)
{
#if __ARM_NEON
asm volatile(
"pld [%0, #64] \n"
"vld1.s8 {d0}, [%0] \n"
"vst1.s8 {d0}, [%1]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "d0"
);
img0 += bottom_blob.cstep;
#else
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr[4] = img0[4];
tmpptr[5] = img0[5];
tmpptr[6] = img0[6];
tmpptr[7] = img0[7];
tmpptr += 8;
img0 += bottom_blob.cstep;
#endif // __ARM_NEON
}
}
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
for (int q=0; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr += 4;
img0 += bottom_blob.cstep;
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_size_start; i<size; i++)
{
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
for (int q=0; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr++;
img0 += bottom_blob.cstep;
}
}
}
// sgemm process
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
signed char* outptr0 = top_blob.channel(p);
signed char* outptr1 = top_blob.channel(p+1);
signed char* outptr2 = top_blob.channel(p+2);
signed char* outptr3 = top_blob.channel(p+3);
const float bias0 = bias ? bias[p] : 0.f;
const float bias1 = bias ? bias[p+1] : 0.f;
const float bias2 = bias ? bias[p+2] : 0.f;
const float bias3 = bias ? bias[p+3] : 0.f;
const float scale_requant_in0 = scales_requant[2*p];
const float scale_requant_out0 = scales_requant[2*p+1];
const float scale_requant_in1 = scales_requant[2*(p+1)];
const float scale_requant_out1 = scales_requant[2*(p+1)+1];
const float scale_requant_in2 = scales_requant[2*(p+2)];
const float scale_requant_out2 = scales_requant[2*(p+2)+1];
const float scale_requant_in3 = scales_requant[2*(p+3)];
const float scale_requant_out3 = scales_requant[2*(p+3)+1];
#if __ARM_NEON
float32x4_t _bias03, _scale_in03, _scale_out03;
_bias03[0] = bias0;
_bias03[1] = bias1;
_bias03[2] = bias2;
_bias03[3] = bias3;
_scale_in03[0] = scale_requant_in0;
_scale_in03[1] = scale_requant_in1;
_scale_in03[2] = scale_requant_in2;
_scale_in03[3] = scale_requant_in3;
_scale_out03[0] = scale_requant_out0;
_scale_out03[1] = scale_requant_out1;
_scale_out03[2] = scale_requant_out2;
_scale_out03[3] = scale_requant_out3;
#endif // __ARM_NEON
int i = 0;
for (; i+7<size; i+=8)
{
const signed char* tmpptr = tmp.channel(i/8);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"vmov.s32 q8, #0 \n"
"vmov.s32 q9, #0 \n"
"vmov.s32 q10, #0 \n"
"vmov.s32 q11, #0 \n"
"vmov.s32 q12, #0 \n"
"vmov.s32 q13, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d28-d31}, [%4]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q5, d31 \n"// a30-a37
"vmovl.s8 q4, d30 \n"// a20-a27
"vmovl.s8 q15, d29 \n"// a10-a17
"vmovl.s8 q14, d28 \n"// a00-a07
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d28, d0[0] \n"// sum0 = (a00-a07) * k00
"vmlal.s16 q7, d29, d0[0] \n"
"vmlal.s16 q8, d28, d0[1] \n"// sum1 = (a00-a07) * k10
"vmlal.s16 q9, d29, d0[1] \n"
"vmlal.s16 q10, d28, d0[2] \n"// sum2 = (a00-a07) * k20
"vmlal.s16 q11, d29, d0[2] \n"
"vmlal.s16 q12, d28, d0[3] \n"// sum3 = (a00-a07) * k30
"vmlal.s16 q13, d29, d0[3] \n"
"vmlal.s16 q6, d30, d1[0] \n"// sum0 += (a10-a17) * k01
"vmlal.s16 q7, d31, d1[0] \n"
"vmlal.s16 q8, d30, d1[1] \n"// sum1 += (a10-a17) * k11
"vmlal.s16 q9, d31, d1[1] \n"
"vmlal.s16 q10, d30, d1[2] \n"// sum2 += (a10-a17) * k21
"vmlal.s16 q11, d31, d1[2] \n"
"vmlal.s16 q12, d30, d1[3] \n"// sum3 += (a10-a17) * k31
"vmlal.s16 q13, d31, d1[3] \n"
"vmlal.s16 q6, d8, d2[0] \n"// sum0 += (a20-a27) * k02
"vmlal.s16 q7, d9, d2[0] \n"
"vmlal.s16 q8, d8, d2[1] \n"// sum1 += (a20-a27) * k12
"vmlal.s16 q9, d9, d2[1] \n"
"vmlal.s16 q10, d8, d2[2] \n"// sum2 += (a20-a27) * k22
"vmlal.s16 q11, d9, d2[2] \n"
"vmlal.s16 q12, d8, d2[3] \n"// sum3 += (a20-a27) * k32
"vmlal.s16 q13, d9, d2[3] \n"
"vmlal.s16 q6, d10, d3[0] \n"// sum0 += (a30-a37) * k03
"vmlal.s16 q7, d11, d3[0] \n"
"vmlal.s16 q8, d10, d3[1] \n"// sum1 += (a30-a37) * k13
"vmlal.s16 q9, d11, d3[1] \n"
"vmlal.s16 q10, d10, d3[2] \n"// sum2 += (a30-a37) * k23
"vmlal.s16 q11, d11, d3[2] \n"
"vmlal.s16 q12, d10, d3[3] \n"// sum3 += (a30-a37) * k33
"vmlal.s16 q13, d11, d3[3] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4]! \n"// tmpr a00-a07 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %5, #4 \n"
"vmlal.s16 q6, d2, d0[0] \n"// sum0 += (a00-a07) * k00
"vmlal.s16 q7, d3, d0[0] \n"
"vmlal.s16 q8, d2, d0[1] \n"// sum1 += (a00-a07) * k10
"vmlal.s16 q9, d3, d0[1] \n"
"vmlal.s16 q10, d2, d0[2] \n"// sum2 += (a00-a07) * k20
"vmlal.s16 q11, d3, d0[2] \n"
"vmlal.s16 q12, d2, d0[3] \n"// sum3 += (a00-a07) * k30
"vmlal.s16 q13, d3, d0[3] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vdup.f32 q14, %13 \n" // bias
"vdup.f32 q15, %14 \n" // bias
"vdup.f32 q4, %15 \n" // bias
"vdup.f32 q5, %16 \n" // bias
// sum0
// top_s32 -> top_f32
"vcvt.f32.s32 q6, q6 \n"
"vcvt.f32.s32 q7, q7 \n"
"vcvt.f32.s32 q8, q8 \n"
"vcvt.f32.s32 q9, q9 \n"
// top_f32 = top_f32 * scale_int
"vmul.f32 q6, q6, %e17[0] \n"
"vmul.f32 q7, q7, %e17[0] \n"
"vmul.f32 q8, q8, %e17[1] \n"
"vmul.f32 q9, q9, %e17[1] \n"
// top_f32 = top_f32 + bias
"vadd.f32 q6, q6, q14 \n"
"vadd.f32 q7, q7, q14 \n"
"vadd.f32 q8, q8, q15 \n"
"vadd.f32 q9, q9, q15 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q6, %e18[0] \n"
"vmul.f32 q1, q7, %e18[0] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d12, q0 \n"
"vqmovn.s32 d13, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d12, q6 \n"
// save top_s8
"vst1.8 {d12}, [%0]! \n"
// sum1
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q8, %e18[1] \n"
"vmul.f32 q1, q9, %e18[1] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d16, q0 \n"
"vqmovn.s32 d17, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d16, q8 \n"
// save top_s8
"vst1.8 {d16}, [%1]! \n"
// sum2
// top_s32 -> top_f32
"vcvt.f32.s32 q10, q10 \n"
"vcvt.f32.s32 q11, q11 \n"
"vcvt.f32.s32 q12, q12 \n"
"vcvt.f32.s32 q13, q13 \n"
// top_f32 = top_f32 * scale_int
"vmul.f32 q10, q10, %f17[0] \n"
"vmul.f32 q11, q11, %f17[0] \n"
"vmul.f32 q12, q12, %f17[1] \n"
"vmul.f32 q13, q13, %f17[1] \n"
// top_f32 = top_f32 + bias
"vadd.f32 q10, q10, q4 \n"
"vadd.f32 q11, q11, q4 \n"
"vadd.f32 q12, q12, q5 \n"
"vadd.f32 q13, q13, q5 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q10, %f18[0] \n"
"vmul.f32 q1, q11, %f18[0] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d20, q0 \n"
"vqmovn.s32 d21, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d20, q10 \n"
// save top_s8
"vst1.8 {d20}, [%2]! \n"
// sum3
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q12, %f18[1] \n"
"vmul.f32 q1, q13, %f18[1] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d24, q0 \n"
"vqmovn.s32 d25, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d24, q12 \n"
// save top_s8
"vst1.8 {d24}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch), // %12
"r"(bias0), // %13
"r"(bias1), // %14
"r"(bias2), // %15
"r"(bias3), // %16
"w"(_scale_in03), // %17
"w"(_scale_out03) // %18
: "cc", "memory", "r4", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13" ,"q14" ,"q15"
);
#else
int sum0_0 = 0;
int sum0_1 = 0;
int sum0_2 = 0;
int sum0_3 = 0;
int sum0_4 = 0;
int sum0_5 = 0;
int sum0_6 = 0;
int sum0_7 = 0;
int sum1_0 = 0;
int sum1_1 = 0;
int sum1_2 = 0;
int sum1_3 = 0;
int sum1_4 = 0;
int sum1_5 = 0;
int sum1_6 = 0;
int sum1_7 = 0;
int sum2_0 = 0;
int sum2_1 = 0;
int sum2_2 = 0;
int sum2_3 = 0;
int sum2_4 = 0;
int sum2_5 = 0;
int sum2_6 = 0;
int sum2_7 = 0;
int sum3_0 = 0;
int sum3_1 = 0;
int sum3_2 = 0;
int sum3_3 = 0;
int sum3_4 = 0;
int sum3_5 = 0;
int sum3_6 = 0;
int sum3_7 = 0;
for (int q=0; q<inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum0_4 += tmpptr[4] * kptr[0];
sum0_5 += tmpptr[5] * kptr[0];
sum0_6 += tmpptr[6] * kptr[0];
sum0_7 += tmpptr[7] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum1_4 += tmpptr[4] * kptr[1];
sum1_5 += tmpptr[5] * kptr[1];
sum1_6 += tmpptr[6] * kptr[1];
sum1_7 += tmpptr[7] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum2_4 += tmpptr[4] * kptr[2];
sum2_5 += tmpptr[5] * kptr[2];
sum2_6 += tmpptr[6] * kptr[2];
sum2_7 += tmpptr[7] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
sum3_4 += tmpptr[4] * kptr[3];
sum3_5 += tmpptr[5] * kptr[3];
sum3_6 += tmpptr[6] * kptr[3];
sum3_7 += tmpptr[7] * kptr[3];
tmpptr += 8;
kptr += 4;
}
outptr0[0] = sum0_0;
outptr0[1] = sum0_1;
outptr0[2] = sum0_2;
outptr0[3] = sum0_3;
outptr0[4] = sum0_4;
outptr0[5] = sum0_5;
outptr0[6] = sum0_6;
outptr0[7] = sum0_7;
outptr1[0] = sum1_0;
outptr1[1] = sum1_1;
outptr1[2] = sum1_2;
outptr1[3] = sum1_3;
outptr1[4] = sum1_4;
outptr1[5] = sum1_5;
outptr1[6] = sum1_6;
outptr1[7] = sum1_7;
outptr2[0] = sum2_0;
outptr2[1] = sum2_1;
outptr2[2] = sum2_2;
outptr2[3] = sum2_3;
outptr2[4] = sum2_4;
outptr2[5] = sum2_5;
outptr2[6] = sum2_6;
outptr2[7] = sum2_7;
outptr3[0] = sum3_0;
outptr3[1] = sum3_1;
outptr3[2] = sum3_2;
outptr3[3] = sum3_3;
outptr3[4] = sum3_4;
outptr3[5] = sum3_5;
outptr3[6] = sum3_6;
outptr3[7] = sum3_7;
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
outptr3 += 8;
#endif // __ARM_NEON
}
for (; i+3<size; i+=4)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"vmov.s32 q8, #0 \n"
"vmov.s32 q9, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d28-d29}, [%4]! \n"// tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data)
"vmovl.s8 q15, d29 \n"// a20-a23,a30-a33
"vmovl.s8 q14, d28           \n"// a00-a03,a10-a13
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d28, d0[0] \n"// sum0 = (a00-a03) * k00
"vmlal.s16 q7, d28, d0[1] \n"// sum1 = (a00-a03) * k10
"vmlal.s16 q8, d28, d0[2] \n"// sum2 = (a00-a03) * k20
"vmlal.s16 q9, d28, d0[3] \n"// sum3 = (a00-a03) * k30
"vmlal.s16 q6, d29, d1[0] \n"// sum0 += (a10-a13) * k01
"vmlal.s16 q7, d29, d1[1] \n"// sum1 += (a10-a13) * k11
"vmlal.s16 q8, d29, d1[2] \n"// sum2 += (a10-a13) * k21
"vmlal.s16 q9, d29, d1[3] \n"// sum3 += (a10-a13) * k31
"vmlal.s16 q6, d30, d2[0] \n"// sum0 += (a20-a23) * k02
"vmlal.s16 q7, d30, d2[1] \n"// sum1 += (a20-a23) * k12
"vmlal.s16 q8, d30, d2[2] \n"// sum2 += (a20-a23) * k22
"vmlal.s16 q9, d30, d2[3] \n"// sum3 += (a20-a23) * k32
"vmlal.s16 q6, d31, d3[0] \n"// sum0 += (a30-a33) * k03
"vmlal.s16 q7, d31, d3[1] \n"// sum1 += (a30-a33) * k13
"vmlal.s16 q8, d31, d3[2] \n"// sum2 += (a30-a33) * k23
"vmlal.s16 q9, d31, d3[3] \n"// sum3 += (a30-a33) * k33
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4] \n"// tmpr a00-a03 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %4, #4 \n"
"add %5, #4 \n"
"vmlal.s16 q6, d2, d0[0] \n"// sum0 += (a00-a03) * k00
"vmlal.s16 q7, d2, d0[1] \n"// sum1 += (a00-a03) * k10
"vmlal.s16 q8, d2, d0[2] \n"// sum2 += (a00-a03) * k20
"vmlal.s16 q9, d2, d0[3] \n"// sum3 += (a00-a03) * k30
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vdup.f32 q14, %13 \n" // bias
"vdup.f32 q15, %14 \n" // bias
"vdup.f32 q4, %15 \n" // bias
"vdup.f32 q5, %16 \n" // bias
// sum0-1
// top_s32 -> top_f32
"vcvt.f32.s32 q6, q6 \n"
"vcvt.f32.s32 q7, q7 \n"
"vcvt.f32.s32 q8, q8 \n"
"vcvt.f32.s32 q9, q9 \n"
// top_f32 = top_f32 * scale_int
"vmul.f32 q6, q6, %e17[0] \n"
"vmul.f32 q7, q7, %e17[1] \n"
"vmul.f32 q8, q8, %f17[0] \n"
"vmul.f32 q9, q9, %f17[1] \n"
// top_f32 = top_f32 + bias
"vadd.f32 q6, q6, q14 \n"
"vadd.f32 q7, q7, q15 \n"
"vadd.f32 q8, q8, q4 \n"
"vadd.f32 q9, q9, q5 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q6, %e18[0] \n"
"vmul.f32 q1, q7, %e18[1] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d12, q0 \n"
"vqmovn.s32 d13, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d12, q6 \n"
// save top_s8
"vst1.s32 {d12[0]}, [%0]! \n"
"vst1.s32 {d12[1]}, [%1]! \n"
// sum2-3
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q8, %f18[0] \n"
"vmul.f32 q1, q9, %f18[1] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d16, q0 \n"
"vqmovn.s32 d17, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d16, q8 \n"
// save top_s8
"vst1.s32 {d16[0]}, [%2]! \n"
"vst1.s32 {d16[1]}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch), // %12
"r"(bias0), // %13
"r"(bias1), // %14
"r"(bias2), // %15
"r"(bias3), // %16
"w"(_scale_in03), // %17
"w"(_scale_out03) // %18
: "cc", "memory", "r4", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#else
int sum0_0 = 0;
int sum0_1 = 0;
int sum0_2 = 0;
int sum0_3 = 0;
int sum1_0 = 0;
int sum1_1 = 0;
int sum1_2 = 0;
int sum1_3 = 0;
int sum2_0 = 0;
int sum2_1 = 0;
int sum2_2 = 0;
int sum2_3 = 0;
int sum3_0 = 0;
int sum3_1 = 0;
int sum3_2 = 0;
int sum3_3 = 0;
for (int q=0; q<inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
tmpptr += 4;
kptr += 4;
}
outptr0[0] = sum0_0;
outptr0[1] = sum0_1;
outptr0[2] = sum0_2;
outptr0[3] = sum0_3;
outptr1[0] = sum1_0;
outptr1[1] = sum1_1;
outptr1[2] = sum1_2;
outptr1[3] = sum1_3;
outptr2[0] = sum2_0;
outptr2[1] = sum2_1;
outptr2[2] = sum2_2;
outptr2[3] = sum2_3;
outptr3[0] = sum3_0;
outptr3[1] = sum3_1;
outptr3[2] = sum3_2;
outptr3[3] = sum3_3;
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
#endif // __ARM_NEON
}
for (; i<size; i++)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"veor q6, q6, q6 \n"
"veor q7, q7, q7 \n"
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"vmov.s32 q10, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d4}, [%4] \n"// tmpr a00,a10,a20,a30 a(inch)(data)
"add %4, #4 \n"
"vmovl.s8 q2, d4 \n"// a00,a10,a20,a30
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d0, d4[0] \n"// (k00-k30) * a00
"vmlal.s16 q7, d1, d4[1] \n"// (k01-k31) * a10
"vmlal.s16 q8, d2, d4[2] \n"// (k02-k32) * a20
"vmlal.s16 q9, d3, d4[3] \n"// (k03-k33) * a30
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"vadd.s32 q6, q6, q7 \n"
"vadd.s32 q9, q9, q8 \n"
"vadd.s32 q10, q6, q9 \n"
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4] \n"// tmpr a00 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %4, #1 \n"
"add %5, #4 \n"
"vmlal.s16 q10, d0, d2[0] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
// top_s32 -> top_f32
"vcvt.f32.s32 q10, q10 \n"
// top_f32 = top_f32 * scale_int
"vmul.f32 q10, q10, %q14 \n"
// top_f32 = top_f32 + bias
"vadd.f32 q10, q10, %q13 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q10, %q15 \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
// top_s32 -> top_s16
"vqmovn.s32 d12, q0 \n"
// top_s16 -> top_s8
"vqmovn.s16 d12, q6 \n"
// save top_s8
"vst1.8 {d12[0]}, [%0]! \n"
"vst1.8 {d12[1]}, [%1]! \n"
"vst1.8 {d12[2]}, [%2]! \n"
"vst1.8 {d12[3]}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch), // %12
"w"(_bias03), // %13
"w"(_scale_in03), // %14
"w"(_scale_out03) // %15
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[0] * kptr[1];
sum2 += tmpptr[0] * kptr[2];
sum3 += tmpptr[0] * kptr[3];
tmpptr++;
kptr += 4;
}
outptr0[0] = sum0;
outptr1[0] = sum1;
outptr2[0] = sum2;
outptr3[0] = sum3;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
#endif // __ARM_NEON
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
signed char* outptr0 = out0;
const float bias0 = bias ? bias[p] : 0.f;
const float scale_requant_in = scales_requant[2*p];
const float scale_requant_out = scales_requant[2*p+1];
#if __ARM_NEON
float32x4_t _bias0 = vdupq_n_f32(bias0);
float32x4_t _scale_in = vdupq_n_f32(scale_requant_in);
float32x4_t _scale_out = vdupq_n_f32(scale_requant_out);
#endif // __ARM_NEON
int i = 0;
for (; i+7<size; i+=8)
{
const signed char* tmpptr = tmp.channel(i/8);
const signed char* kptr = kernel.channel(p/4 + p%4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"lsr r4, %6, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%1, #128] \n"
"vld1.s8 {d4-d7}, [%1]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q5, d7 \n"// a30-a37
"vmovl.s8 q4, d6 \n"// a20-a27
"vmovl.s8 q3, d5 \n"// a10-a17
"vmovl.s8 q2, d4 \n"// a00-a07
"vld1.s8 {d0}, [%2] \n"// kptr k00,k01,k02,k03 k(outch)(inch)
"vmovl.s8 q0, d0 \n"// k00,k01,k02,k03
"add %2, #4 \n"
"vmlal.s16 q6, d4, d0[0] \n"// (a00-a07) * k00
"vmlal.s16 q7, d5, d0[0] \n"
"vmlal.s16 q6, d6, d0[1] \n"// (a10-a17) * k01
"vmlal.s16 q7, d7, d0[1] \n"
"vmlal.s16 q6, d8, d0[2] \n"// (a20-a27) * k02
"vmlal.s16 q7, d9, d0[2] \n"
"vmlal.s16 q6, d10, d0[3] \n"// (a30-a37) * k03
"vmlal.s16 q7, d11, d0[3] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %6, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%1]! \n"// tmpr a00-a07 a(inch)(data)
"vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %2, #1 \n"
"vmlal.s16 q6, d2, d0[0] \n"// (a00-a07) * k00
"vmlal.s16 q7, d3, d0[0] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
// top_s32 -> top_f32
"vcvt.f32.s32 q6, q6 \n"
"vcvt.f32.s32 q7, q7 \n"
// top_f32 = top_f32 * scale_in
"vmul.f32 q6, q6, %q8 \n"
"vmul.f32 q7, q7, %q8 \n"
// top_f32 = top_f32 + bias
"vadd.f32 q6, q6, %q7 \n"
"vadd.f32 q7, q7, %q7 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q6, %q9 \n"
"vmul.f32 q1, q7, %q9 \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d12, q0 \n"
"vqmovn.s32 d13, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d12, q6 \n"
// save top_s8
"vst1.8 {d12}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(inch), // %6
"w"(_bias0), // %7
"w"(_scale_in), // %8
"w"(_scale_out) // %9
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int sum4 = 0;
int sum5 = 0;
int sum6 = 0;
int sum7 = 0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
sum4 += tmpptr[4] * kptr[0];
sum5 += tmpptr[5] * kptr[0];
sum6 += tmpptr[6] * kptr[0];
sum7 += tmpptr[7] * kptr[0];
tmpptr += 8;
kptr++;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0[4] = sum4;
outptr0[5] = sum5;
outptr0[6] = sum6;
outptr0[7] = sum7;
outptr0 += 8;
#endif // __ARM_NEON
}
for (; i+3<size; i+=4)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
const signed char* kptr = kernel.channel(p/4 + p%4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"lsr r4, %6, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%2, #128] \n"
"vld1.s8 {d4-d5}, [%1]! \n"// tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data)
"vmovl.s8 q3, d5 \n"// a20-a23,a30-a33
"vmovl.s8 q2, d4 \n"// a00-a03,a10-a13
"vld1.s8 {d0}, [%2] \n"// kptr k00,k01,k02,k03 k(outch)(inch)
"vmovl.s8 q0, d0 \n"// k00,k01,k02,k03
"add %2, #4 \n"
"vmlal.s16 q6, d4, d0[0] \n"// (a00-a03) * k00
"vmlal.s16 q6, d5, d0[1] \n"// (a10-a13) * k01
"vmlal.s16 q6, d6, d0[2] \n"// (a20-a23) * k02
"vmlal.s16 q6, d7, d0[3] \n"// (a30-a33) * k03
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %6, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%1] \n"// tmpr a00-a03 a(inch)(data)
"vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %1, #4 \n"
"add %2, #1 \n"
"vmlal.s16 q6, d2, d0[0] \n"// (a00-a03) * k00
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
// top_s32 -> top_f32
"vcvt.f32.s32 q6, q6 \n"
// top_f32 = top_f32 * scale_in
"vmul.f32 q6, q6, %q8 \n"
// top_f32 = top_f32 + bias
"vadd.f32 q6, q6, %q7 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q6, %q9 \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
// top_s32 -> top_s16
"vqmovn.s32 d12, q0 \n"
// top_s16 -> top_s8
"vqmovn.s16 d12, q6 \n"
"vst1.s32 {d12[0]}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(inch), // %6
"w"(_bias0), // %7
"w"(_scale_in), // %8
"w"(_scale_out) // %9
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
tmpptr += 4;
kptr++;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0 += 4;
#endif // __ARM_NEON
}
// Remainder loop: handles the last (size % 4) output positions one at a time.
// tmp.channel(i/8 + (i%8)/4 + i%4) follows the packing order used when tmp
// was filled: 8-wide tiles first, then 4-wide tiles, then singles — TODO
// confirm against the packing code earlier in this function.
// NOTE(review): like the other scalar fallbacks, this stores the raw int32
// sum without the scale_in/bias/scale_out requantization that the NEON
// paths apply — verify this is intended.
for (; i<size; i++)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
const signed char* kptr = kernel.channel(p/4 + p%4);
int q = 0;
int sum0 = 0;
for (; q<inch; q++)
{
// int8 * int8 accumulated in int32
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = sum0;
outptr0++;
}
}
}
#endif
|
GB_unop__identity_int16_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int16_uint8)
// op(A') function: GB (_unop_tran__identity_int16_uint8)
// C type: int16_t
// A type: uint8_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = (int16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = (int16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the IDENTITY unary op elementwise: Cx [p] = (int16_t) Ax [p].
// Handles both the full case (Ab == NULL, all anz entries present) and the
// bitmap case (only entries with Ab [p] != 0 are present).  Aliasing Cx and
// Ax is safe because each iteration reads and writes only position p.
// (File is auto-generated; comments mirror the Generator template.)
GrB_Info GB (_unop_apply__identity_int16_uint8)
(
int16_t *Cx, // Cx and Ax may be aliased
const uint8_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this operator/type combination was disabled at compile time;
// caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full case: every position 0..anz-1 holds an entry
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
int16_t z = (int16_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint8_t aij = Ax [p] ;
int16_t z = (int16_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (A'): transpose with typecast uint8_t -> int16_t.
// The actual work is in the shared template GB_unop_transpose.c, which
// expands the GB_CAST_OP macro defined above for each entry.
GrB_Info GB (_unop_tran__identity_int16_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
target_parallel_for_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 %s -Wuninitialized
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target parallel for simd'}}
#pragma omp target parallel for simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp target parallel for simd'}}
#pragma omp target parallel for simd foo
// -verify test: directive with no clauses; the expected-*@+N annotations
// below are line-relative test directives — do not insert/delete lines
// inside this function.
void test_no_clause() {
int i;
#pragma omp target parallel for simd
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{statement after '#pragma omp target parallel for simd' must be a for loop}}
#pragma omp target parallel for simd
++i;
}
// -verify test: branches (goto/return) may not cross the OpenMP region
// boundary; annotations are line-relative — do not reflow.
void test_branch_protected_scope() {
int i = 0;
L1:
++i;
int x[24];
#pragma omp target parallel for simd
for (i = 0; i < 16; ++i) {
if (i == 5)
goto L1; // expected-error {{use of undeclared label 'L1'}}
else if (i == 6)
return; // expected-error {{cannot return from OpenMP region}}
else if (i == 7)
goto L2;
else if (i == 8) {
L2:
x[i]++;
}
}
if (x[0] == 0)
goto L2; // expected-error {{use of undeclared label 'L2'}}
else if (x[1] == 1)
goto L1;
}
// -verify test: unknown tokens after the directive are warned about and
// ignored; annotations are line-relative — do not reflow.
void test_invalid_clause() {
int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
#pragma omp target parallel for simd foo bar
for (i = 0; i < 16; ++i)
;
}
// -verify test: stray punctuation after the directive or its clauses is
// warned about and ignored; annotations are line-relative — do not reflow.
void test_non_identifiers() {
int i, x;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
#pragma omp target parallel for simd;
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
#pragma omp target parallel for simd private(x);
for (i = 0; i < 16; ++i)
;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
#pragma omp target parallel for simd, private(x);
for (i = 0; i < 16; ++i)
;
}
extern int foo();
// -verify test: parse and semantic diagnostics for the 'collapse' clause
// (malformed argument lists, non-constant/non-positive arguments, loop-nest
// depth checks, and interaction with firstprivate and nested constructs).
// All expected-*@+N annotations are line-relative — do not reflow lines.
void test_collapse() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for simd collapse
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd collapse(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd collapse()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd collapse(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd collapse(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for simd collapse 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4,
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4, )
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
// expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4, , 4)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
#pragma omp target parallel for simd collapse(4)
for (int i1 = 0; i1 < 16; ++i1)
for (int i2 = 0; i2 < 16; ++i2)
for (int i3 = 0; i3 < 16; ++i3)
for (int i4 = 0; i4 < 16; ++i4)
foo();
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp target parallel for simd collapse(4, 8)
for (i = 0; i < 16; ++i)
; // expected-error {{expected 4 for loops after '#pragma omp target parallel for simd', but found only 1}}
// expected-error@+1 {{integer constant expression}}
#pragma omp target parallel for simd collapse(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{integer constant expression}}
#pragma omp target parallel for simd collapse(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd collapse(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd collapse(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd collapse(5 - 5)
for (i = 0; i < 16; ++i)
;
// expected-note@+1 {{defined as firstprivate}}
#pragma omp target parallel for simd collapse(2) firstprivate(i) // expected-note {{defined as firstprivate}}
for (i = 0; i < 16; ++i) // expected-error {{loop iteration variable in the associated loop of 'omp target parallel for simd' directive may not be firstprivate, predetermined as lastprivate}}
// expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}}
for (int j = 0; j < 16; ++j)
// expected-error@+2 2 {{reduction variable must be shared}}
// expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}}
#pragma omp for reduction(+ : i, j)
for (int k = 0; k < 16; ++k)
i += j;
}
// -verify test: parse diagnostics for the 'private' clause plus valid
// usages; annotations are line-relative — do not reflow.
void test_private() {
int i;
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd private(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for simd private(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for simd private(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd private()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd private(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target parallel for simd private(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target parallel for simd private(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for simd private(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for simd private(x, y, z)
for (i = 0; i < 16; ++i) {
x = y * i + z;
}
}
// -verify test: parse diagnostics for the 'lastprivate' clause plus valid
// usages; annotations are line-relative — do not reflow.
void test_lastprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd lastprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for simd lastprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for simd lastprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd lastprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd lastprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target parallel for simd lastprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target parallel for simd lastprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for simd lastprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for simd lastprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// -verify test: parse diagnostics for the 'firstprivate' clause, and
// combined lastprivate+firstprivate usages; annotations are line-relative —
// do not reflow.
void test_firstprivate() {
int i;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd firstprivate(
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for simd firstprivate(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for simd firstprivate(, )
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd firstprivate()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd firstprivate(int)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected variable name}}
#pragma omp target parallel for simd firstprivate(0)
for (i = 0; i < 16; ++i)
;
int x, y, z;
#pragma omp target parallel for simd lastprivate(x) firstprivate(x)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for simd lastprivate(x, y) firstprivate(x, y)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for simd lastprivate(x, y, z) firstprivate(x, y, z)
for (i = 0; i < 16; ++i)
;
}
// -verify test: the loop iteration variable must be of integer or pointer
// type (float/double rejected); annotations are line-relative — do not
// reflow.
void test_loop_messages() {
float a[100], b[100], c[100];
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target parallel for simd
for (float fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
// expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp target parallel for simd
for (double fi = 0; fi < 10.0; fi++) {
c[(int)fi] = a[(int)fi] + b[(int)fi];
}
}
// -verify test: parse and semantic diagnostics for the 'safelen' clause
// (malformed argument lists, non-constant and non-positive arguments);
// annotations are line-relative — do not reflow.
void test_safelen() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for simd safelen
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd safelen()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for simd safelen 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(4
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(4,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for simd safelen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(4 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for simd safelen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd safelen(4, 8)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{integer constant expression}}
#pragma omp target parallel for simd safelen(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{integer constant expression}}
#pragma omp target parallel for simd safelen(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd safelen(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd safelen(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd safelen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
// -verify test: parse and semantic diagnostics for the 'simdlen' clause,
// structurally parallel to test_safelen above; annotations are
// line-relative — do not reflow.
void test_simdlen() {
int i;
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for simd simdlen
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd simdlen()
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(,
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(, )
for (i = 0; i < 16; ++i)
;
// expected-warning@+2 {{extra tokens at the end of '#pragma omp target parallel for simd' are ignored}}
// expected-error@+1 {{expected '('}}
#pragma omp target parallel for simd simdlen 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(4
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(4,
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(4, )
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for simd simdlen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(4 4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(4, , 4)
for (i = 0; i < 16; ++i)
;
#pragma omp target parallel for simd simdlen(4)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{expected ')'}}
// expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd simdlen(4, 8)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{integer constant expression}}
#pragma omp target parallel for simd simdlen(2.5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{integer constant expression}}
#pragma omp target parallel for simd simdlen(foo())
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd simdlen(-5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd simdlen(0)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}}
#pragma omp target parallel for simd simdlen(5 - 5)
for (i = 0; i < 16; ++i)
;
}
// -verify test: simdlen must be <= safelen, regardless of clause order;
// annotations are line-relative — do not reflow.
void test_safelen_simdlen() {
int i;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp target parallel for simd simdlen(6) safelen(5)
for (i = 0; i < 16; ++i)
;
// expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}}
#pragma omp target parallel for simd safelen(5) simdlen(6)
for (i = 0; i < 16; ++i)
;
}
// -verify test: 'nontemporal' clause — rejected under OpenMP 4.5
// (omp45-error) and parsed/semantically checked under OpenMP 5.0
// (omp50-error); annotations are line-relative — do not reflow.
void test_nontemporal() {
int i;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd nontemporal(
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd nontemporal(,
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-error@+1 2 {{expected expression}}
#pragma omp target parallel for simd nontemporal(, )
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd nontemporal()
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-error@+1 {{expected expression}}
#pragma omp target parallel for simd nontemporal(int)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} omp50-error@+1 {{expected variable name}}
#pragma omp target parallel for simd nontemporal(0)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp target parallel for simd nontemporal(x)
for (i = 0; i < 16; ++i)
;
// expected-error@+2 {{use of undeclared identifier 'x'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp target parallel for simd nontemporal(x, y)
for (i = 0; i < 16; ++i)
;
// expected-error@+3 {{use of undeclared identifier 'x'}}
// expected-error@+2 {{use of undeclared identifier 'y'}}
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp target parallel for simd nontemporal(x, y, z)
for (i = 0; i < 16; ++i)
;
int x, y;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp target parallel for simd nontemporal(x :)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}}
#pragma omp target parallel for simd nontemporal(x :, )
for (i = 0; i < 16; ++i)
;
// omp50-note@+2 {{defined as nontemporal}}
// omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}}
#pragma omp target parallel for simd nontemporal(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}}
#pragma omp target parallel for simd private(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}}
#pragma omp target parallel for simd nontemporal(x) private(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}}
#pragma omp target parallel for simd nontemporal(x, y : 0)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}}
#pragma omp target parallel for simd nontemporal(x) lastprivate(x)
for (i = 0; i < 16; ++i)
;
// omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp target parallel for simd'}}
#pragma omp target parallel for simd lastprivate(x) nontemporal(x)
for (i = 0; i < 16; ++i)
;
}
|
GB_unop__frexpe_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__frexpe_fp32_fp32)
// op(A') function: GB (_unop_tran__frexpe_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = GB_frexpef (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_frexpef (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = GB_frexpef (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FREXPE || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_frexpef (Ax [p]) for all p in 0..anz-1.  If A is bitmap
// (Ab != NULL), only slots with Ab [p] != 0 hold entries and are computed.
// Returns GrB_NO_VALUE when this operator/type pairing is compiled out
// (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB (_unop_apply__frexpe_fp32_fp32)
(
    float *Cx,                  // output values; Cx and Ax may be aliased
    const float *Ax,            // input values of A
    const int8_t *restrict Ab,  // A->b if A is bitmap, NULL otherwise
    int64_t anz,                // number of entries (or bitmap slots) in A
    int nthreads                // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // sparse/hypersparse/full case: every slot of Ax holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;             // fp32 -> fp32: no real typecast
            Cx [p] = GB_frexpef (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip slots holding no entry
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = GB_frexpef (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = frexpe (A'): transpose A and apply the FREXPE operator.  The entire
// loop body is generated by textually including the shared transpose
// template, which is driven by the GB_* macros defined earlier in this file.
// NOTE(review): the semantics of Workspaces / A_slice / nworkspaces are
// defined inside GB_unop_transpose.c and are not visible here.
GrB_Info GB (_unop_tran__frexpe_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
parallel-simple.c | /*
Copyright (c) 2015-2019, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Simone Atzeni (simone@cs.utah.edu), Joachim Protze
(joachim.protze@tu-dresden.de), Jonas Hahnfeld
(hahnfeld@itc.rwth-aachen.de), Ganesh Gopalakrishnan, Zvonimir
Rakamaric, Dong H. Ahn, Gregory L. Lee, Ignacio Laguna, and Martin
Schulz.
LLNL-CODE-773957
All rights reserved.
This file is part of Archer. For details, see
https://pruners.github.io/archer. Please also read
https://github.com/PRUNERS/archer/blob/master/LICENSE.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// RUN: %libarcher-compile-and-run-race | FileCheck %s
#include <omp.h>
#include <stdio.h>
// Archer/ThreadSanitizer regression test: two threads increment a shared
// variable with no synchronization.  The data race on 'var' is INTENTIONAL;
// the RUN line above expects TSan to report it (see the CHECK lines below).
// Do NOT "fix" the race -- that would invalidate the test.
int main(int argc, char* argv[])
{
    int var = 0;

    // Deliberately unsynchronized read-modify-write from two threads.
    #pragma omp parallel num_threads(2) shared(var)
    {
        var++;
    }

    // Because of the race var may end up 1 or 2; the FileCheck patterns on
    // stderr, not the exit status, are what the test harness verifies.
    int error = (var != 2);
    fprintf(stderr, "DONE\n");
    return error;
}
// CHECK: WARNING: ThreadSanitizer: data race
// CHECK: Write of size 4
// CHECK: #0 .omp_outlined.
// CHECK: Previous write of size 4
// CHECK: #0 .omp_outlined.
// CHECK: DONE
|
GB_unop__identity_uint64_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint64_int16
// op(A') function: GB_unop_tran__identity_uint64_int16
// C type: uint64_t
// A type: int16_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = (uint64_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (uint64_t) Ax [p] for all p: apply the IDENTITY operator with a
// typecast from int16_t to uint64_t.  Negative inputs wrap modulo 2^64 per
// the C conversion rules.  Returns GrB_NO_VALUE when compiled out.
GrB_Info GB_unop_apply__identity_uint64_int16
(
    uint64_t *Cx,           // output values; Cx and Ax may be aliased
    const int16_t *Ax,      // input values of A
    int64_t anz,            // number of entries in A
    int nthreads            // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        int16_t aij = Ax [p] ;
        uint64_t z = (uint64_t) aij ;   // the int16 -> uint64 typecast
        Cx [p] = z ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = (uint64_t) A': transpose, typecast int16 -> uint64, and apply the
// IDENTITY operator.  The body is the shared transpose template (phase 2),
// driven by the GB_* macros defined earlier in this file.
// NOTE(review): the meaning of Rowcounts / Iter / A_slice / naslice is
// defined inside GB_unop_transpose.c and is not visible here.
GrB_Info GB_unop_tran__identity_uint64_int16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
mxnet_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file mxnet_op.h
* \brief
* \author Junyuan Xie
*/
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_
#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#ifdef __CUDACC__
#include "../common/cuda_utils.h"
#endif // __CUDACC__
namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif
template<typename xpu>
int get_num_threads(const int N);
#ifdef __CUDACC__
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
i < (n); \
i += blockDim.x * gridDim.x)
/*!
 * \brief Query the cudaDeviceProp of the currently active CUDA device.
 * Both runtime calls are checked through CUDA_CALL (defined in
 * ../common/cuda_utils.h -- presumably it aborts/logs on failure; confirm).
 */
inline cudaDeviceProp cuda_get_device_prop() {
  int device;
  CUDA_CALL(cudaGetDevice(&device));
  cudaDeviceProp deviceProp;
  CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device));
  return deviceProp;
}
/*!
 * \brief Number of CUDA blocks needed to cover N work items with
 * kBaseThreadNum threads per block, capped at kMaxGridNum.
 */
inline int cuda_get_num_blocks(const int N) {
  using namespace mshadow::cuda;
  // ceil(N / kBaseThreadNum), then clamp to the maximum grid size.
  const int nblocks = (N + kBaseThreadNum - 1) / kBaseThreadNum;
  return nblocks < kMaxGridNum ? nblocks : kMaxGridNum;
}
// GPU specialization: the "thread count" for N items is the total number of
// CUDA threads a full launch would use (blocks * threads-per-block).
template<>
inline int get_num_threads<gpu>(const int N) {
  using namespace mshadow::cuda;
  return kBaseThreadNum * cuda_get_num_blocks(N);
}
#endif // __CUDACC__
// CPU specialization: N is ignored; report the OpenMP thread budget.
template<>
inline int get_num_threads<cpu>(const int N) {
  return omp_get_max_threads();
}
/*! \brief operator request type switch */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \
switch (req) { \
case kNullOp: \
break; \
case kWriteInplace: \
case kWriteTo: \
{ \
const OpReqType ReqType = kWriteTo; \
{__VA_ARGS__} \
} \
break; \
case kAddTo: \
{ \
const OpReqType ReqType = kAddTo; \
{__VA_ARGS__} \
} \
break; \
default: \
break; \
}
/*!
* \brief assign the val to out according
* to request in Kernel::Launch
* \param out the data to be assigned
* \param req the assignment request
* \param val the value to be assigned to out
* \tparam OType output type
* \tparam VType value type
*/
#define KERNEL_ASSIGN(out, req, val) \
{ \
switch (req) { \
case kNullOp: \
break; \
case kWriteTo: \
case kWriteInplace: \
(out) = (val); \
break; \
case kAddTo: \
(out) += (val); \
break; \
default: \
break; \
} \
}
/* \brief Compute flattened index given coordinates and shape. */
template<int ndim>
MSHADOW_XINLINE int ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  int ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    // The (shape[i] > coord[i]) factor zeroes the contribution of any axis
    // whose coordinate is out of range -- presumably so size-1 (broadcast)
    // axes map every coordinate to offset 0.  TODO(review): confirm intent.
    ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
  }
  return ret;
}
/* Recover the per-axis coordinates of flat index `idx` inside `shape`
   (row-major order, innermost axis last). */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const int idx, const Shape<ndim>& shape) {
  Shape<ndim> coord;
  int rem = idx;
  #pragma unroll
  for (int d = ndim - 1; d >= 0; --d) {
    // j - (j / s) * s == j % s in C++, so div/mod reproduces the original
    // subtract-multiply arithmetic exactly.
    coord[d] = rem % shape[d];
    rem /= shape[d];
  }
  return coord;
}
/* Inner product of a coordinate vector with a stride vector. */
template<int ndim>
MSHADOW_XINLINE int dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  int acc = 0;
  #pragma unroll
  for (int d = 0; d < ndim; ++d) {
    acc += coord[d] * stride[d];
  }
  return acc;
}
/* Fused unravel + dot: unravel flat index `idx` under `shape` and dot the
   resulting coordinates with `stride`, without materializing a Shape. */
template<int ndim>
MSHADOW_XINLINE int unravel_dot(const int idx, const Shape<ndim>& shape,
                                const Shape<ndim>& stride) {
  int offset = 0;
  int rem = idx;
  #pragma unroll
  for (int d = ndim - 1; d >= 0; --d) {
    // rem % shape[d] is this axis' coordinate (identical to the original
    // j - (j / s) * s form); rem /= shape[d] moves to the next axis.
    offset += (rem % shape[d]) * stride[d];
    rem /= shape[d];
  }
  return offset;
}
/* Calculate stride of each dim from shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t cumprod = 1;
  #pragma unroll
  for (int i = ndim - 1; i >= 0; --i) {
    // Axes of extent <= 1 get stride 0, so any coordinate on them adds
    // nothing to the offset; other axes get the usual row-major stride
    // (product of the extents of all inner axes).
    stride[i] = (shape[i] > 1) ? cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}
// Kernel functor: write the constant `val` into out[i].
struct fill {
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const DType val) {
    out[i] = val;
  }
};
// Kernel functor: zero out[i].
struct set_zero {
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType* out) {
    out[i] = static_cast<DType>(0);
  }
};
/*! \brief Select assignment operation based upon the req value
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
 */
template<typename OP, int req>
struct op_with_req {
  // Unary: out[i] = OP(in[i]), honoring the write/add request `req`.
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }
  // Binary elementwise: out[i] = OP(lhs[i], rhs[i]).
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }
  // Array-with-scalar: out[i] = OP(in[i], value).
  template<typename DType>
  MSHADOW_XINLINE static void Map(int i, DType *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
};
// Primary template; specialized per device below.
template<typename OP, typename xpu>
struct Kernel;

// CPU specialization: run OP::Map for every i in [0, N) on the host.
template<typename OP>
struct Kernel<OP, cpu> {
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<cpu> *s, int N, Args... args) {
    // NOTE(review): the loop is OpenMP-parallel only in non-CUDA builds;
    // when MXNET_USE_CUDA != 0 it runs serially -- confirm this is intended.
#if (MXNET_USE_CUDA == 0)
    #pragma omp parallel for
#endif
    for (int i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
  }
};
#ifdef __CUDACC__
// Generic CUDA kernel: grid-stride loop over [0, N), calling OP::Map(i, ...)
// so any grid size covers all N items.
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, args...);
  }
}
// GPU specialization: launch mxnet_generic_kernel on the stream's CUDA
// stream, with enough blocks to cover N items (capped at kMaxGridNum).
// The launch is asynchronous; no synchronization or error check here.
template<typename OP>
struct Kernel<OP, gpu> {
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
        <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
            N, args...);
  }
};
#endif // __CUDACC__
} // namespace mxnet_op
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_MXNET_OP_H_
|
core_dtrsm_blasfeo.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from core_blas/core_ztrsm.c, normal z -> d, Thu Aug 8 17:24:56 2019
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
#include "blasfeo_d_aux.h"
/***************************************************************************//**
*
* @ingroup core_trsm
*
* Solves one of the matrix equations
*
* \f[ op( A )\times X = \alpha B, \f] or
* \f[ X \times op( A ) = \alpha B, \f]
*
* where op( A ) is one of:
* \f[ op( A ) = A, \f]
* \f[ op( A ) = A^T, \f]
* \f[ op( A ) = A^T, \f]
*
* alpha is a scalar, X and B are m-by-n matrices, and
* A is a unit or non-unit, upper or lower triangular matrix.
* The matrix X overwrites B.
*
*******************************************************************************
*
* @param[in] side
* - PlasmaLeft: op(A)*X = B,
* - PlasmaRight: X*op(A) = B.
*
* @param[in] uplo
* - PlasmaUpper: A is upper triangular,
* - PlasmaLower: A is lower triangular.
*
* @param[in] transa
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* - PlasmaNonUnit: A has non-unit diagonal,
* - PlasmaUnit: A has unit diagonal.
*
* @param[in] m
* The number of rows of the matrix B. m >= 0.
*
* @param[in] n
* The number of columns of the matrix B. n >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* The lda-by-ka triangular matrix,
* where ka = m if side = PlasmaLeft,
* and ka = n if side = PlasmaRight.
* If uplo = PlasmaUpper, the leading k-by-k upper triangular part
* of the array A contains the upper triangular matrix, and the
* strictly lower triangular part of A is not referenced.
* If uplo = PlasmaLower, the leading k-by-k lower triangular part
* of the array A contains the lower triangular matrix, and the
* strictly upper triangular part of A is not referenced.
* If diag = PlasmaUnit, the diagonal elements of A are also not
* referenced and are assumed to be 1.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,k).
*
* @param[in,out] B
* On entry, the ldb-by-n right hand side matrix B.
* On exit, if return value = 0, the ldb-by-n solution matrix X.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
******************************************************************************/
// Triangular solve via BLASFEO, overwriting B with the solution X.
// NOTE(review): side, uplo, transa and diag are accepted but never
// inspected -- the call below is hard-wired to blasfeo_dtrsm_rltn
// (apparently the right / lower / transposed / non-unit variant; confirm
// against the BLASFEO API docs).  Any caller passing a different
// combination would silently get wrong results; verify all call sites use
// PlasmaRight / PlasmaLower / PlasmaTrans / PlasmaNonUnit.
__attribute__((weak))
void plasma_core_dtrsm_blasfeo(plasma_enum_t side, plasma_enum_t uplo,
                               plasma_enum_t transa, plasma_enum_t diag,
                               int m, int n,
                               double alpha, struct blasfeo_dmat *sA, int ai, int aj,
                               struct blasfeo_dmat *sB, int bi, int bj)
{
    // Original CBLAS reference call, kept for comparison:
    // cblas_dtrsm(CblasColMajor,
    // (CBLAS_SIDE)side, (CBLAS_UPLO)uplo,
    // (CBLAS_TRANSPOSE)transa, (CBLAS_DIAG)diag,
    // m, n,
    // (alpha), A, lda,
    // B, ldb);
    // B appears twice: it is both the right-hand side and the in-place result.
    blasfeo_dtrsm_rltn(m, n, alpha, sA, ai, aj, sB, bi, bj, sB, bi, bj);
}
/******************************************************************************/
// Asynchronous wrapper: submit plasma_core_dtrsm_blasfeo as an OpenMP task
// with dataflow dependences on A (in) and B (inout), so the runtime orders
// it against other tasks touching the same panels.  The task body is a
// no-op if the sequence has already recorded an error.
void plasma_core_omp_dtrsm_blasfeo(
    plasma_enum_t side, plasma_enum_t uplo,
    plasma_enum_t transa, plasma_enum_t diag,
    int m, int n,
    double alpha, const struct blasfeo_dmat *sA, int ai, int aj,
    struct blasfeo_dmat *sB, int bi, int bj,
    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // ak = order of the triangular factor (rows of B if A is applied from
    // the left, columns if from the right); used only to size the depend
    // region on A.
    int ak;
    if (side == PlasmaLeft)
        ak = m;
    else
        ak = n;

    // Copy the matrix descriptors by value: locals are captured firstprivate
    // by the task, so the descriptors stay valid after this function returns
    // even though the task may run later.
    struct blasfeo_dmat sA2, sB2;
    sA2 = *sA;
    sB2 = *sB;

    // Raw data pointers and panel sizes, used only in the depend clauses.
    double *A = sA->pA;
    int sda = sA->cn;
    double *B = sB->pA;
    int sdb = sB->cn;
    // Earlier LAPACK-layout version of the depend clauses, kept for reference:
    // #pragma omp task depend(in:A[0:lda*ak]) \
    // depend(inout:B[0:ldb*n])
    #pragma omp task depend(in:A[0:sda*ak]) \
        depend(inout:B[0:sdb*n])
    {
        if (sequence->status == PlasmaSuccess)
            plasma_core_dtrsm_blasfeo(side, uplo,
                                      transa, diag,
                                      m, n,
                                      alpha, &sA2, ai, aj,
                                      &sB2, bi, bj);
    }
}
|
mkl_util.h | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#define TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#ifdef INTEL_MKL
#include <memory>
#include <unordered_map>
#include <utility>
#include <vector>
#if defined(INTEL_MKL_ML_ONLY) || defined(INTEL_MKL_DNN_ONLY)
#ifndef INTEL_MKL
#error "INTEL_MKL_{ML,DNN}_ONLY require INTEL_MKL"
#endif
#endif
#if defined(INTEL_MKL_ML_ONLY) && defined(INTEL_MKL_DNN_ONLY)
#error "at most one of INTEL_MKL_ML_ONLY and INTEL_MKL_DNN_ONLY may be defined"
#endif
#ifdef INTEL_MKL_ML_ONLY
#include "mkl_dnn.h"
#include "mkl_dnn_types.h"
#include "mkl_service.h"
#include "mkl_trans.h"
#endif
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/cpu_info.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/padding.h"
#include "tensorflow/core/util/tensor_format.h"
#ifndef INTEL_MKL_ML_ONLY
#include "mkldnn.hpp"
#include "tensorflow/core/lib/core/stringpiece.h"
using mkldnn::engine;
using mkldnn::memory;
using mkldnn::padding_kind;
using mkldnn::primitive;
using mkldnn::reorder;
#endif
#ifdef _WIN32
typedef unsigned int uint;
#endif
namespace tensorflow {
// The file contains a number of utility classes and functions used by MKL
// enabled kernels
// This class encapsulates all the meta data that is associated with an MKL
// tensor. A tensor is an MKL tensor if it was created as the result of an
// MKL operation, and did not go through a conversion to a standard
// Tensorflow tensor.
typedef enum { W = 0, H = 1, C = 2, N = 3 } MklDims;
typedef enum {
Dim_N = 0,
Dim_C = 1,
Dim_H = 2,
Dim_W = 3,
Dim_O = 0,
Dim_I = 1
} MklDnnDims;
typedef enum {
Dim3d_N = 0,
Dim3d_C = 1,
Dim3d_D = 2,
Dim3d_H = 3,
Dim3d_W = 4,
Dim3d_O = 0,
Dim3d_I = 1
} MklDnnDims3D;
#ifdef INTEL_MKL_ML_ONLY
// MklShape: metadata describing an MKL-ML tensor -- whether the data is in
// an MKL-internal layout, its TF-side sizes/strides, the MKL and TF dnn
// layout handles, and the TF<->MKL dimension-order map.  Also knows how to
// (de)serialize that metadata into the flat byte buffer carried alongside
// the data tensor.  Legacy INTEL_MKL_ML_ONLY code path only.
class MklShape {
 public:
  MklShape() {}
  TF_DISALLOW_COPY_AND_ASSIGN(MklShape);  // Cannot copy

  // Frees owned arrays and any MKL layout handles that were created.
  ~MklShape() {
    if (sizes_) delete[] sizes_;
    if (strides_) delete[] strides_;
    if (mklLayout_) CHECK_EQ(dnnLayoutDelete_F32(mklLayout_), E_SUCCESS);
    if (tfLayout_) CHECK_EQ(dnnLayoutDelete_F32(tfLayout_), E_SUCCESS);
    if (tf_to_mkl_dim_map_) delete[] tf_to_mkl_dim_map_;
  }

  const bool IsMklTensor() const { return isMklTensor_; }
  void SetMklTensor(const bool isMklTensor) { isMklTensor_ = isMklTensor; }
  void SetDimensions(const size_t dimension) { dimension_ = dimension; }
  void SetMklLayout(dnnLayout_t mklLayout) { mklLayout_ = mklLayout; }

  // Derive the MKL layout from one resource of an existing MKL primitive.
  void SetMklLayout(const void* primitive, size_t resourceType) {
    CHECK_EQ(
        dnnLayoutCreateFromPrimitive_F32(&mklLayout_, (dnnPrimitive_t)primitive,
                                         (dnnResourceType_t)resourceType),
        E_SUCCESS);
  }

  // Copy the TF-side sizes/strides and create the matching dnn layout.
  void SetTfLayout(const size_t dimension, const size_t* sizes,
                   const size_t* strides) {
    dimension_ = dimension;
    if (dimension > 0) {  // MKl doesn't support zero dimension tensors
      sizes_ = new size_t[dimension];
      strides_ = new size_t[dimension];
      for (int ii = 0; ii < dimension; ii++) {
        sizes_[ii] = sizes[ii];
        strides_[ii] = strides[ii];
      }
      CHECK_EQ(dnnLayoutCreate_F32(&tfLayout_, dimension, sizes, strides),
               E_SUCCESS);
    }
  }

  // Default case - MKL dim ordering is opposite of TF dim ordering
  // MKL -> (DIMS-1)...0 where (DIMS-1) is outermost dim and 0 is innermost dim
  // TF -> 0...(DIMS-1) where 0 is outermost dim and (DIMS-1) is innermost dim
  // For layers that rely on data_format semantics (conv, pooling etc.)
  // or operate only on certain dimensions (relu, concat, split etc.),
  // Mkl APIs might require us to reorder these dimensions. In such cases,
  // kernels should explicitly set this map
  void SetTfDimOrder(const size_t dimension) {
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    for (size_t ii = 0; ii < dimension; ii++) {
      tf_to_mkl_dim_map_[ii] = dimension - (ii + 1);
    }
  }

  // Explicit map supplied by the kernel.
  void SetTfDimOrder(const size_t dimension, const size_t* tf_to_mkl_dim_map) {
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    for (size_t ii = 0; ii < dimension; ii++) {
      tf_to_mkl_dim_map_[ii] = tf_to_mkl_dim_map[ii];
    }
  }

  // Build the map from a 4-D TensorFlow data_format (NHWC / NCHW only).
  void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
    CHECK_EQ(dimension, 4);
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDims::W;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDims::H;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDims::C;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDims::N;
  }

  const dnnLayout_t GetMklLayout() const { return mklLayout_; }
  const dnnLayout_t GetTfLayout() const { return tfLayout_; }
  // Layout the data currently lives in: MKL layout for MKL tensors,
  // TF layout otherwise.
  const dnnLayout_t GetCurLayout() const {
    return isMklTensor_ ? mklLayout_ : tfLayout_;
  }
  size_t GetDimension() const { return dimension_; }
  const size_t* GetSizes() const { return sizes_; }
  int64 dim_size(int index) const { return sizes_[index]; }
  // Size of TF dimension `index`, translated through the TF->MKL map.
  int64 tf_dim_size(int index) const {
    return sizes_[tf_to_mkl_dim_map_[index]];
  }
  const size_t* GetStrides() const { return strides_; }
  const size_t* GetTfToMklDimMap() const { return tf_to_mkl_dim_map_; }
  size_t tf_dim_idx(int index) const { return tf_to_mkl_dim_map_[index]; }

  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Channel dimension.
  bool IsMklChannelDim(int d) const { return tf_dim_idx(d) == MklDims::C; }
  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Batch dimension.
  bool IsMklBatchDim(int d) const { return tf_dim_idx(d) == MklDims::N; }
  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Width dimension.
  bool IsMklWidthDim(int d) const { return tf_dim_idx(d) == MklDims::W; }
  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Height dimension.
  bool IsMklHeightDim(int d) const { return tf_dim_idx(d) == MklDims::H; }

  // Check if the TF-Mkl dimension ordering map specifies if the input
  // tensor is in NCHW format.
  bool IsTensorInNCHWFormat() const {
    TensorFormat data_format = FORMAT_NCHW;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  // Check if the TF-Mkl dimension ordering map specifies if the input
  // tensor is in NHWC format.
  bool IsTensorInNHWCFormat() const {
    TensorFormat data_format = FORMAT_NHWC;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  // Convert the flat data in `input` from the current layout into
  // `targetLayout`, writing into `output`.  Both buffers must be large
  // enough for their respective layouts; the temporary conversion
  // primitive is created and deleted here.
  void GetConvertedFlatData(dnnLayout_t targetLayout, void* input,
                            void* output) const {
    dnnLayout_t curLayout;
    if (isMklTensor_)
      curLayout = mklLayout_;
    else
      curLayout = tfLayout_;
    dnnPrimitive_t convert;
    CHECK_EQ(dnnConversionCreate_F32(&convert, curLayout, targetLayout),
             E_SUCCESS);
    CHECK_EQ(dnnConversionExecute_F32(convert, input, output), E_SUCCESS);
    CHECK_EQ(dnnDelete_F32(convert), E_SUCCESS);
  }

  // The following methods are used for serializing and de-serializing the
  // contents of the mklshape object.
  // The data is serialized in this order
  // isMklTensor_
  // dimension_
  // sizes_
  // strides_
  // mklLayout_
  // tfLayout_
  // tf_to_mkl_dim_map_

#define SIZE_OF_MKL_DNN_BUF \
  (dnnLayoutSerializationBufferSize_F32())  // Size of buffer needed to
                                            // serialize dnn_layout pointer

  // Size of buffer to hold the serialized object, the size is computed as
  // follows sizeof(isMklTensor_) + sizeof(dimension_) + sizeof(sizes_) +
  // sizeof(strides_)
  // + sizeof(mklLayout_ buffer) + sizeof(tfLayout_ buffer)
  // + sizeof(tf_to_mkl_dim_map_)

#define SIZE_OF_MKL_SERIAL_DATA(dims) \
  (2 * sizeof(size_t) + 3 * dims * sizeof(size_t) + 2 * SIZE_OF_MKL_DNN_BUF)

  // First we need to define some macro for offsets into the serial buffer
  // where different elements of Mklshape is written/read from

#define IS_MKL_TENSOR_OFFSET 0
  // Location from start of buffer where isMklTensor_ is serialized
#define DIMS_OFFSET \
  (IS_MKL_TENSOR_OFFSET + sizeof(size_t))  // Location of dimension_
  // Location of sizes. Note dim is not used here, left here
  // to make macros consistent.
#define SIZES_OFFSET(dims) (DIMS_OFFSET + sizeof(size_t))
#define STRIDES_OFFSET(dims) \
  (SIZES_OFFSET(dims) + dims * sizeof(size_t))  // Location of strides
#define MKL_LAYOUT_OFFSET(dims) \
  (STRIDES_OFFSET(dims) + dims * sizeof(size_t))  // Location of mklLayout_
#define TF_LAYOUT_OFFSET(dims) \
  (MKL_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)  // Location of tfLayout_
  // Location of tf_to_mkl_dim_map_
#define TF_TO_MKL_DIM_MAP_OFFSET(dims) \
  (TF_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)

  // TODO(agramesh1) make sure to create a const to share with rewrite pass
  // for min size of MKL metadata tensor.

  // Populate this object from the serialized buffer `buf` (buf_size bytes).
  // Allocates the sizes_/strides_/map arrays and deserializes both layouts
  // when the buffer marks the tensor as an MKL tensor.
  void DeSerializeMklShape(const unsigned char* buf, size_t buf_size) {
    CHECK(buf_size >= sizeof(size_t)) << "Bufsize too small in DeSerialize";
    // Make sure buffer holds at least isMklTensor_
    isMklTensor_ =
        *reinterpret_cast<const size_t*>(buf + IS_MKL_TENSOR_OFFSET) != 0;
    if (isMklTensor_) {  // If it is an MKL Tensor then read the rest
      dimension_ = *(reinterpret_cast<const size_t*>(buf + DIMS_OFFSET));
      CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
          << "Bufsize too small in DeSerialize";
      sizes_ = new size_t[dimension_];
      strides_ = new size_t[dimension_];
      tf_to_mkl_dim_map_ = new size_t[dimension_];
      for (int i = 0; i < dimension_; i++) {
        sizes_[i] =
            reinterpret_cast<const size_t*>(buf + SIZES_OFFSET(dimension_))[i];
        strides_[i] = reinterpret_cast<const size_t*>(
            buf + STRIDES_OFFSET(dimension_))[i];
        tf_to_mkl_dim_map_[i] = reinterpret_cast<const size_t*>(
            buf + TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i];
      }
      CHECK_EQ(dnnLayoutDeserialize_F32(&mklLayout_,
                                        buf + MKL_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
      CHECK_EQ(dnnLayoutDeserialize_F32(&tfLayout_,
                                        buf + TF_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
    }
  }

  // Write this object into `buf`; buf must hold at least
  // SIZE_OF_MKL_SERIAL_DATA(dimension_) bytes when isMklTensor_ is set.
  void SerializeMklShape(unsigned char* buf, size_t buf_size) const {
    CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
        << "Bufsize too small to Serialize";
    *reinterpret_cast<size_t*>(buf + IS_MKL_TENSOR_OFFSET) =
        isMklTensor_ ? 1 : 0;
    if (isMklTensor_) {
      *(reinterpret_cast<size_t*>(buf + DIMS_OFFSET)) = dimension_;
      for (int i = 0; i < dimension_; i++) {
        reinterpret_cast<size_t*>(buf + SIZES_OFFSET(dimension_))[i] =
            sizes_[i];
        reinterpret_cast<size_t*>(buf + STRIDES_OFFSET(dimension_))[i] =
            strides_[i];
        reinterpret_cast<size_t*>(buf +
                                  TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i] =
            tf_to_mkl_dim_map_[i];
      }
      CHECK_EQ(dnnLayoutSerialize_F32(mklLayout_,
                                      buf + MKL_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
      CHECK_EQ(
          dnnLayoutSerialize_F32(tfLayout_, buf + TF_LAYOUT_OFFSET(dimension_)),
          E_SUCCESS);
    }
  }

 private:
  bool isMklTensor_ =
      false;  // Flag to indicate if the tensor is an MKL tensor or not
  dnnLayout_t mklLayout_ = nullptr;  // Pointer to the MKL layout
  dnnLayout_t tfLayout_ = nullptr;   // Pointer to layout of corresponding
  // Tensorflow tensor, used when conversion from MKL to standard tensor
  size_t dimension_ = 0;
  size_t* sizes_ = nullptr;    // Required by MKL for conversions
  size_t* strides_ = nullptr;  // Required by MKL for conversions
  size_t* tf_to_mkl_dim_map_ =
      nullptr;  // TF dimension corresponding to this MKL dimension
};
#else
// Forward decl
TensorFormat MklDnn3DDataFormatToTFDataFormat(memory::format format);
TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format);
memory::dims CalculateTFStrides(const memory::dims& dims_tf_order);
memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
const memory::dims& strides,
memory::data_type dtype);
// Metadata describing a tensor that may be laid out in MKL-DNN's internal
// format: whether it is an MKL tensor, its dimension sizes, the mapping
// between TF and MKL dimension indices, the TF data format, the element
// type, and the opaque MKL memory descriptor. The POD MklShapeData payload
// is what gets serialized byte-for-byte into the companion meta-data tensor
// (see SerializeMklDnnShape / DeSerializeMklDnnShape).
class MklDnnShape {
 private:
  typedef struct {
    /// Flag to indicate if the tensor is an MKL tensor or not
    bool is_mkl_tensor_ = false;
    /// Number of dimensions in Tensorflow format
    size_t dimension_ = 0;
    /// Required by MKLDNN for conversions
    mkldnn_dims_t sizes_;  // Required by MKL for conversions
    memory::format tf_data_format_ = memory::format::format_undef;
    memory::data_type T_ = memory::data_type::data_undef;
    // MKL layout
    mkldnn_memory_desc_t mkl_md_;
    /// TF dimension corresponding to this MKL dimension
    mkldnn_dims_t map_;
  } MklShapeData;
  MklShapeData data_;

  // Element type of the fixed-size mkldnn_dims_t array.
  typedef std::remove_extent<mkldnn_dims_t>::type mkldnn_dim_t;

// Sentinel stored in unused slots of sizes_ and map_.
#define INVALID_DIM_SIZE -1

 public:
  // Marks every slot of sizes_ and map_ as unused (INVALID_DIM_SIZE == -1).
  MklDnnShape() {
    for (size_t i = 0; i < sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
         ++i) {
      data_.sizes_[i] = -1;
    }
    for (size_t i = 0; i < sizeof(data_.map_) / sizeof(data_.map_[0]); ++i) {
      data_.map_[i] = -1;
    }
  }

  ~MklDnnShape() {}
  TF_DISALLOW_COPY_AND_ASSIGN(MklDnnShape);  // Cannot copy

  /// Helper function to compare memory::desc objects for MklDnn.
  /// May be this should go into MklDnn directly.
  /// Compares the two underlying C descriptor structs byte by byte.
  inline bool CompareMklDnnLayouts(const memory::desc& md1,
                                   const memory::desc& md2) const {
    mkldnn_memory_desc_t mdd1 = md1.data;
    mkldnn_memory_desc_t mdd2 = md2.data;
    const char* d1 = reinterpret_cast<const char*>(&mdd1);
    const char* d2 = reinterpret_cast<const char*>(&mdd2);

    size_t md_size = sizeof(mdd1);
    for (size_t i = 0; i < md_size; i++) {
      if (*d1++ != *d2++) {
        return false;
      }
    }
    return true;
  }

  /// Equality function for MklDnnShape objects
  /// @return true if both are equal; false otherwise.
  inline bool operator==(const MklDnnShape& input_shape) const {
    if (this->IsMklTensor() != input_shape.IsMklTensor()) {
      return false;
    }

    // If input tensors are in Mkl layout, then we check for dimensions and
    // sizes.
    if (this->IsMklTensor()) {
      return this->GetTfShape() == input_shape.GetTfShape() &&
             CompareMklDnnLayouts(this->GetMklLayout(),
                                  input_shape.GetMklLayout());
    }

    // Both sides are plain TF tensors; nothing further to compare here.
    return true;
  }

  /// Equality operator for MklDnnShape and TFShape.
  /// Returns: true if TF shapes for both are the same, false otherwise
  /// NOTE(review): a non-MKL MklDnnShape never compares equal to a
  /// TensorShape here, even when the underlying TF shapes match --
  /// confirm this asymmetry is intentional for callers.
  inline bool operator==(const TensorShape& input_shape) const {
    if (!this->IsMklTensor()) {
      return false;
    }
    return this->GetTfShape() == input_shape;
  }

  inline const bool IsMklTensor() const { return data_.is_mkl_tensor_; }
  inline void SetMklTensor(bool is_mkl_tensor) {
    data_.is_mkl_tensor_ = is_mkl_tensor;
  }

  // Sets the number of TF dimensions tracked by this shape.
  inline void SetDimensions(const size_t dimension) {
    data_.dimension_ = dimension;
  }

  // Size of the MKL dimension named by 'dimension' ('N','C','H','W');
  // fatal CHECK if the resolved index is out of range.
  inline size_t GetDimension(char dimension) const {
    int index = GetMklDnnTensorDimIndex(dimension);
    CHECK(index >= 0 && index < this->GetDimension())
        << "Invalid index from the dimension: " << index << ", " << dimension;
    return this->DimSize(index);
  }

  // Same as GetDimension(char) but for 5-D tensors ('N','C','D','H','W').
  inline size_t GetDimension3D(char dimension) const {
    int index = GetMklDnnTensor3DDimIndex(dimension);
    CHECK(index >= 0 && index < this->GetDimension())
        << "Invalid index from the dimension: " << index << ", " << dimension;
    return this->DimSize(index);
  }

  // Maps a 4-D dimension character to its MklDnnDims index; fatal for
  // anything other than 'N', 'C', 'H', 'W'.
  inline int32 GetMklDnnTensorDimIndex(char dimension) const {
    switch (dimension) {
      case 'N':
        return MklDnnDims::Dim_N;
      case 'C':
        return MklDnnDims::Dim_C;
      case 'H':
        return MklDnnDims::Dim_H;
      case 'W':
        return MklDnnDims::Dim_W;
      default:
        LOG(FATAL) << "Invalid dimension: " << dimension;
        return -1;  // Avoid compiler warning about missing return value
    }
  }

  // Maps a 5-D dimension character to its MklDnnDims3D index; fatal for
  // anything other than 'N', 'C', 'D', 'H', 'W'.
  inline int32 GetMklDnnTensor3DDimIndex(char dimension) const {
    switch (dimension) {
      case 'N':
        return MklDnnDims3D::Dim3d_N;
      case 'C':
        return MklDnnDims3D::Dim3d_C;
      case 'D':
        return MklDnnDims3D::Dim3d_D;
      case 'H':
        return MklDnnDims3D::Dim3d_H;
      case 'W':
        return MklDnnDims3D::Dim3d_W;
      default:
        LOG(FATAL) << "Invalid dimension: " << dimension;
        return -1;  // Avoid compiler warning about missing return value
    }
  }

  inline size_t GetDimension() const { return data_.dimension_; }

  // Raw pointer view of the sizes array.
  // NOTE(review): reinterprets mkldnn_dim_t* as const int*; only valid if
  // the two types have identical size -- TODO confirm for this MKL-DNN
  // version.
  inline const int* GetSizes() const {
    return reinterpret_cast<const int*>(&data_.sizes_[0]);
  }

  // Returns an mkldnn::memory::dims object that contains the sizes of this
  // MklDnnShape object. Only valid for MKL tensors (CHECK-fails otherwise);
  // slots still holding INVALID_DIM_SIZE are skipped.
  inline memory::dims GetSizesAsMklDnnDims() const {
    memory::dims retVal;
    if (data_.is_mkl_tensor_) {
      size_t dimensions = sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
      for (size_t i = 0; i < dimensions; i++) {
        if (data_.sizes_[i] != INVALID_DIM_SIZE)
          retVal.push_back(data_.sizes_[i]);
      }
    } else {
      CHECK_EQ(data_.is_mkl_tensor_, true);
    }
    return retVal;
  }

  // Size of dimension 'index'; bounds-checked against the capacity of the
  // sizes_ array (not against dimension_).
  inline int64 DimSize(int index) const {
    CHECK_LT(index, sizeof(data_.sizes_) / sizeof(data_.sizes_[0]));
    return data_.sizes_[index];
  }

  /// Return TensorShape that describes the Tensorflow shape of the tensor
  /// represented by this MklShape. CHECK-fails if this is not an MKL tensor
  /// or if the resulting shape is invalid.
  inline TensorShape GetTfShape() const {
    CHECK_EQ(data_.is_mkl_tensor_, true);

    std::vector<int32> shape(data_.dimension_, -1);
    if (data_.tf_data_format_ != memory::format::blocked) {
      // Use the TF->MKL dimension map to put sizes back in TF order.
      for (size_t idx = 0; idx < data_.dimension_; ++idx) {
        shape[idx] = data_.sizes_[TfDimIdx(idx)];
      }
    } else {
      // If Tensorflow shape is in Blocked format, then we don't have dimension
      // map for it. So we just create Tensorflow shape from sizes in the
      // specified order.
      for (size_t idx = 0; idx < data_.dimension_; ++idx) {
        shape[idx] = data_.sizes_[idx];
      }
    }

    TensorShape ts;
    bool ret = TensorShapeUtils::MakeShape(shape, &ts).ok();
    CHECK_EQ(ret, true);
    return ts;
  }

  inline void SetElemType(memory::data_type dt) { data_.T_ = dt; }
  inline const memory::data_type GetElemType() { return data_.T_; }

  // Records the MKL layout from a primitive descriptor.
  inline void SetMklLayout(memory::primitive_desc* pd) {
    CHECK_NOTNULL(pd);
    data_.mkl_md_ = pd->desc().data;
  }

  // Records the MKL layout from a memory descriptor.
  inline void SetMklLayout(memory::desc* md) {
    CHECK_NOTNULL(md);
    data_.mkl_md_ = md->data;
  }

  inline const memory::desc GetMklLayout() const {
    return memory::desc(data_.mkl_md_);
  }

  inline memory::format GetTfDataFormat() const {
    return data_.tf_data_format_;
  }

  /// We don't create primitive_descriptor for TensorFlow layout now.
  /// We use lazy evaluation and create it only when needed. Input format can
  /// also be Blocked format.
  inline void SetTfLayout(size_t dims, const memory::dims& sizes,
                          memory::format format) {
    CHECK_EQ(dims, sizes.size());
    data_.dimension_ = dims;
    for (size_t ii = 0; ii < dims; ii++) {
      data_.sizes_[ii] = sizes[ii];
    }
    data_.tf_data_format_ = format;
    if (format != memory::format::blocked) {
      // Blocked format carries no TF<->MKL dimension map.
      SetTfDimOrder(dims, format);
    }
  }

  // Builds a memory::desc describing the TF-side layout of this tensor.
  inline const memory::desc GetTfLayout() const {
    memory::dims dims;
    for (size_t ii = 0; ii < data_.dimension_; ii++) {
      dims.push_back(data_.sizes_[ii]);
    }

    // Create Blocked memory desc if input TF format was set like that.
    if (data_.tf_data_format_ == memory::format::blocked) {
      auto strides = CalculateTFStrides(dims);
      return CreateBlockedMemDescHelper(dims, strides, data_.T_);
    } else {
      return memory::desc(dims, data_.T_, data_.tf_data_format_);
    }
  }

  // Layout currently in effect: MKL layout for MKL tensors, TF layout
  // otherwise.
  inline const memory::desc GetCurLayout() const {
    return IsMklTensor() ? GetMklLayout() : GetTfLayout();
  }

  // nhasabni - I've removed SetTfDimOrder that was setting default order in
  // case of MKL-ML. We don't need a case of default dimension order because
  // when an operator that does not get data_format attribute gets all inputs
  // in Tensorflow format, it will produce output in Tensorflow format.
  // Copies a caller-provided TF->MKL dimension map verbatim.
  inline void SetTfDimOrder(const size_t dimension, const mkldnn_dims_t map) {
    CHECK(dimension == data_.dimension_);
    for (size_t ii = 0; ii < dimension; ii++) {
      data_.map_[ii] = map[ii];
    }
  }

  // Derives the TF->MKL dimension map from a TensorFormat; supports 4-D
  // and 5-D tensors only (CHECK-fails otherwise).
  inline void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
    if (dimension == 5) {
      CHECK(dimension == data_.dimension_);
      data_.map_[GetTensorDimIndex<3>(data_format, '0')] =
          MklDnnDims3D::Dim3d_D;
      data_.map_[GetTensorDimIndex<3>(data_format, '1')] =
          MklDnnDims3D::Dim3d_H;
      data_.map_[GetTensorDimIndex<3>(data_format, '2')] =
          MklDnnDims3D::Dim3d_W;
      data_.map_[GetTensorDimIndex<3>(data_format, 'C')] =
          MklDnnDims3D::Dim3d_C;
      data_.map_[GetTensorDimIndex<3>(data_format, 'N')] =
          MklDnnDims3D::Dim3d_N;
    } else {
      CHECK_EQ(dimension, 4);
      CHECK(dimension == data_.dimension_);
      data_.map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDnnDims::Dim_W;
      data_.map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDnnDims::Dim_H;
      data_.map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDnnDims::Dim_C;
      data_.map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDnnDims::Dim_N;
    }
  }

  // Derives the dimension map from an MKL-DNN memory format by first
  // translating it to the corresponding TensorFormat.
  inline void SetTfDimOrder(const size_t dimension, memory::format format) {
    TensorFormat data_format = MklDnnDataFormatToTFDataFormat(format);
    SetTfDimOrder(dimension, data_format);
  }

  inline const mkldnn_dim_t* GetTfToMklDimMap() const { return &data_.map_[0]; }
  // MKL dimension index corresponding to TF dimension 'index'.
  inline size_t TfDimIdx(int index) const { return data_.map_[index]; }
  // Size of TF dimension 'index', looked up through the dimension map.
  inline int64 TfDimSize(int index) const {
    return data_.sizes_[TfDimIdx(index)];
  }

  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Channel dimension.
  inline bool IsMklChannelDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_C;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Batch dimension.
  inline bool IsMklBatchDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_N;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Width dimension.
  inline bool IsMklWidthDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_W;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Height dimension.
  inline bool IsMklHeightDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_H;
  }

  /// Check if the TF-Mkl dimension ordering map specifies if the input
  /// tensor is in NCHW format.
  inline bool IsTensorInNCHWFormat() const {
    TensorFormat data_format = FORMAT_NCHW;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  /// Check if the TF-Mkl dimension ordering map specifies if the input
  /// tensor is in NHWC format.
  inline bool IsTensorInNHWCFormat() const {
    TensorFormat data_format = FORMAT_NHWC;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  /// The following methods are used for serializing and de-serializing the
  /// contents of the mklshape object.
  /// The data is serialized in this order
  /// is_mkl_tensor_ : dimension_ : sizes_ : map_: format_ : T_ : mkl_pd_;

  /// Size of buffer to hold the serialized object, the size is computed by
  /// following above mentioned order
  inline size_t GetSerializeBufferSize() const { return sizeof(MklShapeData); }

  // Copies the raw MklShapeData bytes into 'buf'; 'buf' must hold at least
  // GetSerializeBufferSize() bytes.
  void SerializeMklDnnShape(unsigned char* buf, size_t buf_size) const {
    CHECK(buf_size >= GetSerializeBufferSize())
        << "Buffer size is too small to SerializeMklDnnShape";
    *reinterpret_cast<MklShapeData*>(buf) = data_;
  }

  // Inverse of SerializeMklDnnShape: reads is_mkl_tensor_ first, and copies
  // the full payload only when the serialized shape is an MKL tensor.
  void DeSerializeMklDnnShape(const unsigned char* buf, size_t buf_size) {
    // Make sure buffer holds at least is_mkl_tensor_.
    CHECK(buf_size >= sizeof(data_.is_mkl_tensor_))
        << "Buffer size is too small in DeSerializeMklDnnShape";

    const bool is_mkl_tensor = *reinterpret_cast<const bool*>(buf);
    if (is_mkl_tensor) {  // If it is an MKL Tensor then read the rest
      CHECK(buf_size >= GetSerializeBufferSize())
          << "Buffer size is too small in DeSerializeMklDnnShape";
      data_ = *reinterpret_cast<const MklShapeData*>(buf);
    }
  }
};
#endif
// List of MklShape objects. Used in Concat/Split layers.
#ifndef INTEL_MKL_ML_ONLY
typedef std::vector<MklDnnShape> MklDnnShapeList;
#else
typedef std::vector<MklShape> MklShapeList;
#endif
#ifdef INTEL_MKL_ML_ONLY
// Check if all tensors specified by MklShapes are MKL tensors.
// Returns true iff every shape in 'shapes' describes an MKL tensor.
inline bool AreAllMklTensors(const MklShapeList& shapes) {
  bool all_mkl = true;
  for (const auto& shape : shapes) {
    if (!shape.IsMklTensor()) {
      all_mkl = false;
      break;
    }
  }
  return all_mkl;
}
// Converts a tensor in MKL (MKL-ML) layout back to plain TensorFlow layout
// and returns the converted copy.
// NOTE(review): the Status returned by allocate_temp() is ignored here; the
// subsequent flat<T>() access would be invalid if allocation fails -- TODO
// confirm whether callers guarantee allocation cannot fail on this path.
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklShape& mkl_shape) {
  Tensor output_tensor;
  TensorShape output_shape;
  // Build the TF shape by walking dimensions through the MKL->TF map.
  for (size_t j = 0; j < mkl_shape.GetDimension(); j++) {
    // Outermost to innermost dimension
    output_shape.AddDim(mkl_shape.GetSizes()[mkl_shape.tf_dim_idx(j)]);
  }
  // Allocate output tensor.
  context->allocate_temp(DataTypeToEnum<T>::v(), output_shape, &output_tensor);
  dnnLayout_t output_layout = static_cast<dnnLayout_t>(mkl_shape.GetTfLayout());
  void* input_buffer = const_cast<T*>(mkl_tensor.flat<T>().data());
  void* output_buffer = const_cast<T*>(output_tensor.flat<T>().data());
  // Nothing to convert for an empty tensor.
  if (mkl_tensor.NumElements() != 0) {
    mkl_shape.GetConvertedFlatData(output_layout, input_buffer, output_buffer);
  }
  return output_tensor;
}
#else
using mkldnn::stream;
template <typename T> class MklDnnData;
// Converts 'mkl_tensor' from MKL-DNN layout to plain TensorFlow layout.
// If the input is already a TF tensor it is returned unchanged; otherwise a
// temporary tensor with the TF shape is allocated and the data is reordered
// into it (or copied when no reorder is required).
// Fails fatally (LOG(FATAL)) if MKL-DNN reports an error during the reorder.
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklDnnShape& mkl_shape) {
  Tensor output_tensor;
  try {
    if (!mkl_shape.IsMklTensor())
      return mkl_tensor;  // return input since it is already TF tensor

    // Fixed: removed stray second semicolon after GetTfShape().
    TensorShape output_shape = mkl_shape.GetTfShape();
    // Allocate output tensor.
    // NOTE(review): the Status from allocate_temp() is ignored -- TODO
    // confirm that dropping allocation failures is acceptable on this path.
    context->allocate_temp(DataTypeToEnum<T>::v(), output_shape,
                           &output_tensor);

    auto cpu_engine = engine(engine::cpu, 0);
    MklDnnData<T> input(&cpu_engine);

    // Get Mkl layout of input tensor.
    auto input_mkl_md = mkl_shape.GetMklLayout();
    auto output_tf_md = mkl_shape.GetTfLayout();
    auto output_tf_pd = memory::primitive_desc(output_tf_md, cpu_engine);
    input.SetUsrMem(input_mkl_md, &mkl_tensor);

    if (input.IsReorderNeeded(output_tf_pd)) {
      // Layouts differ: run an MKL-DNN reorder into the output tensor.
      std::vector<primitive> net;
      CHECK_EQ(input.CheckReorderToOpMem(output_tf_pd, &output_tensor, &net),
               true);
      stream(stream::kind::eager).submit(net).wait();
    } else {
      // If not, just forward input tensor to output tensor.
      CHECK(output_tensor.CopyFrom(mkl_tensor, output_shape));
    }
  } catch (mkldnn::error& e) {
    string error_msg = "Status: " + std::to_string(e.status) +
                       ", message: " + string(e.message) + ", in file " +
                       string(__FILE__) + ":" + std::to_string(__LINE__);
    LOG(FATAL) << "Operation received an exception: " << error_msg;
  }
  return output_tensor;
}
#endif
// Get the MKL shape from the second string tensor
#ifdef INTEL_MKL_ML_ONLY
// Deserializes the MKL-ML shape of input 'n' from its companion meta-data
// tensor into '*mklshape'.
inline void GetMklShape(OpKernelContext* ctext, int n, MklShape* mklshape) {
  const Tensor& meta_tensor =
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()));
  mklshape->DeSerializeMklShape(
      meta_tensor.flat<uint8>().data(),
      meta_tensor.flat<uint8>().size() * sizeof(uint8));
}
#else
// Deserializes the MKL-DNN shape of input 'n' from its companion meta-data
// tensor into '*mklshape'.
inline void GetMklShape(OpKernelContext* ctext, int n, MklDnnShape* mklshape) {
  const Tensor& meta_tensor =
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()));
  mklshape->DeSerializeMklDnnShape(
      meta_tensor.flat<uint8>().data(),
      meta_tensor.flat<uint8>().size() * sizeof(uint8));
}
#endif
// Gets the actual input
// Returns the actual data tensor for logical input 'n' (skipping the
// interleaved meta-data tensors).
inline const Tensor& MklGetInput(OpKernelContext* ctext, int n) {
  const int data_idx = GetTensorDataIndex(n, ctext->num_inputs());
  return ctext->input(data_idx);
}
// Fetches the OpInputList registered under 'name' into '*input_tensors'.
// CHECK-fails on a null output pointer.
inline void GetMklInputList(OpKernelContext* ctext, StringPiece name,
                            OpInputList* input_tensors) {
  CHECK_NOTNULL(input_tensors);
  ctext->input_list(name, input_tensors);
}
#ifdef INTEL_MKL_ML_ONLY
// Deserializes every MKL-ML shape in the "mkl_<name>" input list into the
// corresponding slot of '*mkl_shapes' (which must already be sized).
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklShapeList* mkl_shapes) {
  OpInputList input_mkl_tensors;
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);

  const int num_mkl_tensors = input_mkl_tensors.size();
  for (int idx = 0; idx < num_mkl_tensors; ++idx) {
    const Tensor& meta_tensor = input_mkl_tensors[idx];
    (*mkl_shapes)[idx].DeSerializeMklShape(
        meta_tensor.flat<uint8>().data(),
        meta_tensor.flat<uint8>().size() * sizeof(uint8));
  }
}
#else
// Deserializes every MKL-DNN shape in the "mkl_<name>" input list into the
// corresponding slot of '*mkl_shapes' (which must already be sized).
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklDnnShapeList* mkl_shapes) {
  OpInputList input_mkl_tensors;
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &input_mkl_tensors);

  const int num_mkl_tensors = input_mkl_tensors.size();
  for (int idx = 0; idx < num_mkl_tensors; ++idx) {
    const Tensor& meta_tensor = input_mkl_tensors[idx];
    (*mkl_shapes)[idx].DeSerializeMklDnnShape(
        meta_tensor.flat<uint8>().data(),
        meta_tensor.flat<uint8>().size() * sizeof(uint8));
  }
}
#endif
#ifndef INTEL_MKL_ML_ONLY
/// Get shape of input tensor pointed by 'input_idx' in TensorShape format.
/// If the input tensor is in MKL layout, then obtains TensorShape from
/// MklShape.
/// Get shape of input tensor pointed by 'input_idx' in TensorShape format.
/// If the input tensor is in MKL layout, then obtains TensorShape from
/// MklShape.
inline TensorShape GetTfShape(OpKernelContext* context, size_t input_idx) {
  // Sanity check.
  CHECK_NOTNULL(context);
  CHECK_LT(input_idx, context->num_inputs());

  MklDnnShape input_mkl_shape;
  GetMklShape(context, input_idx, &input_mkl_shape);
  if (!input_mkl_shape.IsMklTensor()) {
    // Plain TF tensor: read the shape straight off the tensor.
    return MklGetInput(context, input_idx).shape();
  }
  // MKL tensor: reconstruct the TF shape from the serialized MKL shape.
  return input_mkl_shape.GetTfShape();
}
#endif
#ifdef INTEL_MKL_ML_ONLY
// Allocate the second output tensor that will contain
// the MKL shape serialized
// Allocates the meta-data output for logical output 'n' and serializes the
// given MKL-ML shape into it.
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklShape& mkl_shape) {
  const int meta_idx = GetTensorMetaDataIndex(n, ctext->num_outputs());

  TensorShape meta_shape;
  meta_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));

  Tensor* meta_tensor = nullptr;
  OP_REQUIRES_OK(ctext,
                 ctext->allocate_output(meta_idx, meta_shape, &meta_tensor));
  mkl_shape.SerializeMklShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#else
// Allocate the second output tensor that will contain
// the MKL shape serialized
// Allocates the meta-data output for logical output 'n' and serializes the
// given MKL-DNN shape into it.
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklDnnShape& mkl_shape) {
  const int meta_idx = GetTensorMetaDataIndex(n, ctext->num_outputs());

  TensorShape meta_shape;
  meta_shape.AddDim(mkl_shape.GetSerializeBufferSize());

  Tensor* meta_tensor = nullptr;
  OP_REQUIRES_OK(ctext,
                 ctext->allocate_output(meta_idx, meta_shape, &meta_tensor));
  mkl_shape.SerializeMklDnnShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif
#ifdef INTEL_MKL_ML_ONLY
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
// Allocates both the data output (with shape 'tf_shape') and the companion
// meta-data output carrying the serialized MKL-ML shape.
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklShape& mkl_shape) {
  const int data_idx = GetTensorDataIndex(n, ctext->num_outputs());
  const int meta_idx = GetTensorMetaDataIndex(n, ctext->num_outputs());

  // Data output with the caller-provided TF shape.
  OP_REQUIRES_OK(ctext, ctext->allocate_output(data_idx, tf_shape, output));

  // Meta-data output holding the serialized MKL shape.
  TensorShape meta_shape;
  meta_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
  Tensor* meta_tensor = nullptr;
  OP_REQUIRES_OK(ctext,
                 ctext->allocate_output(meta_idx, meta_shape, &meta_tensor));
  mkl_shape.SerializeMklShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#else
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
// Allocates both the data output (with shape 'tf_shape') and the companion
// meta-data output carrying the serialized MKL-DNN shape.
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklDnnShape& mkl_shape) {
  const int data_idx = GetTensorDataIndex(n, ctext->num_outputs());
  const int meta_idx = GetTensorMetaDataIndex(n, ctext->num_outputs());

  // Data output with the caller-provided TF shape.
  OP_REQUIRES_OK(ctext, ctext->allocate_output(data_idx, tf_shape, output));

  // Meta-data output holding the serialized MKL-DNN shape.
  TensorShape meta_shape;
  meta_shape.AddDim(mkl_shape.GetSerializeBufferSize());
  Tensor* meta_tensor = nullptr;
  OP_REQUIRES_OK(ctext,
                 ctext->allocate_output(meta_idx, meta_shape, &meta_tensor));
  mkl_shape.SerializeMklDnnShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif
// Allocates a temp tensor and returns the data buffer for temporary storage.
// Currently
#ifndef INTEL_MKL_ML_ONLY
// Allocates a temporary tensor big enough to back an MKL-DNN primitive
// buffer of 'pd.get_size()' bytes and returns its data pointer in *buf_out.
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           const memory::primitive_desc& pd, void** buf_out) {
  // Element count rounded up (+1) so the buffer always covers the byte size.
  TensorShape buf_shape;
  buf_shape.AddDim(pd.get_size() / sizeof(T) + 1);

  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
                                                 buf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<T>().data());
}
#else
// Allocates a temporary float tensor big enough to back the given MKL-ML
// layout and returns its data pointer in *buf_out.
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           dnnLayout_t lt_buff, void** buf_out) {
  // Float count rounded up (+1) so the buffer covers the layout's byte size.
  const size_t num_floats =
      dnnLayoutGetMemorySize_F32(static_cast<dnnLayout_t>(lt_buff)) /
          sizeof(float) +
      1;
  TensorShape buf_shape;
  buf_shape.AddDim(num_floats);

  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<float>::v(),
                                                 buf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<float>().data());
}
#endif
// Allocates a temporary tensor of element type T with the requested TF shape.
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           TensorShape tf_shape) {
  const DataType dt = DataTypeToEnum<T>::v();
  OP_REQUIRES_OK(context, context->allocate_temp(dt, tf_shape, tensor_out));
}
// Fills 'strides' (in NCHW order, as MKL requires) from the per-dimension
// 'sizes', for either NHWC or non-NHWC (treated as NCHW) input formats.
inline void GetStridesFromSizes(TensorFormat data_format, size_t* strides,
                                const size_t* sizes) {
  // MKL requires strides in NCHW
  if (data_format == FORMAT_NHWC) {
    strides[2] = 1;
    strides[0] = sizes[2];
    strides[1] = sizes[0] * sizes[2];
    strides[3] = sizes[0] * sizes[1] * sizes[2];
  } else {
    strides[0] = 1;
    strides[1] = sizes[0];
    strides[2] = strides[1] * sizes[1];
    strides[3] = strides[2] * sizes[2];
  }
}
#ifdef INTEL_MKL_ML_ONLY
// Converts the 4-D size array of an MKL-ML shape into a TensorShape in the
// requested TF data format. Fails with InvalidArgument for non-4-D shapes.
inline void MklSizesToTFSizes(OpKernelContext* context,
                              TensorFormat data_format_,
                              const MklShape& mkl_shape,
                              TensorShape* tf_shape) {
  size_t tf_dim = mkl_shape.GetDimension();
  const size_t* tf_sizes = mkl_shape.GetSizes();

  OP_REQUIRES(context, tf_dim == 4,
              errors::InvalidArgument("MKLSizesToTFSizes: size must be 4-dim"));

  // Batch (index 3) always comes first; the remaining dimensions are ordered
  // per the requested data format.
  std::vector<int32> sizes;
  if (data_format_ == FORMAT_NHWC) {
    sizes = {static_cast<int32>(tf_sizes[3]), static_cast<int32>(tf_sizes[1]),
             static_cast<int32>(tf_sizes[0]), static_cast<int32>(tf_sizes[2])};
  } else {
    sizes = {static_cast<int32>(tf_sizes[3]), static_cast<int32>(tf_sizes[2]),
             static_cast<int32>(tf_sizes[1]), static_cast<int32>(tf_sizes[0])};
  }

  OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(sizes, tf_shape));
}
#endif
// Maps a dimension character ('N','C','H','W') to its MklDims index; fatal
// for any other character.
inline int32 GetMklTensorDimIndex(char dimension) {
  if (dimension == 'N') return MklDims::N;
  if (dimension == 'C') return MklDims::C;
  if (dimension == 'H') return MklDims::H;
  if (dimension == 'W') return MklDims::W;
  LOG(FATAL) << "Invalid dimension: " << dimension;
  return -1;  // Avoid compiler warning about missing return value
}
#ifdef INTEL_MKL_ML_ONLY
// Size of the named dimension ('N','C','H','W') of an MKL-ML shape; fatal
// CHECK if the resolved index is out of range.
inline int64 GetMklTensorDim(const MklShape& mkl_shape, char dimension) {
  const int index = GetMklTensorDimIndex(dimension);
  CHECK(index >= 0 && index < mkl_shape.GetDimension())
      << "Invalid index from the dimension: " << index << ", " << dimension;
  return mkl_shape.dim_size(index);
}
#endif
// Shallow-copies both the data tensor and its meta-data tensor from logical
// input 'idx_in' to logical output 'idx_out'.
inline void CopyMklTensorInToOut(OpKernelContext* context, int idx_in,
                                 int idx_out) {
  const int num_inputs = context->num_inputs();
  const int num_outputs = context->num_outputs();
  const int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  const int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
  const int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  const int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(idx_data_in);
  const Tensor& meta = context->input(idx_meta_in);

  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  Tensor output(data.dtype());
  CHECK(output.CopyFrom(data, data.shape()));
  context->set_output(idx_data_out, output);

  Tensor meta_output(meta.dtype());
  CHECK(meta_output.CopyFrom(meta, meta.shape()));
  context->set_output(idx_meta_out, meta_output);
}
#ifdef INTEL_MKL_ML_ONLY
// Copies the TF data tensor of input 'idx_in' to output 'idx_out' under a
// new shape, and emits a dummy (non-MKL) meta-data output for it.
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  const int idx_data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int idx_data_out = GetTensorDataIndex(idx_out, context->num_outputs());

  // Output is a plain TF tensor, so its MKL shape is a dummy one.
  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);

  const Tensor& data = context->input(idx_data_in);
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  Tensor output(data.dtype());
  CHECK(output.CopyFrom(data, shape));
  context->set_output(idx_data_out, output);
}
#else
// Copies the TF data tensor of input 'idx_in' to output 'idx_out' under a
// new shape, and emits a dummy (non-MKL) MKL-DNN meta-data output for it.
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  const int idx_data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int idx_data_out = GetTensorDataIndex(idx_out, context->num_outputs());

  // Output is a plain TF tensor, so its MKL-DNN shape is a dummy one.
  MklDnnShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);

  const Tensor& data = context->input(idx_data_in);
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  Tensor output(data.dtype());
  CHECK(output.CopyFrom(data, shape));
  context->set_output(idx_data_out, output);
}
#endif
#ifdef INTEL_MKL_ML_ONLY
// Forwards the TF data tensor of input 'idx_in' to output 'idx_out' (ref
// inputs as refs, others by aliasing) and emits a dummy MKL meta-data
// output for it.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  const int idx_data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int idx_data_out = GetTensorDataIndex(idx_out, context->num_outputs());

  // Output is in plain TF layout: emit a dummy (non-MKL) shape.
  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#else
// Forwards the TF data tensor of input 'idx_in' to output 'idx_out' (ref
// inputs as refs, others by aliasing) and emits a dummy MKL-DNN meta-data
// output for it.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  const int idx_data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int idx_data_out = GetTensorDataIndex(idx_out, context->num_outputs());

  // Output is in plain TF layout: emit a dummy (non-MKL) shape.
  MklDnnShape dnn_shape_output;
  dnn_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, dnn_shape_output);

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#endif
// Forwards both the data tensor and its meta-data tensor from input
// 'idx_in' to output 'idx_out' (ref inputs as refs, others by aliasing).
inline void ForwardMklTensorInToOut(OpKernelContext* context, int idx_in,
                                    int idx_out) {
  const int num_inputs = context->num_inputs();
  const int num_outputs = context->num_outputs();
  const int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  const int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
  const int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  const int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
    context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
    context->set_output(idx_meta_out, context->input(idx_meta_in));
  }
}
#ifndef INTEL_MKL_ML_ONLY
// Set a dummy MKLDNN shape (called when the output is in TF format)
// Set a dummy MKLDNN shape (called when the output is in TF format).
inline void SetDummyMklDnnShapeOutput(OpKernelContext* context,
                                      uint32 idx_data_out) {
  MklDnnShape dummy_shape;
  dummy_shape.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_data_out, dummy_shape);
}
// Forwards the data tensor of input 'idx_in' to output 'idx_out' while
// attaching a caller-supplied MKL-DNN shape as the meta-data output.
inline void ForwardMklTensorInToOutWithMklShape(OpKernelContext* context,
                                                int idx_in, int idx_out,
                                                const MklDnnShape& mkl_shape) {
  const int idx_data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int idx_data_out = GetTensorDataIndex(idx_out, context->num_outputs());

  AllocateOutputSetMklShape(context, idx_out, mkl_shape);

  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_data_in, idx_data_out);
  } else {
    context->set_output(idx_data_out, context->input(idx_data_in));
  }
}
#endif
// Forward the MKL shape ONLY (used in elementwise and other ops where
// we call the eigen implementation and MKL shape is not used)
// Forward the MKL shape ONLY (used in elementwise and other ops where
// we call the eigen implementation and MKL shape is not used).
inline void ForwardMklMetaDataInToOut(OpKernelContext* context,
                                      uint32 idx_data_in,
                                      uint32_t idx_data_out) {
  const uint32 idx_meta_in =
      GetTensorMetaDataIndex(idx_data_in, context->num_inputs());
  const uint32 idx_meta_out =
      GetTensorMetaDataIndex(idx_data_out, context->num_outputs());

  // Refs forward as refs; everything else just aliases the input.
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(idx_meta_in, idx_meta_out);
  } else {
    context->set_output(idx_meta_out, context->input(idx_meta_in));
  }
}
#ifdef INTEL_MKL_ML_ONLY
// Set a dummy MKL shape (called when the output is in TF format)
// Set a dummy MKL shape (called when the output is in TF format).
inline void SetDummyMklShapeOutput(OpKernelContext* context,
                                   uint32 idx_data_out) {
  MklShape dummy_shape;
  dummy_shape.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_data_out, dummy_shape);
}
// We don't need these functions in MKLDNN. We have defined equality operator
// on MklDnnShape class directly.
// Checks if the TF shape for both MKL tensors is the same or not
// Returns: true if both TF shapes are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
const MklShape* input_shape_1) {
// Check for number of dimensions
if (input_shape_0->GetDimension() != input_shape_1->GetDimension()) {
return false;
}
// Check size of each dimension
size_t ndims = input_shape_0->GetDimension();
for (size_t i = 0; i < ndims; i++) {
if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
return false;
}
}
return true;
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
const TensorShape* input_shape_1) {
// Check for number of dimensions
if (input_shape_0->GetDimension() != input_shape_1->dims()) {
return false;
}
// Check size of each dimension
size_t ndims = input_shape_0->GetDimension();
for (size_t i = 0; i < ndims; i++) {
if (input_shape_0->tf_dim_size(i) != input_shape_1->dim_size(i)) {
return false;
}
}
return true;
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
// Checks if the TF shape for both tensors is the same or not.
// Returns: true if TF shapes for both are the same, false otherwise.
// Symmetric case: delegates to the (MklShape, TensorShape) overload.
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const MklShape* input_shape_1) {
  return MklCompareShapes(input_shape_1, input_shape_0);
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
// Checks if the TF shape for both tensors is the same or not.
// Returns: true if TF shapes for both are the same, false otherwise.
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const TensorShape* input_shape_1) {
  // Ranks must agree first.
  if (input_shape_0->dims() != input_shape_1->dims()) return false;

  // Then every per-dimension size.
  for (int i = 0; i < input_shape_0->dims(); i++) {
    if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
      return false;
    }
  }
  return true;
}
// These functions do not compile with MKL-DNN since mkl.h is missing.
// We may need to remove them later.
// TODO(intel_tf): Remove this routine when faster MKL layout conversion is
// out.
// Converts a 4-D float tensor from NHWC to NCHW ordering by transposing the
// (H*W) x C matrix of each batch element with MKL's out-of-place
// mkl_somatcopy ('R' = row-major, 'T' = transpose, alpha = 1).
// Assumes 'input' and '*output' are float tensors of equal element count;
// '*output' must be preallocated.
inline void MklNHWCToNCHW(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();

  int64 N = input.dim_size(0);
  int64 H = input.dim_size(1);
  int64 W = input.dim_size(2);
  int64 C = input.dim_size(3);
  // Each batch element is an independent transpose; parallelize over N.
  int64 stride_n = H * W * C;
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    mkl_somatcopy('R', 'T', H * W, C, 1, buf_in + n * stride_n, C,
                  buf_out + n * stride_n, H * W);
  }
}
// Converts a 4-D float tensor from NCHW to NHWC ordering by transposing the
// C x (H*W) matrix of each batch element with MKL's out-of-place
// mkl_somatcopy ('R' = row-major, 'T' = transpose, alpha = 1).
// Note: N/H/W/C are read from the *output* tensor's (NHWC) dimensions;
// '*output' must be preallocated with the target shape.
inline void MklNCHWToNHWC(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();

  int64 N = (*output)->dim_size(0);
  int64 H = (*output)->dim_size(1);
  int64 W = (*output)->dim_size(2);
  int64 C = (*output)->dim_size(3);
  // Each batch element is an independent transpose; parallelize over N.
  int64 stride_n = H * W * C;
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    mkl_somatcopy('R', 'T', C, H * W, 1, buf_in + n * stride_n, H * W,
                  buf_out + n * stride_n, C);
  }
}
#endif
// -------------------------------------------------------------------
#ifndef INTEL_MKL_ML_ONLY
/// Return MKL-DNN data type (memory::data_type) for input type T
///
/// @input None
/// @return memory::data_type corresponding to type T
template <typename T>
static memory::data_type MklDnnType();
/// Instantiation for float type. Add similar instantiations for other
/// type if needed.
/// Returns the MKL-DNN data-type tag (f32) corresponding to C++ float.
template <>
memory::data_type MklDnnType<float>() {
  return memory::data_type::f32;
}
/// Map TensorFlow's data format into MKL-DNN 3D data format
/// @input: TensorFlow data format
/// @return: memory::format corresponding to TensorFlow data format;
/// Fails with an error if invalid data format.
/// Map TensorFlow's data format into MKL-DNN 3D data format.
/// @input: TensorFlow data format
/// @return: memory::format corresponding to TensorFlow data format;
///          Fails with an error (TF_CHECK_OK) if invalid data format.
inline memory::format TFDataFormatToMklDnn3DDataFormat(TensorFormat format) {
  switch (format) {
    case FORMAT_NHWC:
      return memory::format::ndhwc;
    case FORMAT_NCHW:
      return memory::format::ncdhw;
    default:
      TF_CHECK_OK(
          Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
      return memory::format::format_undef;
  }
}
/// Map TensorFlow's data format into MKL-DNN data format
///
/// @input: TensorFlow data format
/// @return: memory::format corresponding to TensorFlow data format;
/// Fails with an error if invalid data format.
/// Map TensorFlow's data format into MKL-DNN data format.
///
/// @input: TensorFlow data format
/// @return: memory::format corresponding to TensorFlow data format;
///          Fails with an error (TF_CHECK_OK) if invalid data format.
inline memory::format TFDataFormatToMklDnnDataFormat(TensorFormat format) {
  switch (format) {
    case FORMAT_NHWC:
      return memory::format::nhwc;
    case FORMAT_NCHW:
      return memory::format::nchw;
    default:
      TF_CHECK_OK(
          Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
      return memory::format::format_undef;
  }
}
/// Map MKL-DNN data format to TensorFlow's data format
///
/// @input: memory::format
/// @return: Tensorflow data format corresponding to memory::format
/// Fails with an error if invalid data format.
/// Map MKL-DNN data format to TensorFlow's data format.
///
/// @input: memory::format
/// @return: Tensorflow data format corresponding to memory::format;
///          Fails with an error (TF_CHECK_OK) if invalid data format.
inline TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format) {
  const bool is_nhwc =
      (format == memory::format::nhwc || format == memory::format::ndhwc);
  const bool is_nchw =
      (format == memory::format::nchw || format == memory::format::ncdhw);

  if (is_nhwc) return FORMAT_NHWC;
  if (is_nchw) return FORMAT_NCHW;

  TF_CHECK_OK(Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
  // Return to prevent compiler warnings, otherwise TF_CHECK_OK will ensure
  // that we don't come here.
  return FORMAT_NHWC;
}
/// Map a TensorShape object into the memory::dims required by MKL-DNN.
///
/// The mapping is order-preserving: dimension i of the shape becomes
/// dimension i of the result. E.g., an NHWC-ordered shape yields
/// NHWC-ordered dims.
///
/// @input TensorShape object in shape
/// @return memory::dims corresponding to TensorShape
inline memory::dims TFShapeToMklDnnDims(const TensorShape& shape) {
  const int rank = shape.dims();
  memory::dims result;
  result.reserve(rank);
  for (int i = 0; i < rank; ++i) {
    result.push_back(shape.dim_size(i));
  }
  return result;
}
/// Map a TensorShape object into memory::dims in the NCHW order MKL-DNN
/// expects.
///
/// Unlike TFShapeToMklDnnDims, this does NOT preserve the input ordering:
/// whatever `format` the shape is in, the result is always {N, C, H, W}.
///
/// @input TensorShape object in shape
/// @return memory::dims in MKL-DNN required NCHW format
inline memory::dims TFShapeToMklDnnDimsInNCHW(const TensorShape& shape,
                                              TensorFormat format) {
  // Reject formats that have no MKL-DNN equivalent up front.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);
  const int batch = shape.dim_size(GetTensorDimIndex(format, 'N'));
  const int channels = shape.dim_size(GetTensorDimIndex(format, 'C'));
  const int height = shape.dim_size(GetTensorDimIndex(format, 'H'));
  const int width = shape.dim_size(GetTensorDimIndex(format, 'W'));
  // MKL-DNN requires dimensions in NCHW order.
  return memory::dims({batch, channels, height, width});
}
/// Map a 5D TensorShape into memory::dims in the NCDHW order MKL-DNN
/// expects; counterpart of TFShapeToMklDnnDimsInNCHW for 3D-spatial ops.
inline memory::dims TFShapeToMklDnnDimsInNCDHW(const TensorShape& shape,
                                               TensorFormat format) {
  // Reject formats that have no MKL-DNN equivalent up front.
  CHECK_NE(TFDataFormatToMklDnn3DDataFormat(format),
           memory::format::format_undef);
  const int batch = shape.dim_size(GetTensorDimIndex<3>(format, 'N'));
  const int channels = shape.dim_size(GetTensorDimIndex<3>(format, 'C'));
  const int planes = shape.dim_size(GetTensorDimIndex<3>(format, '0'));
  const int rows = shape.dim_size(GetTensorDimIndex<3>(format, '1'));
  const int cols = shape.dim_size(GetTensorDimIndex<3>(format, '2'));
  // MKL-DNN requires dimensions in NCDHW order.
  return memory::dims({batch, channels, planes, rows, cols});
}
/// Overload of TFShapeToMklDnnDimsInNCHW that starts from memory::dims
/// (in `format` order) instead of a TensorShape; result is always NCHW.
inline memory::dims MklDnnDimsInNCHW(const memory::dims& in_dims,
                                     TensorFormat format) {
  // Reject formats that have no MKL-DNN equivalent up front.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);
  const int batch = in_dims[GetTensorDimIndex(format, 'N')];
  const int channels = in_dims[GetTensorDimIndex(format, 'C')];
  const int height = in_dims[GetTensorDimIndex(format, 'H')];
  const int width = in_dims[GetTensorDimIndex(format, 'W')];
  // MKL-DNN requires dimensions in NCHW order.
  return memory::dims({batch, channels, height, width});
}
/// Map an MKL-DNN memory::dims object into a TensorShape, preserving
/// dimension order.
///
/// @input MKL-DNN memory::dims object
/// @output TensorShape corresponding to memory::dims
inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) {
  std::vector<int32> sizes;
  sizes.reserve(dims.size());
  for (size_t i = 0; i < dims.size(); ++i) {
    sizes.push_back(dims[i]);
  }
  TensorShape ret;
  // MakeShape validates the sizes; a failure here is a programming error.
  CHECK_EQ(TensorShapeUtils::MakeShape(sizes, &ret).ok(), true);
  return ret;
}
/// Compute row-major (TensorFlow-order) strides for a tensor shape.
/// E.g., for dims {1, 2, 3, 4} the innermost dimension has size 4, so the
/// strides are {4*3*2, 4*3, 4, 1} = {24, 12, 4, 1}: stride[d] is the
/// product of all dimension sizes to the right of d.
///
/// @input Tensorflow shape in memory::dims type
/// @return memory::dims containing strides for the tensor.
inline memory::dims CalculateTFStrides(const memory::dims& dims_tf_order) {
  CHECK_GT(dims_tf_order.size(), 0);
  memory::dims strides(dims_tf_order.size());
  // Walk from the innermost dimension outwards, carrying the running
  // product of the sizes already visited.
  int running_product = 1;
  for (int d = static_cast<int>(dims_tf_order.size()) - 1; d >= 0; --d) {
    strides[d] = running_product;
    running_product *= dims_tf_order[d];
  }
  return strides;
}
/// Map a TensorFlow padding type onto the MKL-DNN padding kind.
/// MKL-DNN supports only zero padding, so every TensorFlow padding mode
/// maps to padding_kind::zero and `pad` is intentionally unused.
inline padding_kind TFPaddingToMklDnnPadding(Padding pad) {
  // MKL-DNN only supports zero padding.
  return padding_kind::zero;
}
/// Helper function to create memory descriptor in Blocked format
///
/// @input: Tensor dimensions
/// @input: strides corresponding to dimensions. One can use utility
///         function such as CalculateTFStrides to compute strides
///         for given dimensions.
/// @input: dtype - MKL-DNN data type of the elements.
/// @return: memory::desc object corresponding to blocked memory format
///          for given dimensions and strides.
inline memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
                                               const memory::dims& strides,
                                               memory::data_type dtype) {
  CHECK_EQ(dim.size(), strides.size());
  // We have to construct memory descriptor in a C style. This is not at all
  // ideal but MKLDNN does not offer any API to construct descriptor in
  // blocked format except a copy constructor that accepts
  // mkldnn_memory_desc_t.
  mkldnn_memory_desc_t md;
  md.primitive_kind = mkldnn_memory;
  md.ndims = dim.size();
  md.format = mkldnn_blocked;
  md.data_type = memory::convert_to_c(dtype);
  for (size_t i = 0; i < dim.size(); i++) {
    // block_dims[i] == 1 means "no blocking": each block holds a single
    // element, so the within-block strides (strides[1]) are all 1 and the
    // between-block strides (strides[0]) are the caller-supplied strides.
    md.layout_desc.blocking.block_dims[i] = 1;
    md.layout_desc.blocking.strides[1][i] = 1;
    md.layout_desc.blocking.strides[0][i] = strides[i];
    // No padding: padded extent equals the logical extent, and data starts
    // at offset 0 within each padded dimension.
    md.layout_desc.blocking.padding_dims[i] = dim[i];
    md.layout_desc.blocking.offset_padding_to_data[i] = 0;
    md.dims[i] = dim[i];
  }
  // No offset to the first element of the whole buffer either.
  md.layout_desc.blocking.offset_padding = 0;
  return memory::desc(md);
}
template <typename T>
inline primitive FindOrCreateReorder(const memory* from, const memory* to);
/*
 * Class to represent all the resources corresponding to a tensor in TensorFlow
 * that are required to execute an operation (such as Convolution).
 *
 * It owns the user-format memory primitive, an optional reorder memory
 * primitive, the operation's memory descriptor and an optional scratch
 * buffer; everything it allocates is released in the destructor.
 */
template <typename T>
class MklDnnData {
 private:
  /// MKL-DNN memory primitive for input user memory
  memory* user_memory_;

  /// MKL-DNN memory primitive in case input or output reorder is needed.
  memory* reorder_memory_;

  /// Operations memory descriptor
  memory::desc* op_md_;

  // Flag to indicate if data is 3D or not.
  bool bIs3D;

  /// Operations temp buffer
  void* allocated_buffer_;

  /// CPU engine on which operation will be executed
  const engine* cpu_engine_;

 public:
  explicit MklDnnData(const engine* e)
      : user_memory_(nullptr),
        reorder_memory_(nullptr),
        op_md_(nullptr),
        // BUGFIX: bIs3D was previously left uninitialized, so calling
        // GetIs3D() before SetIs3DData() read an indeterminate value (UB).
        bIs3D(false),
        allocated_buffer_(nullptr),
        cpu_engine_(e) {}

  ~MklDnnData() {
    // BUGFIX: the buffer obtained via AllocateBuffer() was never released,
    // leaking once per MklDnnData instance that called AllocateBuffer().
    if (allocated_buffer_ != nullptr) {
      cpu_allocator()->DeallocateRaw(allocated_buffer_);
    }
    cpu_engine_ = nullptr;  // We don't own this.
    delete (user_memory_);
    delete (reorder_memory_);
    delete (op_md_);
  }

  /// Returns the (mutable) raw data buffer backing `tensor`.
  inline void* GetTensorBuffer(const Tensor* tensor) const {
    CHECK_NOTNULL(tensor);
    return const_cast<void*>(
        static_cast<const void*>(tensor->flat<T>().data()));
  }

  void SetIs3DData(bool bIs3D_) { bIs3D = bIs3D_; }
  bool GetIs3D() { return bIs3D; }

  /// Set user memory primitive using specified dimensions, memory format and
  /// data_buffer. Function automatically uses element data type by using
  /// input type T used for creating call object.
  ///
  /// In a nutshell, function allows user to describe the input tensor to
  /// an operation. E.g., filter of Conv2D is of shape {1, 2, 3, 4}, and
  /// memory format HWIO, and the buffer that contains actual values is
  /// pointed by data_buffer.
  inline void SetUsrMem(const memory::dims& dim, memory::format fm,
                        void* data_buffer = nullptr) {
    auto md = memory::desc(dim, MklDnnType<T>(), fm);
    SetUsrMem(md, data_buffer);
  }

  /// As above, but takes the data buffer from `tensor`.
  inline void SetUsrMem(const memory::dims& dim, memory::format fm,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(dim, fm, GetTensorBuffer(tensor));
  }

  /// Helper function to create memory descriptor in Blocked format
  ///
  /// @input: Tensor dimensions
  /// @input: strides corresponding to dimensions. One can use utility
  ///         function such as CalculateTFStrides to compute strides
  ///         for given dimensions.
  /// @return: memory::desc object corresponding to blocked memory format
  ///          for given dimensions and strides.
  static inline memory::desc CreateBlockedMemDesc(const memory::dims& dim,
                                                  const memory::dims& strides) {
    return CreateBlockedMemDescHelper(dim, strides, MklDnnType<T>());
  }

  /// A version of SetUsrMem call that allows user to create memory in blocked
  /// format. So in addition to accepting dimensions, it also accepts strides.
  /// This allows user to create memory for tensor in a format that is not
  /// supported by MKLDNN. E.g., MKLDNN does not support tensor format for 6
  /// dimensional tensor as a native format. But by using blocked format, a
  /// user can create memory for 6D tensor.
  inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
                        void* data_buffer = nullptr) {
    CHECK_EQ(dim.size(), strides.size());
    auto blocked_md = MklDnnData<T>::CreateBlockedMemDesc(dim, strides);
    SetUsrMem(blocked_md, data_buffer);
  }

  /// As above, but takes the data buffer from `tensor`.
  inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(dim, strides, GetTensorBuffer(tensor));
  }

  /// A version of function to set user memory primitive that accepts memory
  /// descriptor directly, instead of accepting dimensions and format. This
  /// function is more generic than the ones above, but those are sufficient
  /// in most cases.
  inline void SetUsrMem(const memory::desc& md, void* data_buffer = nullptr) {
    auto pd = memory::primitive_desc(md, *cpu_engine_);
    SetUsrMem(pd, data_buffer);
  }

  /// A version of SetUsrMem with memory descriptor and tensor
  inline void SetUsrMem(const memory::desc& md, const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(md, GetTensorBuffer(tensor));
  }

  /// A version of function to set user memory primitive that accepts primitive
  /// descriptor directly. All other SetUsrMem overloads funnel into this one.
  /// If data_buffer is null, MKL-DNN allocates the backing buffer itself.
  inline void SetUsrMem(const memory::primitive_desc& pd,
                        void* data_buffer = nullptr) {
    CHECK_NOTNULL(cpu_engine_);
    // TODO(nhasabni): can we remove dynamic memory allocation?
    if (data_buffer) {
      user_memory_ = new memory(pd, data_buffer);
    } else {
      user_memory_ = new memory(pd);
    }
  }

  /// A version of SetUsrMem with primitive descriptor and tensor
  inline void SetUsrMem(const memory::primitive_desc& pd,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(pd, GetTensorBuffer(tensor));
  }

  /// Get function for user memory primitive.
  inline const memory* GetUsrMem() const { return user_memory_; }

  /// Get function for primitive descriptor of user memory primitive.
  inline const memory::primitive_desc GetUsrMemPrimDesc() const {
    CHECK_NOTNULL(user_memory_);
    return user_memory_->get_primitive_desc();
  }

  /// Get function for descriptor of user memory.
  inline memory::desc GetUsrMemDesc() {
    // This is ugly. Why MKL-DNN does not provide desc() method of const type??
    const memory::primitive_desc pd = GetUsrMemPrimDesc();
    return const_cast<memory::primitive_desc*>(&pd)->desc();
  }

  /// Get function for data buffer of user memory primitive.
  inline void* GetUsrMemDataHandle() const {
    CHECK_NOTNULL(user_memory_);
    return user_memory_->get_data_handle();
  }

  /// Set function for data buffer of user memory primitive.
  inline void SetUsrMemDataHandle(void* data_buffer) {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(data_buffer);
    user_memory_->set_data_handle(data_buffer);
  }

  /// Set function for data buffer of user memory primitive.
  inline void SetUsrMemDataHandle(const Tensor* tensor) {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(tensor);
    user_memory_->set_data_handle(GetTensorBuffer(tensor));
  }

  /// Allocate a scratch buffer of `size` bytes; owned by this object and
  /// released in the destructor.
  inline void AllocateBuffer(size_t size) {
    const int64 kMemoryAlignment = 64;  // For AVX512 memory alignment.
    allocated_buffer_ = cpu_allocator()->AllocateRaw(kMemoryAlignment, size);
  }

  inline void* GetAllocatedBuffer() { return allocated_buffer_; }

  /// Get the memory primitive for input and output of an op. If inputs
  /// to an op require reorders, then this function returns memory primitive
  /// for reorder. Otherwise, it will return memory primitive for user memory.
  ///
  /// E.g., Conv2D(I, F) is a primitive with I and F being inputs. Then to
  /// execute Conv2D, we need memory primitive for I and F. But if reorder is
  /// required for I and F (say I_r is reorder primitive for I; F_r is reorder
  /// primitive for F), then we need I_r and F_r to perform Conv2D.
  inline const memory& GetOpMem() const {
    return reorder_memory_ ? *reorder_memory_ : *user_memory_;
  }

  /// Set memory descriptor of an operation in terms of dimensions and memory
  /// format. E.g., For Conv2D, the dimensions would be same as user dimensions
  /// but memory::format would be mkldnn::any because we want MKL-DNN to choose
  /// best layout/format for given input dimensions.
  inline void SetOpMemDesc(const memory::dims& dim, memory::format fm) {
    // TODO(nhasabni): can we remove dynamic memory allocation?
    op_md_ = new memory::desc(dim, MklDnnType<T>(), fm);
  }

  /// Get function for memory descriptor for an operation
  inline const memory::desc& GetOpMemDesc() const { return *op_md_; }

  /// Predicate that checks if we need to reorder user's memory into memory
  /// pointed by op_pd.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///         operation
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool IsReorderNeeded(const memory::primitive_desc& op_pd) const {
    CHECK_NOTNULL(user_memory_);
    return op_pd != user_memory_->get_primitive_desc();
  }

  /// Predicate that checks if we need to reorder user's memory into memory
  /// based on the provided format.
  ///
  /// @input: target_format - memory format of the given input of an
  ///         operation
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool IsReorderNeeded(const memory::format& target_format) const {
    CHECK_NOTNULL(user_memory_);
    return target_format !=
           user_memory_->get_primitive_desc().desc().data.format;
  }

  /// Function to create a reorder from memory pointed by from to memory
  /// pointed by to. Returns created primitive.
  inline primitive CreateReorder(const memory* from, const memory* to) const {
    CHECK_NOTNULL(from);
    CHECK_NOTNULL(to);
    return reorder(*from, *to);
  }

  /// Function to handle input reordering
  ///
  /// Check if we need to reorder this input of an operation.
  /// Return true and allocate reorder memory primitive if reorder is needed.
  /// Otherwise, return false and do not allocate reorder memory primitive.
  ///
  /// To check if reorder is needed, this function compares memory primitive
  /// descriptor of an operation (op_pd) for the given input with the
  /// user-specified memory primitive descriptor.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///         operation
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd);
      net->push_back(CreateReorder(user_memory_, reorder_memory_));
      return true;
    }
    return false;
  }

  /// TODO: this is a faster path with reorder primitive cache compared with
  /// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove
  /// slow path in the future
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd) {
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      // primitive reuse don't allow two same reorder prim in
      // one stream, so submit it immediately
      reorder_memory_ = new memory(op_pd);
      std::vector<primitive> net;
      net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_));
      stream(stream::kind::eager).submit(net).wait();
      return true;
    }
    return false;
  }

  /// Overloaded version of above function that accepts memory buffer
  /// where output of reorder needs to be stored.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///         operation
  /// @reorder_data_handle - memory buffer where output of reorder needs to be
  ///                        stored. Primitive does not check if buffer is
  ///                        enough size to write.
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  void* reorder_data_handle,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(reorder_data_handle);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd, reorder_data_handle);
      net->push_back(CreateReorder(user_memory_, reorder_memory_));
      return true;
    }
    return false;
  }

  /// TODO: this is a faster path with reorder primitive cache compared with
  /// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove
  /// slow path in the future
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  void* reorder_data_handle) {
    CHECK_NOTNULL(reorder_data_handle);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      // primitive reuse don't allow two same reorder prim in
      // one stream, so submit it immediately
      std::vector<primitive> net;
      reorder_memory_ = new memory(op_pd, reorder_data_handle);
      net.push_back(FindOrCreateReorder<T>(user_memory_, reorder_memory_));
      stream(stream::kind::eager).submit(net).wait();
      return true;
    }
    return false;
  }

  /// Another overloaded version of CheckReorderToOpMem that accepts Tensor
  /// where output of reorder needs to be stored.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///         operation
  /// @reorder_tensor - Tensor whose buffer is to be used to store output of
  ///                   reorder. Primitive does not check if buffer is
  ///                   enough size to write.
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  Tensor* reorder_tensor,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(reorder_tensor);
    return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor), net);
  }

  /// TODO: this is a faster path with reorder primitive cache compared with
  /// CheckReorderToOpMem(..., std::vector<primitive>* net), will remove
  /// slow path in the future
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  Tensor* reorder_tensor) {
    CHECK_NOTNULL(reorder_tensor);
    return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor));
  }

  /// Function to handle output reorder
  ///
  /// This function performs very similar functionality as input reordering
  /// function above. The only difference is that this function does not add
  /// reorder primitive to the net. The reason for this is: the reorder
  /// primitive for output needs to be added to the list only after operation
  /// has executed. But we need to prepare a temporary buffer in case output
  /// reorder is needed. And this temporary buffer will hold the output of
  /// an operation before it is fed to reorder primitive.
  ///
  /// @input memory primitive descriptor for the given output of an operation
  /// @return: true in case reorder of output is needed; false, otherwise.
  inline bool PrepareReorderToUserMemIfReq(
      const memory::primitive_desc& op_pd) {
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd);
      return true;
    }
    return false;
  }

  /// Function to actually insert reorder primitive in the net
  ///
  /// This function completes remaining part of output reordering. It inserts
  /// a reordering primitive from the temporary buffer that holds the output
  /// to the user-specified output buffer.
  ///
  /// @input: net - net to which to add reorder primitive
  inline void InsertReorderToUserMem(std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(reorder_memory_);
    net->push_back(CreateReorder(reorder_memory_, user_memory_));
  }

  /// TODO: this is a faster path with reorder primitive cache compared with
  /// InsertReorderToUserMem(std::vector<primitive>* net), will remove
  /// slow path in the future
  inline void InsertReorderToUserMem() {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(reorder_memory_);
    // primitive reuse don't allow two same reorder prim in
    // one stream, so submit it immediately
    std::vector<primitive> net;
    net.push_back(FindOrCreateReorder<T>(reorder_memory_, user_memory_));
    stream(stream::kind::eager).submit(net).wait();
  }
};
/// Base class for primitives that are cached and reused across op
/// invocations (see MklPrimitiveFactory).
class MklPrimitive {
 public:
  virtual ~MklPrimitive() = default;

  /// Placeholder data pointer handed to MKL-DNN memory objects whose real
  /// buffer is bound later via set_data_handle(); MKL-DNN never operates
  /// on it.
  unsigned char* DummyData = nullptr;
};

const mkldnn::memory::dims NONE_DIMS = {};
template <typename T>
class MklPrimitiveFactory {
public:
MklPrimitiveFactory() {}
~MklPrimitiveFactory() {}
MklPrimitive* GetOp(const string& key) {
auto& map = MklPrimitiveFactory<T>::GetHashMap();
auto stream_iter = map.find(key);
if (stream_iter == map.end()) {
return nullptr;
} else {
CHECK(stream_iter->second != nullptr) << "nullptr present in map";
return stream_iter->second;
}
}
void SetOp(const string& key, MklPrimitive* op) {
auto& map = MklPrimitiveFactory<T>::GetHashMap();
auto stream_iter = map.find(key);
CHECK(stream_iter == map.end());
map[key] = op;
}
private:
static inline std::unordered_map<string, MklPrimitive*>& GetHashMap() {
static thread_local std::unordered_map<string, MklPrimitive*> map_;
return map_;
}
};
// Utility class that builds the string keys used by the MKL primitive pool.
// Each piece added via AddAsKey is appended to the key followed by an 'x'
// delimiter; scalars are appended as their raw bytes.
class FactoryKeyCreator {
 public:
  FactoryKeyCreator() { key_.reserve(kMaxKeyLength); }
  ~FactoryKeyCreator() {}

  void AddAsKey(const string& str) { Append(str); }

  void AddAsKey(const mkldnn::memory::dims& dims) {
    for (size_t i = 0; i < dims.size(); i++) {
      AddAsKey<int>(dims[i]);
    }
  }

  template <typename T>
  void AddAsKey(const T data) {
    auto buffer = reinterpret_cast<const char*>(&data);
    Append(StringPiece(buffer, sizeof(T)));
  }

  string GetKey() { return key_; }

 private:
  string key_;
  const char delimiter = 'x';
  const int kMaxKeyLength = 256;

  void Append(StringPiece s) {
    key_.append(s.ToString());
    key_.append(1, delimiter);
  }
};
/// Pick the preferred MKL-DNN memory format for `channel` channels based on
/// the available CPU vector width. Blocked layouts (nChw16c / nChw8c) are
/// only valid when the channel count is a multiple of the block size.
///
/// @input: channel - number of channels of the tensor.
/// @input: is_2d - true for 4D (2D-spatial) tensors, false for 5D.
/// @return: the desired memory::format.
static inline memory::format get_desired_format(int channel,
                                                bool is_2d = true) {
  memory::format fmt_desired = memory::format::any;
  // BUGFIX: the AVX512 branch previously selected the blocked-by-16 layout
  // without verifying channel % 16 == 0, unlike the AVX2 branch which
  // guards its blocked-by-8 layout with channel % 8 == 0.
  if (port::TestCPUFeature(port::CPUFeature::AVX512F) &&
      (channel % 16) == 0) {
    fmt_desired = is_2d ? memory::format::nChw16c : memory::format::nCdhw16c;
  } else if (port::TestCPUFeature(port::CPUFeature::AVX2) &&
             (channel % 8) == 0) {
    fmt_desired = is_2d
                      ? memory::format::nChw8c
                      : memory::format::ncdhw;  // no AVX2 support for 3D yet.
  } else {
    fmt_desired = is_2d ? memory::format::nchw : memory::format::ncdhw;
  }
  return fmt_desired;
}
class MklReorderPrimitive : public MklPrimitive {
public:
explicit MklReorderPrimitive(const memory* from, const memory* to) {
Setup(from, to);
}
~MklReorderPrimitive() {}
std::shared_ptr<primitive> GetPrimitive() {
return context_.reorder_prim;
}
void SetMemory(const memory* from, const memory* to) {
context_.src_mem->set_data_handle(from->get_data_handle());
context_.dst_mem->set_data_handle(to->get_data_handle());
}
private:
struct ReorderContext {
std::shared_ptr<mkldnn::memory> src_mem;
std::shared_ptr<mkldnn::memory> dst_mem;
std::shared_ptr<primitive> reorder_prim;
ReorderContext():
src_mem(nullptr), dst_mem(nullptr), reorder_prim(nullptr) {
}
} context_;
engine cpu_engine_ = engine(engine::cpu, 0);
void Setup(const memory* from, const memory* to) {
context_.src_mem.reset(new memory(
{from->get_primitive_desc().desc(), cpu_engine_}, DummyData));
context_.dst_mem.reset(new memory(
{to->get_primitive_desc().desc(), cpu_engine_}, DummyData));
context_.reorder_prim = std::make_shared<mkldnn::reorder>(
reorder(*context_.src_mem, *context_.dst_mem));
}
};
/// Factory that caches MklReorderPrimitive objects keyed on the formats,
/// data types and dims of both endpoints of the reorder.
template <typename T>
class MklReorderPrimitiveFactory : public MklPrimitiveFactory<T> {
 public:
  /// Return a cached reorder for (from -> to), creating and caching one on
  /// first use. Data handles are rebound to the caller's buffers each call.
  static MklReorderPrimitive* Get(const memory* from, const memory* to) {
    auto* prim = static_cast<MklReorderPrimitive*>(
        MklReorderPrimitiveFactory<T>::GetInstance().GetReorder(from, to));
    if (prim == nullptr) {
      prim = new MklReorderPrimitive(from, to);
      MklReorderPrimitiveFactory<T>::GetInstance().SetReorder(from, to, prim);
    }
    prim->SetMemory(from, to);
    return prim;
  }

  static MklReorderPrimitiveFactory& GetInstance() {
    static MklReorderPrimitiveFactory instance_;
    return instance_;
  }

 private:
  MklReorderPrimitiveFactory() {}
  ~MklReorderPrimitiveFactory() {}

  /// Build the cache key: "reorder" + format/dtype/dims of both endpoints.
  static string CreateKey(const memory* from, const memory* to) {
    FactoryKeyCreator key_creator;
    auto const& src = from->get_primitive_desc().desc().data;
    auto const& dst = to->get_primitive_desc().desc().data;
    key_creator.AddAsKey(string("reorder"));
    key_creator.AddAsKey(static_cast<int>(src.format));
    key_creator.AddAsKey(static_cast<int>(src.data_type));
    key_creator.AddAsKey(memory::dims(src.dims, src.dims + src.ndims));
    key_creator.AddAsKey(static_cast<int>(dst.format));
    key_creator.AddAsKey(static_cast<int>(dst.data_type));
    key_creator.AddAsKey(memory::dims(dst.dims, dst.dims + dst.ndims));
    return key_creator.GetKey();
  }

  MklPrimitive* GetReorder(const memory* from, const memory* to) {
    return this->GetOp(CreateKey(from, to));
  }

  void SetReorder(const memory* from, const memory* to, MklPrimitive* op) {
    this->SetOp(CreateKey(from, to), op);
  }
};
/// Find (or create) a reorder from the memory pointed to by `from` to the
/// memory pointed to by `to`, fetching it from the primitive pool when it
/// is already cached. Returns the primitive.
template <typename T>
inline primitive FindOrCreateReorder(const memory* from, const memory* to) {
  CHECK_NOTNULL(from);
  CHECK_NOTNULL(to);
  auto* cached = MklReorderPrimitiveFactory<T>::Get(from, to);
  return *cached->GetPrimitive();
}
#endif // INTEL_MKL_DNN
} // namespace tensorflow
#endif // INTEL_MKL
#endif // TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
|
ChMatrix.h | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Authors: Alessandro Tasora, Radu Serban
// =============================================================================
#ifndef CHMATRIX_H
#define CHMATRIX_H
#include <immintrin.h>
#include "chrono/core/ChCoordsys.h"
#include "chrono/core/ChException.h"
#include "chrono/ChConfig.h"
#include "chrono/serialization/ChArchive.h"
#include "chrono/serialization/ChArchiveAsciiDump.h"
namespace chrono {
//
// FAST MACROS TO SPEEDUP CODE
//
// Set/get element (a,b) of a 3x3 matrix stored row-major via SetElementN/GetElementN.
#define Set33Element(a, b, val) SetElementN(((a * 3) + (b)), val)
#define Get33Element(a, b) GetElementN((a * 3) + (b))
// Set/get element (a,b) of a 3x4 matrix stored row-major.
#define Set34Element(a, b, val) SetElementN(((a * 4) + (b)), val)
#define Get34Element(a, b) GetElementN((a * 4) + (b))
// Set all four entries of row `a` of a 3x4 matrix `ma` at once.
// NOTE(review): expands to four statements without a do{}while(0) wrapper,
// so it is unsafe inside an unbraced if/else — keep call sites braced.
#define Set34Row(ma, a, val0, val1, val2, val3) \
ma.SetElementN((a * 4), val0); \
ma.SetElementN((a * 4) + 1, val1); \
ma.SetElementN((a * 4) + 2, val2); \
ma.SetElementN((a * 4) + 3, val3);
// Set/get element (a,b) of a 4x4 matrix stored row-major.
#define Set44Element(a, b, val) SetElementN(((a * 4) + (b)), val)
#define Get44Element(a, b) GetElementN((a * 4) + (b))
// forward declaration
template <class Real = double>
class ChMatrixDynamic;
///
/// ChMatrix:
///
/// A base class for matrix objects (tables of NxM numbers).
/// To access elements, the indexes start from zero, and
/// you must indicate first row, then column, that is: m(2,4)
/// means the element at 3rd row, 5th column.
/// This is an abstract class, so you cannot instantiate
/// objects from it: you must rather create matrices using the
/// specialized child classes like ChMatrixDynamic, ChMatrixNM,
/// ChMatrix33 and so on; all of them have this same base class.
/// Warning: for optimization reasons, not all functions will
/// check about boundaries of element indexes and matrix sizes (in
/// some cases, if sizes are wrong, debug asserts are used).
///
/// Further info at the @ref mathematical_objects manual page.
template <class Real = double>
class ChMatrix {
protected:
//
// DATA
//
int rows;
int columns;
Real* address;
public:
//
// CONSTRUCTORS (none - abstract class that must be implemented with child classes)
//
virtual ~ChMatrix() {}
//
// OPERATORS OVERLOADING
//
/// Parenthesis () operator: access a single element by row and column
/// (indexes start from 0), e.g. m(3,5) is the element at the 4th row,
/// 6th column. Returned by reference, so it can be assigned: m(1,2)=10.
Real& operator()(const int row, const int col) {
    assert(row >= 0 && col >= 0 && row < rows && col < columns);
    return address[col + row * columns];
}
const Real& operator()(const int row, const int col) const {
    assert(row >= 0 && col >= 0 && row < rows && col < columns);
    return address[col + row * columns];
}
/// Parenthesis () operator: access the el-th element counting row by row
/// (indexes start from 0), e.g. m(3) is the 4th element. Mostly useful for
/// Nx1 matrices (N-element vectors). Returned by reference, so assignable.
Real& operator()(const int el) {
    assert(el >= 0 && el < rows * columns);
    return address[el];
}
const Real& operator()(const int el) const {
    assert(el >= 0 && el < rows * columns);
    return address[el];
}
/// The [] operator returns a pointer to the first element of the given row,
/// kept for compatibility with 2D-array-style access: mymatrix[i][j] reads
/// the element at row i, column j.
Real* operator[](const int row) {
    assert(row >= 0 && row < rows);
    return address + row * columns;
}
const Real* operator[](const int row) const {
    assert(row >= 0 && row < rows);
    return address + row * columns;
}
/// Multiplies this matrix by a scalar factor, in place.
/// Delegates to MatrScale (defined elsewhere in this class).
ChMatrix<Real>& operator*=(const Real factor) {
    MatrScale(factor);
    return *this;
}
/// Increments this matrix by another matrix, in place.
/// Delegates to MatrInc; matbis is expected to have matching size
/// (presumably checked inside MatrInc — not visible here).
template <class RealB>
ChMatrix<Real>& operator+=(const ChMatrix<RealB>& matbis) {
    MatrInc(matbis);
    return *this;
}
/// Decrements this matrix by another matrix, in place.
/// Delegates to MatrDec.
template <class RealB>
ChMatrix<Real>& operator-=(const ChMatrix<RealB>& matbis) {
    MatrDec(matbis);
    return *this;
}
/// Equality: true when Equals(other) holds (element-wise comparison is
/// implemented by Equals, defined elsewhere in this class).
bool operator==(const ChMatrix<Real>& other) { return Equals(other); }
/// Inequality: logical negation of operator==.
bool operator!=(const ChMatrix<Real>& other) { return !Equals(other); }
/// Assignment operator; self-assignment is a no-op.
ChMatrix<Real>& operator=(const ChMatrix<Real>& matbis) {
    if (&matbis != this)
        CopyFromMatrix(matbis);
    return *this;
}
/// Assignment from a matrix with a different scalar type; no self-assignment
/// check needed since the types differ.
template <class RealB>
ChMatrix<Real>& operator=(const ChMatrix<RealB>& matbis) {
    CopyFromMatrix(matbis);
    return *this;
}
//
// FUNCTIONS
//
/// Sets the element at (row,col). Indexes start with zero.
void SetElement(int row, int col, Real elem) {
    assert(row >= 0 && col >= 0 && row < rows && col < columns);
    address[col + row * columns] = elem;
}
/// Gets the element at (row,col). Indexes start with zero.
/// Returns a copy of the value; use Element() to access the original
/// element by reference.
Real GetElement(int row, int col) {
    assert(row >= 0 && col >= 0 && row < rows && col < columns);
    return address[col + row * columns];
}
Real GetElement(int row, int col) const {
    assert(row >= 0 && col >= 0 && row < rows && col < columns);
    return address[col + row * columns];
}
/// Sets the Nth element, counting row after row.
void SetElementN(int index, Real elem) {
    assert(index >= 0 && index < (rows * columns));
    address[index] = elem;
}
/// Gets the Nth element, counting row after row. Returns a copy.
Real GetElementN(int index) {
    assert(index >= 0 && index < (rows * columns));
    return address[index];
}
const Real GetElementN(int index) const {
    assert(index >= 0 && index < (rows * columns));
    return address[index];
}
/// Access a single element of the matrix, by
/// supplying the row and the column (indexes start from 0).
/// Value is returned by reference, so it can be modified, like in m.Element(1,2)=10.
Real& Element(int row, int col) {
    assert(row >= 0 && col >= 0 && row < rows && col < columns);  // boundary checks
    return (*(address + col + (row * columns)));
}
/// Const overload: read-only reference to element (row,col).
const Real& Element(int row, int col) const {
    assert(row >= 0 && col >= 0 && row < rows && col < columns);  // boundary checks
    return (*(address + col + (row * columns)));
}
/// Access a single element of the matrix, the Nth element, counting row after row.
/// Value is returned by reference, so it can be modified, like in m.Element(5)=10.
Real& ElementN(int index) {
    assert(index >= 0 && index < (rows * columns));  // boundary check
    return (*(address + index));
}
/// Const overload: read-only reference to the Nth element (row-major order).
const Real& ElementN(int index) const {
    assert(index >= 0 && index < (rows * columns));  // boundary check
    return (*(address + index));
}
/// Access directly the "Real* address" buffer. Warning! this is a low level
/// function, it should be used in rare cases, if really needed!
Real* GetAddress() { return address; }
/// Const overload: read-only access to the raw element buffer.
const Real* GetAddress() const { return address; }
/// Gets the number of rows
int GetRows() const { return rows; }
/// Gets the number of columns
int GetColumns() const { return columns; }
/// Reallocate memory for a new size. VIRTUAL! Must be implemented by child classes!
/// (The base implementation is intentionally a no-op.)
virtual void Resize(int nrows, int ncols) {}
/// Swaps the columns a and b
void SwapColumns(int a, int b) {
Real temp;
for (int i = 0; i < rows; i++) {
temp = GetElement(i, a);
SetElement(i, a, GetElement(i, b));
SetElement(i, b, temp);
}
}
/// Swap the rows a and b
void SwapRows(int a, int b) {
Real temp;
for (int i = 0; i < columns; i++) {
temp = GetElement(a, i);
SetElement(a, i, GetElement(b, i));
SetElement(b, i, temp);
}
}
/// Fill the diagonal elements, given a sample.
/// Note that the matrix must already be square (no check for
/// rectangular matrices!), and the extra-diagonal elements are
/// not modified -this function does not set them to 0-
void FillDiag(Real sample) {
    for (int i = 0; i < rows; ++i)
        Element(i, i) = sample;
}
/// Fill the matrix with the same value in all elements
void FillElem(Real sample) {
    const int count = rows * columns;
    for (int i = 0; i < count; ++i)
        ElementN(i) = sample;
}
/// Fill the matrix with random float numbers, falling within the
/// "max"/"min" range.
void FillRandom(Real max, Real min) {
    const int count = rows * columns;
    for (int i = 0; i < count; ++i)
        ElementN(i) = min + (max - min) * (Real)ChRandom();
}
/// Resets the matrix to zero (warning: simply sets memory to 0 bytes!)
void Reset() {
    // SetZero(rows*columns); //memset(address, 0, sizeof(Real) * rows * columns);
    // explicit element loop instead of memset, so it is correct for any Real type
    for (int i = 0; i < rows * columns; ++i)
        this->address[i] = 0;
}
/// Reset to zeroes and (if needed) changes the size to have row and col
/// (resizing is delegated to the virtual Resize of the child class).
void Reset(int nrows, int ncols) {
    Resize(nrows, ncols);
    // SetZero(rows*columns); //memset(address, 0, sizeof(Real) * rows * columns);
    for (int i = 0; i < rows * columns; ++i)
        this->address[i] = 0;
}
/// Reset to identity matrix (ones on diagonal, zero elsewhere)
/// Note: assumes the matrix is square (FillDiag does not check).
void SetIdentity() {
    Reset();
    FillDiag(1.0);
}
/// Copy a matrix "matra" into this matrix. Note that
/// the destination matrix will be resized if necessary.
/// Elements are cast from RealB to Real one by one.
template <class RealB>
void CopyFromMatrix(const ChMatrix<RealB>& matra) {
    Resize(matra.GetRows(), matra.GetColumns());
    // ElementsCopy(address, matra.GetAddress(), rows*columns);
    // memcpy (address, matra.address, (sizeof(Real) * rows * columns));
    for (int i = 0; i < rows * columns; ++i)
        address[i] = (Real)matra.GetAddress()[i];
}
/// Copy the transpose of matrix "matra" into this matrix. Note that
/// the destination matrix will be resized if necessary.
template <class RealB>
void CopyFromMatrixT(const ChMatrix<RealB>& matra) {
    Resize(matra.GetColumns(), matra.GetRows());
    for (int i = 0; i < matra.GetRows(); ++i)
        for (int j = 0; j < matra.GetColumns(); ++j)
            SetElement(j, i, (Real)matra.Element(i, j));
}
/// Copy the transposed upper triangular part of "matra" in the lower triangular
/// part of this matrix. (matra must be square: both loops run over GetRows())
/// Note that the destination matrix will be resized if necessary.
template <class RealB> // _______ //
void CopyTUpMatrix(const ChMatrix<RealB>& matra) // \ | |\ //
{ // \ A'| ---> | \ //
    Resize(matra.GetRows(), matra.GetColumns()); // \ | |this\ //
    for (int i = 0; i < matra.GetRows(); i++) { // \| |______\ //
        for (int j = 0; j < matra.GetRows(); j++)
            SetElement(j, i, (Real)matra.GetElement(i, j));
    }
}
/// Copy the transposed lower triangular part of "matra" in the upper triangular
/// part of this matrix. (matra must be square: both loops run over GetRows())
/// Note that the destination matrix will be resized if necessary.
template <class RealB> // _______ //
void CopyTLwMatrix(const ChMatrix<RealB>& matra) // |\ \ | //
{ // | \ ---> \this| //
    Resize(matra.GetRows(), matra.GetColumns()); // |A' \ \ | //
    for (int i = 0; i < matra.GetRows(); i++) { // |______\ \| //
        for (int j = 0; j < matra.GetRows(); j++)
            SetElement(i, j, (Real)matra.GetElement(j, i));
    }
}
//
// STREAMING
//
/// Method to allow serialization of transient data in archives.
/// Writes rows, columns, then the element data; when the archive is an
/// ASCII dump, elements are written as a human-readable row/column table.
virtual void ArchiveOUT(ChArchiveOut& marchive) {
    // suggested: use versioning
    marchive.VersionWrite(1);
    // stream out all member data
    marchive << make_ChNameValue("rows", rows);
    marchive << make_ChNameValue("columns", columns);
    // custom output of matrix data as array
    if (ChArchiveAsciiDump* mascii = dynamic_cast<ChArchiveAsciiDump*>(&marchive)) {
        // CUSTOM row x col 'intuitive' table-like log when using ChArchiveAsciiDump:
        for (int i = 0; i < rows; i++) {
            mascii->indent();
            for (int j = 0; j < columns; j++) {
                mascii->GetStream()->operator<<(Element(i, j));
                mascii->GetStream()->operator<<(", ");
            }
            mascii->GetStream()->operator<<("\n");
        }
    } else {
        // NORMAL array-based serialization:
        int tot_elements = GetRows() * GetColumns();
        marchive.out_array_pre("data", tot_elements, typeid(Real).name());
        for (int i = 0; i < tot_elements; i++) {
            marchive << CHNVP(ElementN(i), "");
            // separator emitted after every element, including the last
            marchive.out_array_between(tot_elements, typeid(Real).name());
        }
        marchive.out_array_end(tot_elements, typeid(Real).name());
    }
}
/// Method to allow deserialization of transient data from archives.
/// Reads rows/columns, resizes this matrix accordingly, then reads the
/// element data array. Mirrors the "normal" branch of ArchiveOUT.
virtual void ArchiveIN(ChArchiveIn& marchive) {
    // suggested: use versioning.
    // The version number must be consumed from the stream even though it is
    // not (yet) used to branch on format; the unused local was removed to
    // avoid an unused-variable warning.
    /*int version =*/ marchive.VersionRead();
    // stream in all member data
    int m_row, m_col;
    marchive >> make_ChNameValue("rows", m_row);
    marchive >> make_ChNameValue("columns", m_col);
    Reset(m_row, m_col);  // resize (virtual) and zero the buffer
    // custom input of matrix data as array
    size_t tot_elements = GetRows() * GetColumns();
    marchive.in_array_pre("data", tot_elements);
    // size_t index avoids the signed/unsigned comparison of the old 'int i' loop
    for (size_t i = 0; i < tot_elements; i++) {
        marchive >> CHNVP(ElementN((int)i));
        marchive.in_array_between("data");
    }
    marchive.in_array_end("data");
}
/// Method to allow serializing transient data into in ascii
/// as a readable item, for example "chrono::GetLog() << myobject;"
/// Output is clipped to at most 8 rows and 8 columns, with "..." markers.
/// ***OBSOLETE***
void StreamOUT(ChStreamOutAscii& mstream) {
    mstream << "\n"
            << "Matrix " << GetRows() << " rows, " << GetColumns() << " columns."
            << "\n";
    for (int i = 0; i < ChMin(GetRows(), 8); i++) {
        for (int j = 0; j < ChMin(GetColumns(), 8); j++)
            mstream << GetElement(i, j) << " ";
        if (GetColumns() > 8)
            mstream << "...";  // clipped columns marker
        mstream << "\n";
    }
    if (GetRows() > 8)
        mstream << "... \n\n";  // clipped rows marker
}
/// Method to allow serializing transient data into an ascii stream (ex. a file)
/// as a Matlab .dat file (all numbers in a row, separated by space, then CR)
void StreamOUTdenseMatlabFormat(ChStreamOutAscii& mstream) {
    for (int ii = 0; ii < this->GetRows(); ii++) {
        for (int jj = 0; jj < this->GetColumns(); jj++) {
            mstream << this->GetElement(ii, jj);
            if (jj < (this->GetColumns() - 1))
                mstream << " ";  // no trailing space after the last column
        }
        mstream << "\n";
    }
}
//
// MATH MEMBER FUNCTIONS.
// For speed reasons, sometimes size checking of operands is left to the user!
//
/// Changes the sign of all the elements of this matrix, in place.
void MatrNeg() {
for (int nel = 0; nel < rows * columns; ++nel)
ElementN(nel) = -ElementN(nel);
}
/// Sum two matrices, and stores the result in "this" matrix: [this]=[A]+[B].
/// All three matrices must already have the same size (asserted).
template <class RealB, class RealC>
void MatrAdd(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
    // use the public accessor GetRows(): the private 'rows' member of a
    // different template instantiation ChMatrix<RealB> is not accessible here
    assert(matra.GetColumns() == matrb.GetColumns() && matra.GetRows() == matrb.GetRows());
    assert(this->columns == matrb.GetColumns() && this->rows == matrb.GetRows());
    for (int nel = 0; nel < rows * columns; ++nel)
        ElementN(nel) = (Real)(matra.ElementN(nel) + matrb.ElementN(nel));
}
/// Subtract two matrices, and stores the result in "this" matrix: [this]=[A]-[B].
/// All three matrices must already have the same size (asserted).
template <class RealB, class RealC>
void MatrSub(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
    // same accessor fix as in MatrAdd
    assert(matra.GetColumns() == matrb.GetColumns() && matra.GetRows() == matrb.GetRows());
    assert(this->columns == matrb.GetColumns() && this->rows == matrb.GetRows());
    for (int nel = 0; nel < rows * columns; ++nel)
        ElementN(nel) = (Real)(matra.ElementN(nel) - matrb.ElementN(nel));
}
/// Increments this matrix with another matrix A, as: [this]+=[A]
template <class RealB>
void MatrInc(const ChMatrix<RealB>& matra) {
    assert(matra.GetColumns() == columns && matra.GetRows() == rows);
    for (int nel = 0; nel < rows * columns; ++nel)
        ElementN(nel) += (Real)matra.ElementN(nel);
}
/// Decrements this matrix with another matrix A, as: [this]-=[A]
template <class RealB>
void MatrDec(const ChMatrix<RealB>& matra) {
    assert(matra.GetColumns() == columns && matra.GetRows() == rows);
    for (int nel = 0; nel < rows * columns; ++nel)
        ElementN(nel) -= (Real)matra.ElementN(nel);
}
/// Scales a matrix, multiplying all elements by a constant value: [this]*=f
void MatrScale(Real factor) {
    for (int nel = 0; nel < rows * columns; ++nel)
        ElementN(nel) *= factor;
}
/// Scales a matrix, multiplying all elements by all other elements of
/// matra, element-wise (it is not the classical matrix multiplication!)
template <class RealB>
void MatrScale(const ChMatrix<RealB>& matra) {
    assert(matra.GetColumns() == columns && matra.GetRows() == rows);
    for (int nel = 0; nel < rows * columns; ++nel)
        ElementN(nel) *= (Real)matra.ElementN(nel);
}
/// Scales a matrix, dividing all elements by a constant value: [this]/=f
void MatrDivScale(Real factor) {
    for (int nel = 0; nel < rows * columns; ++nel)
        ElementN(nel) /= factor;
}
/// Scales a matrix, dividing all elements by all other elements of
/// matra, element-wise (it is not the classical matrix multiplication!)
template <class RealB>
void MatrDivScale(const ChMatrix<RealB>& matra) {
    assert(matra.GetColumns() == columns && matra.GetRows() == rows);
    for (int nel = 0; nel < rows * columns; ++nel)
        ElementN(nel) /= (Real)matra.ElementN(nel);
}
/// Multiplies two matrices, and stores the result in "this" matrix: [this]=[A]*[B].
template <class RealB, class RealC>
void MatrMultiply(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
assert(matra.GetColumns() == matrb.GetRows());
assert(this->rows == matra.GetRows());
assert(this->columns == matrb.GetColumns());
int col, row, colres;
Real sum;
for (colres = 0; colres < matrb.GetColumns(); ++colres) {
for (row = 0; row < matra.GetRows(); ++row) {
sum = 0;
for (col = 0; col < matra.GetColumns(); ++col)
sum += (Real)(matra.Element(row, col) * matrb.Element(col, colres));
SetElement(row, colres, sum);
}
}
}
#ifdef CHRONO_HAS_AVX
/// Multiplies two matrices, and stores the result in "this" matrix: [this]=[A]*[B].
/// AVX implementation: The speed up is marginal if size of the matrices are small, e.g. 3*3
/// Generally, as the matra.GetColumns() increases the method performs better
/// NOTE(review): the colB loop advances by 4 with unaligned 4-double loads/stores
/// and has no scalar tail handling — this appears to assume matrb.GetColumns()
/// is a multiple of 4; otherwise the last iteration reads/writes past the end
/// of each row. Confirm callers guarantee this precondition.
void MatrMultiplyAVX(const ChMatrix<double>& matra, const ChMatrix<double>& matrb) {
    assert(matra.GetColumns() == matrb.GetRows());
    assert(this->rows == matra.GetRows());
    assert(this->columns == matrb.GetColumns());
    int A_Nrow = matra.GetRows();
    int B_Nrow = matrb.GetRows();
    int A_NCol = matra.GetColumns();
    int B_NCol = matrb.GetColumns();
    const double* A_add = matra.GetAddress();
    const double* B_add = matrb.GetAddress();
    double* this_Add = this->GetAddress();
    for (int rowA = 0; rowA < A_Nrow; rowA++) {
        for (int colB = 0; colB < B_NCol; colB += 4) {
            __m256d sum = _mm256_setzero_pd();
            // accumulate A(rowA,elem) * B(elem, colB..colB+3) over the inner dimension
            for (int elem = 0; elem < A_NCol; elem++) {
                __m256d ymmA = _mm256_broadcast_sd(A_add + A_NCol * rowA + elem);
                __m256d ymmB = _mm256_loadu_pd(B_add + elem * B_NCol + colB);
                __m256d prod = _mm256_mul_pd(ymmA, ymmB);
                sum = _mm256_add_pd(sum, prod);
            }
            // store 4 result elements of row rowA at once
            _mm256_storeu_pd(this_Add + rowA * B_NCol + colB, sum);
        }
    }
}
/// Multiplies two matrices (the second is considered transposed): [this]=[A]*[B]'
/// Note: This method is faster than MatrMultiplyT if matra.GetColumns()%4=0 && matra.GetColumns()>8
/// It is still fast if matra.GetColumns() is large enough even if matra.GetColumns()%4!=0
void MatrMultiplyTAVX(const ChMatrix<double>& matra, const ChMatrix<double>& matrb) {
    assert(matra.GetColumns() == matrb.GetColumns());
    assert(this->GetRows() == matra.GetRows());
    assert(this->GetColumns() == matrb.GetRows());
    int A_Nrow = matra.GetRows();
    int B_Nrow = matrb.GetRows();
    int A_NCol = matra.GetColumns();
    int B_NCol = matrb.GetColumns();
    const double* A_add = matra.GetAddress();
    const double* B_add = matrb.GetAddress();
    bool NeedsPadding = (B_NCol % 4 != 0);
    int CorrectFAT = ((B_NCol >> 2) << 2);  // B_NCol rounded down to a multiple of 4
    for (int rowA = 0; rowA < A_Nrow; rowA++) {
        for (int rowB = 0; rowB < B_Nrow; rowB++) {
            int colB;
            double temp_sum = 0.0;
            __m256d sum = _mm256_setzero_pd();
            // vectorized part: dot product of row rowA of A and row rowB of B, 4 at a time
            for (colB = 0; colB < CorrectFAT; colB += 4) {
                __m256d ymmA = _mm256_loadu_pd(A_add + rowA * A_NCol + colB);
                __m256d ymmB = _mm256_loadu_pd(B_add + rowB * B_NCol + colB);
                __m256d prod = _mm256_mul_pd(ymmA, ymmB);
                sum = _mm256_add_pd(sum, prod);
            }
            // horizontal reduction: after hadd_pd, lanes 0 and 2 hold the two pair-sums
            sum = _mm256_hadd_pd(sum, sum);
            temp_sum = ((double*)&sum)[0] + ((double*)&sum)[2];
            // scalar tail for the remaining (B_NCol % 4) columns
            if (NeedsPadding)
                for (colB = CorrectFAT; colB < B_NCol; colB++) {
                    temp_sum += (matra.Element(rowA, colB) * matrb.Element(rowB, colB));
                }
            SetElement(rowA, rowB, temp_sum);
        }
    }
}
#endif
/// Multiplies two matrices (the second is considered transposed): [this]=[A]*[B]'
/// Faster than doing B.MatrTranspose(); result.MatrMultiply(A,B);
/// Note: no check on mistaken size of this!
template <class RealB, class RealC>
void MatrMultiplyT(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
    assert(matra.GetColumns() == matrb.GetColumns());
    assert(this->rows == matra.GetRows());
    assert(this->columns == matrb.GetRows());
    int col, row, colres;
    Real sum;
    // colres indexes rows of B, which become columns of the result
    for (colres = 0; colres < matrb.GetRows(); ++colres) {
        for (row = 0; row < matra.GetRows(); ++row) {
            sum = 0;
            for (col = 0; col < matra.GetColumns(); ++col)
                sum += (Real)(matra.Element(row, col) * matrb.Element(colres, col));
            SetElement(row, colres, sum);
        }
    }
}
/// Multiplies two matrices (the first is considered transposed): [this]=[A]'*[B]
/// Faster than doing A.MatrTranspose(); result.MatrMultiply(A,B);
template <class RealB, class RealC>
void MatrTMultiply(const ChMatrix<RealB>& matra, const ChMatrix<RealC>& matrb) {
    assert(matra.GetRows() == matrb.GetRows());
    assert(this->rows == matra.GetColumns());
    assert(this->columns == matrb.GetColumns());
    int col, row, colres;
    Real sum;
    for (colres = 0; colres < matrb.GetColumns(); ++colres) {
        // row indexes columns of A, which become rows of the result
        for (row = 0; row < matra.GetColumns(); ++row) {
            sum = 0;
            for (col = 0; col < (matra.GetRows()); ++col)
                sum += (Real)(matra.Element(col, row) * matrb.Element(col, colres));
            SetElement(row, colres, sum);
        }
    }
}
/// Computes dot product between two matrices (vectors) with the
/// same size. For column vectors this is the classical dot product.
/// Returns a scalar value.
template <class RealB, class RealC>
static Real MatrDot(const ChMatrix<RealB>& ma, const ChMatrix<RealC>& mb) {
    assert(ma.GetColumns() == mb.GetColumns() && ma.GetRows() == mb.GetRows());
    Real tot = 0;
    // iterate over ALL elements (rows*columns): identical to the old
    // rows-only loop for column vectors, but also correct for row vectors
    // and general same-shaped matrices (Frobenius inner product)
    for (int i = 0; i < ma.GetRows() * ma.GetColumns(); ++i)
        tot += (Real)(ma.ElementN(i) * mb.ElementN(i));
    return tot;
}
/// Transpose this matrix in place.
/// Square matrices are transposed with in-place element swaps; rectangular
/// matrices go through a temporary copy (slower, extra allocation).
void MatrTranspose() {
    if (columns == rows) // Square transp.is optimized
    {
        // swap the strictly-upper triangle with the strictly-lower triangle
        for (int row = 0; row < rows; ++row)
            for (int col = row; col < columns; ++col)
                if (row != col) {
                    Real temp = Element(row, col);
                    Element(row, col) = Element(col, row);
                    Element(col, row) = temp;
                }
        int tmpr = rows;
        rows = columns;
        columns = tmpr;
    } else // Naive implementation for rectangular case. Not in-place. Slower.
    {
        ChMatrixDynamic<Real> matrcopy(*this);
        // buffer size is unchanged (rows*columns invariant), so only the
        // dimensions are swapped, no reallocation needed
        int tmpr = rows;
        rows = columns;
        columns = tmpr; // dont' realloc buffer, anyway
        for (int row = 0; row < rows; ++row)
            for (int col = 0; col < columns; ++col)
                Element(row, col) = matrcopy.Element(col, row);
    }
}
/// Returns the determinant of the matrix, expanded explicitly for
/// sizes 1x1 up to 4x4.
/// Note! This method must be used only with max 4x4 matrices,
/// otherwise it throws an exception.
Real Det() {
    assert(this->GetRows() == this->GetColumns());
    assert(this->GetRows() <= 4);
    if (this->GetRows() != this->GetColumns())
        throw("Cannot compute matrix determinant because rectangular matrix");
    if (this->GetRows() > 4)
        // message fixed: the size limit enforced above is 4x4, not 3x3
        throw("Cannot compute matrix determinant because matr. larger than 4x4");
    Real det = 0;
    switch (this->GetRows()) {
        case 1:
            det = (*this)(0, 0);
            break;
        case 2:
            det = (*this)(0, 0) * (*this)(1, 1) - (*this)(0, 1) * (*this)(1, 0);
            break;
        case 3:
            // rule of Sarrus
            det = (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 2) + (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 0) +
                  (*this)(0, 2) * (*this)(1, 0) * (*this)(2, 1) - (*this)(2, 0) * (*this)(1, 1) * (*this)(0, 2) -
                  (*this)(2, 1) * (*this)(1, 2) * (*this)(0, 0) - (*this)(2, 2) * (*this)(1, 0) * (*this)(0, 1);
            break;
        case 4:
            // explicit Leibniz expansion (24 terms)
            det = (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 3) +
                  (*this)(0, 0) * (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 1) +
                  (*this)(0, 0) * (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 2) +
                  (*this)(0, 1) * (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 2) +
                  (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 0) * (*this)(3, 3) +
                  (*this)(0, 1) * (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 0) +
                  (*this)(0, 2) * (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 3) +
                  (*this)(0, 2) * (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 0) +
                  (*this)(0, 2) * (*this)(1, 3) * (*this)(2, 0) * (*this)(3, 1) +
                  (*this)(0, 3) * (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 1) +
                  (*this)(0, 3) * (*this)(1, 1) * (*this)(2, 0) * (*this)(3, 2) +
                  (*this)(0, 3) * (*this)(1, 2) * (*this)(2, 1) * (*this)(3, 0) -
                  (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 2) -
                  (*this)(0, 0) * (*this)(1, 2) * (*this)(2, 1) * (*this)(3, 3) -
                  (*this)(0, 0) * (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 1) -
                  (*this)(0, 1) * (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 3) -
                  (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 0) -
                  (*this)(0, 1) * (*this)(1, 3) * (*this)(2, 0) * (*this)(3, 2) -
                  (*this)(0, 2) * (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 1) -
                  (*this)(0, 2) * (*this)(1, 1) * (*this)(2, 0) * (*this)(3, 3) -
                  (*this)(0, 2) * (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 0) -
                  (*this)(0, 3) * (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 2) -
                  (*this)(0, 3) * (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 0) -
                  (*this)(0, 3) * (*this)(1, 2) * (*this)(2, 0) * (*this)(3, 1);
            break;
    }
    return det;
}
/// Returns the inverse of the matrix, in place, via explicit cofactor
/// (adjugate) formulas for sizes 1x1 up to 4x4.
/// Note! This method must be used only with max 4x4 matrices,
/// otherwise it throws an exception.
void MatrInverse() {
    assert(this->GetRows() == this->GetColumns());
    assert(this->GetRows() <= 4);
    assert(this->Det() != 0);
    if (this->GetRows() != this->GetColumns())
        throw("Cannot compute matrix inverse because rectangular matrix");
    if (this->GetRows() > 4)
        throw("Cannot compute matrix inverse because matr. larger than 4x4");
    if (this->Det() == 0)
        throw("Cannot compute matrix inverse because singular matrix");
    switch (this->GetRows()) {
        case 1:
            (*this)(0, 0) = (1 / (*this)(0, 0));
            break;
        case 2: {
            // adjugate of a 2x2, then divide by the determinant
            ChMatrixDynamic<Real> inv(2, 2);
            inv(0, 0) = (*this)(1, 1);
            inv(0, 1) = -(*this)(0, 1);
            inv(1, 1) = (*this)(0, 0);
            inv(1, 0) = -(*this)(1, 0);
            inv.MatrDivScale(this->Det());
            this->CopyFromMatrix(inv);
            break;
        }
        case 3: {
            // inv(i,j) = cofactor(j,i) of the 3x3, then divide by the determinant
            ChMatrixDynamic<Real> inv(3, 3);
            inv(0, 0) = (*this)(1, 1) * (*this)(2, 2) - (*this)(1, 2) * (*this)(2, 1);
            inv(0, 1) = (*this)(2, 1) * (*this)(0, 2) - (*this)(0, 1) * (*this)(2, 2);
            inv(0, 2) = (*this)(0, 1) * (*this)(1, 2) - (*this)(0, 2) * (*this)(1, 1);
            inv(1, 0) = (*this)(1, 2) * (*this)(2, 0) - (*this)(1, 0) * (*this)(2, 2);
            inv(1, 1) = (*this)(2, 2) * (*this)(0, 0) - (*this)(2, 0) * (*this)(0, 2);
            inv(1, 2) = (*this)(0, 2) * (*this)(1, 0) - (*this)(1, 2) * (*this)(0, 0);
            inv(2, 0) = (*this)(1, 0) * (*this)(2, 1) - (*this)(1, 1) * (*this)(2, 0);
            inv(2, 1) = (*this)(0, 1) * (*this)(2, 0) - (*this)(0, 0) * (*this)(2, 1);
            inv(2, 2) = (*this)(0, 0) * (*this)(1, 1) - (*this)(0, 1) * (*this)(1, 0);
            inv.MatrDivScale(this->Det());
            this->CopyFromMatrix(inv);
            break;
        }
        case 4: {
            // explicit 4x4 adjugate (each entry is a 3x3 cofactor expansion),
            // then divide by the determinant
            ChMatrixDynamic<Real> inv(4, 4);
            inv.SetElement(
                0, 0,
                (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 1) - (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 1) +
                    (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 2) - (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 2) -
                    (*this)(1, 2) * (*this)(2, 1) * (*this)(3, 3) + (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 3));
            inv.SetElement(
                0, 1,
                (*this)(0, 3) * (*this)(2, 2) * (*this)(3, 1) - (*this)(0, 2) * (*this)(2, 3) * (*this)(3, 1) -
                    (*this)(0, 3) * (*this)(2, 1) * (*this)(3, 2) + (*this)(0, 1) * (*this)(2, 3) * (*this)(3, 2) +
                    (*this)(0, 2) * (*this)(2, 1) * (*this)(3, 3) - (*this)(0, 1) * (*this)(2, 2) * (*this)(3, 3));
            inv.SetElement(
                0, 2,
                (*this)(0, 2) * (*this)(1, 3) * (*this)(3, 1) - (*this)(0, 3) * (*this)(1, 2) * (*this)(3, 1) +
                    (*this)(0, 3) * (*this)(1, 1) * (*this)(3, 2) - (*this)(0, 1) * (*this)(1, 3) * (*this)(3, 2) -
                    (*this)(0, 2) * (*this)(1, 1) * (*this)(3, 3) + (*this)(0, 1) * (*this)(1, 2) * (*this)(3, 3));
            inv.SetElement(
                0, 3,
                (*this)(0, 3) * (*this)(1, 2) * (*this)(2, 1) - (*this)(0, 2) * (*this)(1, 3) * (*this)(2, 1) -
                    (*this)(0, 3) * (*this)(1, 1) * (*this)(2, 2) + (*this)(0, 1) * (*this)(1, 3) * (*this)(2, 2) +
                    (*this)(0, 2) * (*this)(1, 1) * (*this)(2, 3) - (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 3));
            inv.SetElement(
                1, 0,
                (*this)(1, 3) * (*this)(2, 2) * (*this)(3, 0) - (*this)(1, 2) * (*this)(2, 3) * (*this)(3, 0) -
                    (*this)(1, 3) * (*this)(2, 0) * (*this)(3, 2) + (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 2) +
                    (*this)(1, 2) * (*this)(2, 0) * (*this)(3, 3) - (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 3));
            inv.SetElement(
                1, 1,
                (*this)(0, 2) * (*this)(2, 3) * (*this)(3, 0) - (*this)(0, 3) * (*this)(2, 2) * (*this)(3, 0) +
                    (*this)(0, 3) * (*this)(2, 0) * (*this)(3, 2) - (*this)(0, 0) * (*this)(2, 3) * (*this)(3, 2) -
                    (*this)(0, 2) * (*this)(2, 0) * (*this)(3, 3) + (*this)(0, 0) * (*this)(2, 2) * (*this)(3, 3));
            inv.SetElement(
                1, 2,
                (*this)(0, 3) * (*this)(1, 2) * (*this)(3, 0) - (*this)(0, 2) * (*this)(1, 3) * (*this)(3, 0) -
                    (*this)(0, 3) * (*this)(1, 0) * (*this)(3, 2) + (*this)(0, 0) * (*this)(1, 3) * (*this)(3, 2) +
                    (*this)(0, 2) * (*this)(1, 0) * (*this)(3, 3) - (*this)(0, 0) * (*this)(1, 2) * (*this)(3, 3));
            inv.SetElement(
                1, 3,
                (*this)(0, 2) * (*this)(1, 3) * (*this)(2, 0) - (*this)(0, 3) * (*this)(1, 2) * (*this)(2, 0) +
                    (*this)(0, 3) * (*this)(1, 0) * (*this)(2, 2) - (*this)(0, 0) * (*this)(1, 3) * (*this)(2, 2) -
                    (*this)(0, 2) * (*this)(1, 0) * (*this)(2, 3) + (*this)(0, 0) * (*this)(1, 2) * (*this)(2, 3));
            inv.SetElement(
                2, 0,
                (*this)(1, 1) * (*this)(2, 3) * (*this)(3, 0) - (*this)(1, 3) * (*this)(2, 1) * (*this)(3, 0) +
                    (*this)(1, 3) * (*this)(2, 0) * (*this)(3, 1) - (*this)(1, 0) * (*this)(2, 3) * (*this)(3, 1) -
                    (*this)(1, 1) * (*this)(2, 0) * (*this)(3, 3) + (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 3));
            inv.SetElement(
                2, 1,
                (*this)(0, 3) * (*this)(2, 1) * (*this)(3, 0) - (*this)(0, 1) * (*this)(2, 3) * (*this)(3, 0) -
                    (*this)(0, 3) * (*this)(2, 0) * (*this)(3, 1) + (*this)(0, 0) * (*this)(2, 3) * (*this)(3, 1) +
                    (*this)(0, 1) * (*this)(2, 0) * (*this)(3, 3) - (*this)(0, 0) * (*this)(2, 1) * (*this)(3, 3));
            inv.SetElement(
                2, 2,
                (*this)(0, 1) * (*this)(1, 3) * (*this)(3, 0) - (*this)(0, 3) * (*this)(1, 1) * (*this)(3, 0) +
                    (*this)(0, 3) * (*this)(1, 0) * (*this)(3, 1) - (*this)(0, 0) * (*this)(1, 3) * (*this)(3, 1) -
                    (*this)(0, 1) * (*this)(1, 0) * (*this)(3, 3) + (*this)(0, 0) * (*this)(1, 1) * (*this)(3, 3));
            inv.SetElement(
                2, 3,
                (*this)(0, 3) * (*this)(1, 1) * (*this)(2, 0) - (*this)(0, 1) * (*this)(1, 3) * (*this)(2, 0) -
                    (*this)(0, 3) * (*this)(1, 0) * (*this)(2, 1) + (*this)(0, 0) * (*this)(1, 3) * (*this)(2, 1) +
                    (*this)(0, 1) * (*this)(1, 0) * (*this)(2, 3) - (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 3));
            inv.SetElement(
                3, 0,
                (*this)(1, 2) * (*this)(2, 1) * (*this)(3, 0) - (*this)(1, 1) * (*this)(2, 2) * (*this)(3, 0) -
                    (*this)(1, 2) * (*this)(2, 0) * (*this)(3, 1) + (*this)(1, 0) * (*this)(2, 2) * (*this)(3, 1) +
                    (*this)(1, 1) * (*this)(2, 0) * (*this)(3, 2) - (*this)(1, 0) * (*this)(2, 1) * (*this)(3, 2));
            inv.SetElement(
                3, 1,
                (*this)(0, 1) * (*this)(2, 2) * (*this)(3, 0) - (*this)(0, 2) * (*this)(2, 1) * (*this)(3, 0) +
                    (*this)(0, 2) * (*this)(2, 0) * (*this)(3, 1) - (*this)(0, 0) * (*this)(2, 2) * (*this)(3, 1) -
                    (*this)(0, 1) * (*this)(2, 0) * (*this)(3, 2) + (*this)(0, 0) * (*this)(2, 1) * (*this)(3, 2));
            inv.SetElement(
                3, 2,
                (*this)(0, 2) * (*this)(1, 1) * (*this)(3, 0) - (*this)(0, 1) * (*this)(1, 2) * (*this)(3, 0) -
                    (*this)(0, 2) * (*this)(1, 0) * (*this)(3, 1) + (*this)(0, 0) * (*this)(1, 2) * (*this)(3, 1) +
                    (*this)(0, 1) * (*this)(1, 0) * (*this)(3, 2) - (*this)(0, 0) * (*this)(1, 1) * (*this)(3, 2));
            inv.SetElement(
                3, 3,
                (*this)(0, 1) * (*this)(1, 2) * (*this)(2, 0) - (*this)(0, 2) * (*this)(1, 1) * (*this)(2, 0) +
                    (*this)(0, 2) * (*this)(1, 0) * (*this)(2, 1) - (*this)(0, 0) * (*this)(1, 2) * (*this)(2, 1) -
                    (*this)(0, 1) * (*this)(1, 0) * (*this)(2, 2) + (*this)(0, 0) * (*this)(1, 1) * (*this)(2, 2));
            inv.MatrDivScale(this->Det());
            this->CopyFromMatrix(inv);
            break;
        }
    }
}
/// Returns true if vector is identical to other matrix
bool Equals(const ChMatrix<Real>& other) { return Equals(other, 0.0); }
/// Returns true if vector equals another vector, within a tolerance 'tol'
bool Equals(const ChMatrix<Real>& other, Real tol) {
if ((other.GetColumns() != this->columns) || (other.GetRows() != this->rows))
return false;
for (int nel = 0; nel < rows * columns; ++nel)
if (fabs(ElementN(nel) - other.ElementN(nel)) > tol)
return false;
return true;
}
/// Multiplies this 3x4 matrix by a quaternion, as v=[G]*q
/// The matrix must be 3x4.
/// Get34Element(i,j) is assumed to address element (i,j) of the 3x4 block
/// (defined elsewhere in this class) — TODO confirm.
/// \return The result of the multiplication, i.e. a vector.
template <class RealB>
ChVector<Real> Matr34_x_Quat(const ChQuaternion<RealB>& qua) {
    assert((rows == 3) && (columns == 4));
    return ChVector<Real>(Get34Element(0, 0) * (Real)qua.e0() + Get34Element(0, 1) * (Real)qua.e1() +
                              Get34Element(0, 2) * (Real)qua.e2() + Get34Element(0, 3) * (Real)qua.e3(),
                          Get34Element(1, 0) * (Real)qua.e0() + Get34Element(1, 1) * (Real)qua.e1() +
                              Get34Element(1, 2) * (Real)qua.e2() + Get34Element(1, 3) * (Real)qua.e3(),
                          Get34Element(2, 0) * (Real)qua.e0() + Get34Element(2, 1) * (Real)qua.e1() +
                              Get34Element(2, 2) * (Real)qua.e2() + Get34Element(2, 3) * (Real)qua.e3());
}
/// Multiplies this 3x4 matrix (transposed) by a vector, as q=[G]'*v
/// The matrix must be 3x4.
/// \return The result of the multiplication, i.e. a quaternion.
template <class RealB>
ChQuaternion<Real> Matr34T_x_Vect(const ChVector<RealB>& va) {
    assert((rows == 3) && (columns == 4));
    return ChQuaternion<Real>(
        Get34Element(0, 0) * (Real)va.x() + Get34Element(1, 0) * (Real)va.y() + Get34Element(2, 0) * (Real)va.z(),
        Get34Element(0, 1) * (Real)va.x() + Get34Element(1, 1) * (Real)va.y() + Get34Element(2, 1) * (Real)va.z(),
        Get34Element(0, 2) * (Real)va.x() + Get34Element(1, 2) * (Real)va.y() + Get34Element(2, 2) * (Real)va.z(),
        Get34Element(0, 3) * (Real)va.x() + Get34Element(1, 3) * (Real)va.y() + Get34Element(2, 3) * (Real)va.z());
}
/// Multiplies this 4x4 matrix by a quaternion.
/// The matrix must be 4x4 (asserted).
/// Get44Element(i,j) is assumed to address element (i,j) of the 4x4 block
/// (defined elsewhere in this class) — TODO confirm.
/// \return The result of the multiplication, i.e. a quaternion.
template <class RealB>
ChQuaternion<Real> Matr44_x_Quat(const ChQuaternion<RealB>& qua) {
    assert((rows == 4) && (columns == 4));
    return ChQuaternion<Real>(Get44Element(0, 0) * (Real)qua.e0() + Get44Element(0, 1) * (Real)qua.e1() +
                                  Get44Element(0, 2) * (Real)qua.e2() + Get44Element(0, 3) * (Real)qua.e3(),
                              Get44Element(1, 0) * (Real)qua.e0() + Get44Element(1, 1) * (Real)qua.e1() +
                                  Get44Element(1, 2) * (Real)qua.e2() + Get44Element(1, 3) * (Real)qua.e3(),
                              Get44Element(2, 0) * (Real)qua.e0() + Get44Element(2, 1) * (Real)qua.e1() +
                                  Get44Element(2, 2) * (Real)qua.e2() + Get44Element(2, 3) * (Real)qua.e3(),
                              Get44Element(3, 0) * (Real)qua.e0() + Get44Element(3, 1) * (Real)qua.e1() +
                                  Get44Element(3, 2) * (Real)qua.e2() + Get44Element(3, 3) * (Real)qua.e3());
}
/// Transposes only the lower-right 3x3 submatrix of a hemisymmetric 4x4 matrix,
/// used when the 4x4 matrix is a "star" matrix [q] coming from a quaternion q:
/// the non commutative quat. product is:
/// q1 x q2 = [q1]*q2 = [q2st]*q1
/// where [q2st] is the "semi-transpose of [q2].
/// NOTE(review): indexes go up to (3,3) but there is no 4x4 size assert here;
/// SetElement/GetElement asserts will fire on a smaller matrix.
void MatrXq_SemiTranspose() {
    // negate the six off-diagonal entries of the lower-right 3x3 block
    SetElement(1, 2, -GetElement(1, 2));
    SetElement(1, 3, -GetElement(1, 3));
    SetElement(2, 1, -GetElement(2, 1));
    SetElement(2, 3, -GetElement(2, 3));
    SetElement(3, 1, -GetElement(3, 1));
    SetElement(3, 2, -GetElement(3, 2));
}
/// Change the sign of the 2nd, 3rd and 4th columns of a 4x4 matrix,
/// The product between a quaternion q1 and the conjugate of q2 (q2'), is:
/// q1 x q2' = [q1]*q2' = [q1sn]*q2
/// where [q1sn] is the semi-negation of the 4x4 matrix [q1].
void MatrXq_SemiNeg() {
    for (int i = 0; i < rows; ++i)
        for (int j = 1; j < columns; ++j)  // j starts at 1: first column untouched
            SetElement(i, j, -GetElement(i, j));
}
/// Gets the infinity norm of the matrix, i.e. the maximum
/// of its elements in absolute value.
Real NormInf() {
    Real norm = 0;
    const int count = rows * columns;
    for (int nel = 0; nel < count; ++nel) {
        Real av = fabs(ElementN(nel));
        if (av > norm)
            norm = av;
    }
    return norm;
}
/// Gets the two-norm of the matrix, i.e. the square root
/// of the sum of the elements squared.
Real NormTwo() {
    Real sumsq = 0;
    const int count = rows * columns;
    for (int nel = 0; nel < count; ++nel)
        sumsq += ElementN(nel) * ElementN(nel);
    return sqrt(sumsq);
}
/// Finds the max value among the values of the matrix
Real Max() {
    Real mmax = GetElement(0, 0);
    const int count = rows * columns;
    for (int nel = 0; nel < count; ++nel)
        if (ElementN(nel) > mmax)
            mmax = ElementN(nel);
    return mmax;
}
/// Finds the min value among the values of the matrix
Real Min() {
    Real mmin = GetElement(0, 0);
    const int count = rows * columns;
    for (int nel = 0; nel < count; ++nel)
        if (ElementN(nel) < mmin)
            mmin = ElementN(nel);
    return mmin;
}
/// Linear interpolation of two matrices. Parameter mx must be 0...1.
/// [this] =(1-x)[A]+ (x)[B] Matrices must have the same size!!
/// NOTE(review): only matra vs matrb sizes are asserted; the size of *this*
/// matrix is not checked against them — confirm callers pre-size it.
void LinInterpolate(const ChMatrix<Real>& matra, const ChMatrix<Real>& matrb, Real mx) {
    assert(matra.columns == matrb.columns && matra.rows == matrb.rows);
    for (int nel = 0; nel < rows * columns; nel++)
        ElementN(nel) = matra.ElementN(nel) * (1 - mx) + matrb.ElementN(nel) * (mx);
}
/// Fills a matrix or a vector with a bilinear interpolation,
/// from corner values (as a u-v patch):
/// rows span vmin..vmax, columns span umin..umax, summed per element.
void RowColInterp(Real vmin, Real vmax, Real umin, Real umax) {
    for (int iu = 0; iu < GetColumns(); iu++)
        for (int iv = 0; iv < GetRows(); iv++) {
            if (GetRows() > 1)  // avoid division by zero for a single row
                Element(iv, iu) = vmin + (vmax - vmin) * ((Real)iv / ((Real)(GetRows() - 1)));
            if (GetColumns() > 1)  // avoid division by zero for a single column
                Element(iv, iu) += umin + (umax - umin) * ((Real)iu / ((Real)(GetColumns() - 1)));
        }
}
//
// BOOKKEEPING
//
/// Paste a matrix "matra" into "this", inserting at location insrow-inscol.
/// Normal copy for insrow=inscol=0
template <class RealB>
void PasteMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
for (int i = 0; i < matra.GetRows(); ++i)
for (int j = 0; j < matra.GetColumns(); ++j)
Element(i + insrow, j + inscol) = (Real)matra.Element(i, j);
}
/// Paste a matrix "matra" into "this" at (insrow, inscol), adding each
/// source element to the preexisting destination value.
template <class RealB>
void PasteSumMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
    const int nr = matra.GetRows();
    const int nc = matra.GetColumns();
    for (int r = 0; r < nr; ++r)
        for (int c = 0; c < nc; ++c)
            Element(insrow + r, inscol + c) += (Real)matra.Element(r, c);
}
/// Paste the transpose of matrix "matra" into "this", with its top-left
/// corner placed at (insrow, inscol). A plain transposed copy when
/// insrow = inscol = 0.
template <class RealB>
void PasteTranspMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
    const int nr = matra.GetRows();
    const int nc = matra.GetColumns();
    for (int r = 0; r < nr; ++r)
        for (int c = 0; c < nc; ++c)
            Element(insrow + c, inscol + r) = (Real)matra.Element(r, c);
}
/// Paste the transpose of matrix "matra" into "this" at (insrow, inscol),
/// adding each source element to the preexisting destination value.
template <class RealB>
void PasteSumTranspMatrix(const ChMatrix<RealB>& matra, int insrow, int inscol) {
    const int nr = matra.GetRows();
    const int nc = matra.GetColumns();
    for (int r = 0; r < nr; ++r)
        for (int c = 0; c < nc; ++c)
            Element(insrow + c, inscol + r) += (Real)matra.Element(r, c);
}
/// Copy an nrows x ncolumns sub-block of "matra" (whose top-left corner in
/// the source is at (cliprow, clipcol)) into "this" at (insrow, inscol).
template <class RealB>
void PasteClippedMatrix(const ChMatrix<RealB>& matra,
                        int cliprow,
                        int clipcol,
                        int nrows,
                        int ncolumns,
                        int insrow,
                        int inscol) {
    for (int r = 0; r < nrows; ++r)
        for (int c = 0; c < ncolumns; ++c)
            Element(insrow + r, inscol + c) = (Real)matra.Element(cliprow + r, clipcol + c);
}
/// Flatten an nrows x ncolumns sub-block of "matra" (top-left source corner
/// at (cliprow, clipcol)) row by row into "this" (used as a vector),
/// starting at linear index insindex.
template <class RealB>
void PasteClippedMatrixToVector(const ChMatrix<RealB>& matra,
                                int cliprow,
                                int clipcol,
                                int nrows,
                                int ncolumns,
                                int insindex) {
    int dest = insindex;  // next linear slot; advances in row-major order
    for (int r = 0; r < nrows; ++r)
        for (int c = 0; c < ncolumns; ++c)
            ElementN(dest++) = (Real)matra.Element(cliprow + r, clipcol + c);
}
/// Paste a clipped portion of a vector "matra" into "this", where "this"
/// is a matrix (of ChMatrix type): nrows * ncolumns consecutive vector
/// entries, starting at linear index insindex, fill the destination region
/// row by row.
/// NOTE(review): despite the parameter names, cliprow/clipcol locate the
/// DESTINATION region in this matrix and insindex is the SOURCE offset in
/// the vector — the mirror image of PasteClippedMatrixToVector.
template <class RealB>
void PasteClippedVectorToMatrix(const ChMatrix<RealB>& matra,
int cliprow,
int clipcol,
int nrows,
int ncolumns,
int insindex) {
for (int i = 0; i < nrows; ++i)
for (int j = 0; j < ncolumns; ++j)
Element(i + cliprow, j + clipcol) = (Real)matra.ElementN(insindex + i * ncolumns + j);
}
/// Paste a clipped portion of the matrix "matra" into "this", performing a sum with preexisting values,
/// inserting the clip (of size nrows, ncolumns) at the location insrow-inscol.
/// The "omp atomic" below makes each "+=" safe when several threads
/// accumulate into overlapping regions of the same destination —
/// presumably this method is called from inside parallel regions; confirm.
template <class RealB>
void PasteSumClippedMatrix(const ChMatrix<RealB>& matra,
int cliprow,
int clipcol,
int nrows,
int ncolumns,
int insrow,
int inscol) {
for (int i = 0; i < nrows; ++i)
for (int j = 0; j < ncolumns; ++j)
#pragma omp atomic
Element(i + insrow, j + inscol) += (Real)matra.Element(i + cliprow, j + clipcol);
}
/// Write the three components of vector "va" into the matrix column
/// inscol, occupying rows insrow, insrow+1, insrow+2.
template <class RealB>
void PasteVector(const ChVector<RealB>& va, int insrow, int inscol) {
    SetElement(insrow, inscol, (Real)va.x());
    SetElement(insrow + 1, inscol, (Real)va.y());
    SetElement(insrow + 2, inscol, (Real)va.z());
}
/// Add the three components of vector "va" to the matrix entries at
/// rows insrow..insrow+2 of column inscol.
template <class RealB>
void PasteSumVector(const ChVector<RealB>& va, int insrow, int inscol) {
    Element(insrow, inscol) += (Real)va.x();
    Element(insrow + 1, inscol) += (Real)va.y();
    Element(insrow + 2, inscol) += (Real)va.z();
}
/// Subtract the three components of vector "va" from the matrix entries
/// at rows insrow..insrow+2 of column inscol.
template <class RealB>
void PasteSubVector(const ChVector<RealB>& va, int insrow, int inscol) {
    Element(insrow, inscol) -= (Real)va.x();
    Element(insrow + 1, inscol) -= (Real)va.y();
    Element(insrow + 2, inscol) -= (Real)va.z();
}
/// Write the four components of quaternion "qa" into the matrix column
/// inscol, occupying rows insrow..insrow+3 (order e0, e1, e2, e3).
template <class RealB>
void PasteQuaternion(const ChQuaternion<RealB>& qa, int insrow, int inscol) {
    SetElement(insrow, inscol, (Real)qa.e0());
    SetElement(insrow + 1, inscol, (Real)qa.e1());
    SetElement(insrow + 2, inscol, (Real)qa.e2());
    SetElement(insrow + 3, inscol, (Real)qa.e3());
}
/// Add the four components of quaternion "qa" to the matrix entries at
/// rows insrow..insrow+3 of column inscol (order e0, e1, e2, e3).
template <class RealB>
void PasteSumQuaternion(const ChQuaternion<RealB>& qa, int insrow, int inscol) {
    Element(insrow, inscol) += (Real)qa.e0();
    Element(insrow + 1, inscol) += (Real)qa.e1();
    Element(insrow + 2, inscol) += (Real)qa.e2();
    Element(insrow + 3, inscol) += (Real)qa.e3();
}
/// Write a coordsys into column inscol: the position vector in rows
/// insrow..insrow+2, then the rotation quaternion in the next four rows.
template <class RealB>
void PasteCoordsys(const ChCoordsys<RealB>& cs, int insrow, int inscol) {
    const int rotrow = insrow + 3;  // quaternion starts right after the position
    PasteVector(cs.pos, insrow, inscol);
    PasteQuaternion(cs.rot, rotrow, inscol);
}
/// Extract a 3-component vector from rows insrow..insrow+2 of column inscol.
ChVector<Real> ClipVector(int insrow, int inscol) const {
    ChVector<Real> result(Element(insrow, inscol),
                          Element(insrow + 1, inscol),
                          Element(insrow + 2, inscol));
    return result;
}
/// Extract a quaternion from rows insrow..insrow+3 of column inscol
/// (order e0, e1, e2, e3).
ChQuaternion<Real> ClipQuaternion(int insrow, int inscol) const {
    ChQuaternion<Real> result(Element(insrow, inscol),
                              Element(insrow + 1, inscol),
                              Element(insrow + 2, inscol),
                              Element(insrow + 3, inscol));
    return result;
}
/// Extract a coordsys from column inscol: position from rows
/// insrow..insrow+2 and rotation from the following four rows.
ChCoordsys<Real> ClipCoordsys(int insrow, int inscol) const {
    ChVector<Real> pos = ClipVector(insrow, inscol);
    ChQuaternion<Real> rot = ClipQuaternion(insrow + 3, inscol);
    return ChCoordsys<Real>(pos, rot);
}
//
// MULTIBODY SPECIFIC MATH FUCTION
//
/// Fills a 4x4 matrix as the "star" matrix, representing quaternion cross product.
/// That is, given two quaternions a and b, aXb = [Astar]*b, where [Astar] is
/// the matrix written below from the components of q = a.
/// Row/column 0 corresponds to the scalar part e0; the 3x3 lower-right
/// corner is e0*I plus the skew-symmetric matrix of (e1, e2, e3).
template <class RealB>
void Set_Xq_matrix(const ChQuaternion<RealB>& q) {
    // row 0: [ e0 -e1 -e2 -e3 ]
    Set44Element(0, 0, (Real)q.e0());
    Set44Element(0, 1, -(Real)q.e1());
    Set44Element(0, 2, -(Real)q.e2());
    Set44Element(0, 3, -(Real)q.e3());
    // row 1: [ e1  e0 -e3  e2 ]
    Set44Element(1, 0, (Real)q.e1());
    Set44Element(1, 1, (Real)q.e0());
    Set44Element(1, 2, -(Real)q.e3());
    Set44Element(1, 3, (Real)q.e2());
    // row 2: [ e2  e3  e0 -e1 ]
    Set44Element(2, 0, (Real)q.e2());
    Set44Element(2, 1, (Real)q.e3());
    Set44Element(2, 2, (Real)q.e0());
    Set44Element(2, 3, -(Real)q.e1());
    // row 3: [ e3 -e2  e1  e0 ]
    Set44Element(3, 0, (Real)q.e3());
    Set44Element(3, 1, -(Real)q.e2());
    Set44Element(3, 2, (Real)q.e1());
    Set44Element(3, 3, (Real)q.e0());
}
};
} // end namespace chrono
#endif
|
GB_unop__identity_uint8_bool.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint8_bool)
// op(A') function: GB (_unop_tran__identity_uint8_bool)
// C type: uint8_t
// A type: bool
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
bool aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = (uint8_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Casts every stored entry of A from bool to uint8_t and writes it to Cx.
// Aliasing Cx == Ax is safe: each slot is read before it is overwritten.
GrB_Info GB (_unop_apply__identity_uint8_bool)
(
    uint8_t *Cx,        // Cx and Ax may be aliased
    const bool *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out via GxB_NO_* controls; caller falls back to the
    // generic (non-specialized) kernel
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // sparse/hyper/full case: all anz entries are present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // identity op, same types: a straight parallel memcpy suffices
        // (not taken here: bool -> uint8_t requires the cast loop below)
        GB_memcpy (Cx, Ax, anz * sizeof (bool), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            bool aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries absent from the bitmap
            bool aij = Ax [p] ;
            uint8_t z = (uint8_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// All of the logic lives in the shared template GB_unop_transpose.c,
// specialized by the GB_* macros defined earlier in this file.
GrB_Info GB (_unop_tran__identity_uint8_bool)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out; caller uses the generic kernel instead
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
csr.c |
/*
Author: Mohammed Ahmed Al Farhan
Email: mohammed.farhan@kaust.edu.sa
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <omp.h>
#include "inc/allocator.h"
#include "inc/geometry.h"
#include "inc/msh/mesh.h"
/* c stdlib qsort comparator for uint32_t keys.
 *
 * Fix: the previous version returned the unsigned difference (a - b)
 * converted to int.  Unsigned subtraction wraps, so any pair whose true
 * difference exceeds INT_MAX was ordered incorrectly (e.g. 0 vs
 * 3000000000 compared as "greater").  Explicit comparisons are always
 * correct and branch-free. */
static inline int
comp(const void *restrict a, const void *restrict b)
{
    uint32_t x = *(const uint32_t *) a;
    uint32_t y = *(const uint32_t *) b;
    return (x > y) - (x < y);
}
/* Build the block-CSR sparsity pattern of the mesh adjacency graph in g.
 *
 * Produces the row-pointer array (ia), the column-index array (ja) with
 * one diagonal entry per node plus one entry per edge endpoint, allocates
 * the dense block value array (aa), and publishes everything in g->c.
 * ia/ja are zero-based (C-style); within each row, the diagonal entry is
 * inserted first and the row is then sorted into increasing column order.
 */
void
m2csr(struct geometry *restrict g)
{
    /* Unknowns per grid point (block dimension), resolved up front so the
     * value array is sized consistently with the g->c->bsz published below.
     * Fix: the value array was previously always allocated with 4x4 blocks,
     * under-allocating it when __USE_COMPRESSIBLE_FLOW (5 unknowns) is on. */
#ifdef __USE_COMPRESSIBLE_FLOW
    const uint32_t bsz = 5; /* Compressible Euler flow */
#else
    const uint32_t bsz = 4; /* Incompressible Euler flow */
#endif
    /* Row pointers */
    uint32_t *restrict ia;
#if defined(__USE_MEMKIND) && defined(__USE_POSIX_HBW)
    ia = calloc((g->n->sz + 1), sizeof(uint32_t));
#else
    kcalloc((g->n->sz + 1), sizeof(uint32_t), (void *) &ia);
#endif
    /* Count, per row (shifted by one for the prefix sum), the off-diagonal
     * entry that each edge contributes to both of its endpoints. */
    uint32_t i;
    for (i = 0; i < g->e->sz; i++) {
        ia[g->e->eptr->n0[i] + 1]++;
        ia[g->e->eptr->n1[i] + 1]++;
    }
    /* Prefix sum; the extra increment reserves one diagonal entry per row.
     * The offsets are built one-based first ... */
    ia[0] = 1;
    for (i = 1; i <= g->n->sz; i++) {
        ia[i] += ia[i - 1];
        ia[i]++;
    }
    /* ... then adjusted to zero-based (C-style) indexing. */
    for (i = 0; i <= g->n->sz; i++)
        ia[i]--;
    /* Column indices: ia[g->n->sz] is the total number of stored blocks. */
    uint32_t *restrict ja;
#if defined(__USE_MEMKIND) && defined(__USE_POSIX_HBW)
    ja = calloc(ia[g->n->sz], sizeof(uint32_t));
#else
    kmalloc(ia[g->n->sz], sizeof(uint32_t), (void *) &ja);
#endif
    /* Dense bsz x bsz block of values for every stored entry
     * (fix: sized from bsz instead of the hard-coded 4 * 4). */
    double *aa = (double *) calloc((size_t) ia[g->n->sz] * bsz * bsz, sizeof(double));
    /* Scratch: number of entries already filled in each row. */
    uint32_t *restrict buf;
    kmalloc(g->n->sz, sizeof(uint32_t), (void *) &buf);
    /* The diagonal block always occupies the first slot of its row. */
    for (i = 0; i < g->n->sz; i++) {
        ja[ia[i]] = i; // A diagonal element
        buf[i] = 1;    // One element in this row has been added
    }
    /* Scatter each edge into the rows of both of its endpoints: the next
     * free slot of a row is its base offset plus its current fill count. */
    for (i = 0; i < g->e->sz; i++) {
        uint32_t n0 = g->e->eptr->n0[i];
        uint32_t n1 = g->e->eptr->n1[i];
        uint32_t indx = ia[n0] + buf[n0]; // Get the index
        buf[n0]++;     // Column has been added (one more element in the row)
        ja[indx] = n1; // Store the node index in its corresponding index
        /* Do it for the other endpoint */
        indx = ia[n1] + buf[n1];
        buf[n1]++;
        ja[indx] = n0;
    }
    kfree(buf);
    size_t nz_total = 0;
    /* Sort the column indices of each row into increasing order and tally
     * the stored blocks (the old per-row counting loop was just jend-jstart). */
#pragma omp parallel for reduction(+:nz_total)
    for (i = 0; i < g->n->sz; i++) {
        uint32_t jstart = ia[i];
        uint32_t jend = ia[i + 1];
        qsort(ja + jstart, (size_t)(jend - jstart), sizeof(uint32_t), comp);
        nz_total += jend - jstart;
    }
    g->c->aa = aa;
    g->c->ia = ia;        // Starting row indices
    g->c->ja = ja;        // Column indices
    g->c->nnz = nz_total; // Number of nonzero blocks
    g->c->bsz = bsz;      // Unknowns per grid point
#ifndef __USE_COMPRESSIBLE_FLOW
    /* NOTE(review): bsz2 was historically set only in the incompressible
     * branch; preserved as-is — confirm whether the compressible path
     * also needs it. */
    g->c->bsz2 = bsz * bsz;
#endif
    int *ilen = (int *) calloc(g->n->sz, sizeof(int));
    g->c->ailen = ilen;
    /* Number of the matrix rows | columns */
    g->c->sz = g->c->bsz * g->n->sz;
}
|
GB_binop__plus_fc32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_08__plus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_04__plus_fc32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_fc32)
// A*D function (colscale): GB (_AxD__plus_fc32)
// D*A function (rowscale): GB (_DxB__plus_fc32)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_fc32)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_fc32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_fc32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_fc32)
// C=scalar+B GB (_bind1st__plus_fc32)
// C=scalar+B' GB (_bind1st_tran__plus_fc32)
// C=A+scalar GB (_bind2nd__plus_fc32)
// C=A'+scalar GB (_bind2nd_tran__plus_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// A pattern? 0
// B type: GxB_FC32_t
// B pattern? 0
// BinaryOp: cij = GB_FC32_add (aij, bij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_BTYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC32_add (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_FC32 || GxB_NO_PLUS_FC32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// All of the work happens in the shared template, specialized by the
// GB_* macros defined earlier in this file.
void GB (_Cdense_ewise3_accum__plus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// No accumulator: C is overwritten.  All work happens in the shared template.
void GB (_Cdense_ewise3_noaccum__plus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__plus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // operator compiled out via GxB_NO_* controls; caller falls back to
    // the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__plus_fc32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the block above always returns; harmless
    // artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__plus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // expose C's value array to the template with its concrete type
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__plus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // expose C's value array to the template with its concrete type
    GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__plus_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace slicings, allocated/freed by the template via GB_FREE_WORKSPACE
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    GxB_FC32_t alpha_scalar ;
    GxB_FC32_t beta_scalar ;
    // alpha/beta are only defined for GxB_eWiseUnion; plain eWiseAdd
    // leaves them unread
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((GxB_FC32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((GxB_FC32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__plus_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // all of the work is done by the shared emult meta-template
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__plus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    // (GB_BINOP_FLIP is 0 for this PLUS operator, so this branch is taken.)
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__plus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__plus_fc32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [p] = x + Bx [p] for every entry present in B.
GrB_Info GB (_bind1st__plus_fc32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // B->b if B is bitmap, else NULL
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t   x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;            // skip entries absent from the bitmap
        GxB_FC32_t bij = GBX (Bx, p, false) ;   // bij = Bx [p]
        Cx [p] = GB_FC32_add (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Computes Cx [p] = Ax [p] + y for every entry present in A.
GrB_Info GB (_bind2nd__plus_fc32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // A->b if A is bitmap, else NULL
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t   y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;            // skip entries absent from the bitmap
        GxB_FC32_t aij = GBX (Ax, p, false) ;   // aij = Ax [p]
        Cx [p] = GB_FC32_add (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = GB_FC32_add (x, aij) ;            \
}
GrB_Info GB (_bind1st_tran__plus_fc32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE        \
    GxB_FC32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE        \
    GxB_FC32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = GBX (Ax, pA, false) ;      \
    Cx [pC] = GB_FC32_add (aij, y) ;            \
}
GrB_Info GB (_bind2nd_tran__plus_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
irbuilder_unroll_unroll_partial_heuristic.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK-LABEL: define {{.*}}@unroll_unroll_partial_heuristic(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[I:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8
// CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4
// CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8
// CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8
// CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8
// CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8
// CHECK-NEXT: store i32 0, i32* %[[I]], align 4
// CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0
// CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4
// CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]])
// CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER]]:
// CHECK-NEXT: %[[TMP3:.+]] = udiv i32 %[[DOTCOUNT]], 8
// CHECK-NEXT: %[[TMP4:.+]] = urem i32 %[[DOTCOUNT]], 8
// CHECK-NEXT: %[[TMP5:.+]] = icmp ne i32 %[[TMP4]], 0
// CHECK-NEXT: %[[TMP6:.+]] = zext i1 %[[TMP5]] to i32
// CHECK-NEXT: %[[OMP_FLOOR0_TRIPCOUNT:.+]] = add nuw i32 %[[TMP3]], %[[TMP6]]
// CHECK-NEXT: br label %[[OMP_FLOOR0_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_PREHEADER]]:
// CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_HEADER]]:
// CHECK-NEXT: %[[OMP_FLOOR0_IV:.+]] = phi i32 [ 0, %[[OMP_FLOOR0_PREHEADER]] ], [ %[[OMP_FLOOR0_NEXT:.+]], %[[OMP_FLOOR0_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_FLOOR0_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_COND]]:
// CHECK-NEXT: %[[OMP_FLOOR0_CMP:.+]] = icmp ult i32 %[[OMP_FLOOR0_IV]], %[[OMP_FLOOR0_TRIPCOUNT]]
// CHECK-NEXT: br i1 %[[OMP_FLOOR0_CMP]], label %[[OMP_FLOOR0_BODY:.+]], label %[[OMP_FLOOR0_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_BODY]]:
// CHECK-NEXT: %[[TMP7:.+]] = icmp eq i32 %[[OMP_FLOOR0_IV]], %[[OMP_FLOOR0_TRIPCOUNT]]
// CHECK-NEXT: %[[TMP8:.+]] = select i1 %[[TMP7]], i32 %[[TMP4]], i32 8
// CHECK-NEXT: br label %[[OMP_TILE0_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_PREHEADER]]:
// CHECK-NEXT: br label %[[OMP_TILE0_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_HEADER]]:
// CHECK-NEXT: %[[OMP_TILE0_IV:.+]] = phi i32 [ 0, %[[OMP_TILE0_PREHEADER]] ], [ %[[OMP_TILE0_NEXT:.+]], %[[OMP_TILE0_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_TILE0_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_COND]]:
// CHECK-NEXT: %[[OMP_TILE0_CMP:.+]] = icmp ult i32 %[[OMP_TILE0_IV]], %[[TMP8]]
// CHECK-NEXT: br i1 %[[OMP_TILE0_CMP]], label %[[OMP_TILE0_BODY:.+]], label %[[OMP_TILE0_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_BODY]]:
// CHECK-NEXT: %[[TMP9:.+]] = mul nuw i32 8, %[[OMP_FLOOR0_IV]]
// CHECK-NEXT: %[[TMP10:.+]] = add nuw i32 %[[TMP9]], %[[OMP_TILE0_IV]]
// CHECK-NEXT: br label %[[OMP_LOOP_BODY:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_BODY]]:
// CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP10]], %struct.anon.0* %[[AGG_CAPTURED1]])
// CHECK-NEXT: %[[TMP11:.+]] = load float*, float** %[[B_ADDR]], align 8
// CHECK-NEXT: %[[TMP12:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP12]] to i64
// CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP11]], i64 %[[IDXPROM]]
// CHECK-NEXT: %[[TMP13:.+]] = load float, float* %[[ARRAYIDX]], align 4
// CHECK-NEXT: %[[TMP14:.+]] = load float*, float** %[[C_ADDR]], align 8
// CHECK-NEXT: %[[TMP15:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP15]] to i64
// CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP14]], i64 %[[IDXPROM2]]
// CHECK-NEXT: %[[TMP16:.+]] = load float, float* %[[ARRAYIDX3]], align 4
// CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP13]], %[[TMP16]]
// CHECK-NEXT: %[[TMP17:.+]] = load float*, float** %[[D_ADDR]], align 8
// CHECK-NEXT: %[[TMP18:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM4:.+]] = sext i32 %[[TMP18]] to i64
// CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP17]], i64 %[[IDXPROM4]]
// CHECK-NEXT: %[[TMP19:.+]] = load float, float* %[[ARRAYIDX5]], align 4
// CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP19]]
// CHECK-NEXT: %[[TMP20:.+]] = load float*, float** %[[A_ADDR]], align 8
// CHECK-NEXT: %[[TMP21:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM7:.+]] = sext i32 %[[TMP21]] to i64
// CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP20]], i64 %[[IDXPROM7]]
// CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4
// CHECK-NEXT: br label %[[OMP_TILE0_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_INC]]:
// CHECK-NEXT: %[[OMP_TILE0_NEXT]] = add nuw i32 %[[OMP_TILE0_IV]], 1
// CHECK-NEXT: br label %[[OMP_TILE0_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_EXIT]]:
// CHECK-NEXT: br label %[[OMP_TILE0_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_AFTER]]:
// CHECK-NEXT: br label %[[OMP_FLOOR0_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_INC]]:
// CHECK-NEXT: %[[OMP_FLOOR0_NEXT]] = add nuw i32 %[[OMP_FLOOR0_IV]], 1
// CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER]], !llvm.loop ![[LOOP6:[0-9]+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_EXIT]]:
// CHECK-NEXT: br label %[[OMP_FLOOR0_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_AFTER]]:
// CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_AFTER]]:
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// Nested '#pragma omp unroll partial' directives without an explicit factor:
// the inner directive partially unrolls the loop, and the outer directive is
// then applied to the loop that results from the inner unrolling. With no
// factor given, the unroll factor is implementation-chosen (heuristic) —
// presumably what the "_heuristic" suffix refers to; the exact IR this
// produces is pinned by FileCheck expectations elsewhere in this test.
void unroll_unroll_partial_heuristic(float *a, float *b, float *c, float *d) {
#pragma omp unroll partial // outer: applies to the loop produced by the inner unroll
#pragma omp unroll partial // inner: partial unroll of the source loop
  for (int i = 0; i < 2; i++) {
    a[i] = b[i] * c[i] * d[i]; // element-wise product of b, c, d stored into a
  }
}
#endif // HEADER
// CHECK-LABEL: define {{.*}}@__captured_stmt(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
// CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
// CHECK-NEXT: store i32 2, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP4]], %[[TMP5]]
// CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_TRUE]]:
// CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP6]], %[[TMP7]]
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[SUB]], %[[TMP8]]
// CHECK-NEXT: br label %[[COND_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_FALSE]]:
// CHECK-NEXT: br label %[[COND_END]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_END]]:
// CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ]
// CHECK-NEXT: %[[TMP9:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP9]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LABEL: define {{.*}}@__captured_stmt.1(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]]
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]]
// CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4}
// CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51}
// CHECK: ![[META2:[0-9]+]] =
// CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]}
// CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"}
// CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 8}
// CHECK: ![[LOOP6]] = distinct !{![[LOOP6]], ![[LOOPPROP4]]}