source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
dataset.h | /*
Copyright (c) 2016, TU Dresden
Copyright (c) 2017, Heidelberg University
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the TU Dresden, Heidelberg University nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL TU DRESDEN OR HEIDELBERG UNIVERSITY BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#pragma once
#include "properties.h"
#include "util.h"
#include "read_data.h"
#include <stdexcept>
namespace jp
{
/**
 * @brief Back-project a pixel position and depth value to a 3D camera coordinate.
 *
 * Uses the intrinsic parameters (focal length, principal point shift) stored in
 * the global properties singleton.
 *
 * @param x X component of the pixel position.
 * @param y Y component of the pixel position.
 * @param depth Depth value at that position in mm.
 * @return cv::Mat_<double> 3x1 camera coordinate; all zeros if depth is 0 (hole).
 */
inline cv::Mat_<double> pxToEye(double x, double y, double depth)
{
    cv::Mat_<double> eye = cv::Mat_<double>::zeros(3, 1);

    // a depth of zero marks a measurement hole; map it to the origin
    if(depth == 0)
        return eye;

    GlobalProperties* gp = GlobalProperties::getInstance();

    // principal point (image center plus calibrated shift)
    const double centerX = gp->dP.imageWidth / 2.0 + gp->dP.xShift;
    const double centerY = gp->dP.imageHeight / 2.0 + gp->dP.yShift;
    // focal length expressed per unit depth (pinhole model)
    const double focalOverDepth = gp->dP.focalLength / depth;

    eye(0, 0) = (x - centerX) / focalOverDepth;
    eye(1, 0) = (y - centerY) / focalOverDepth;
    eye(2, 0) = (jp::coord1_t) depth;

    return eye;
}
/**
 * @brief Class that is an interface for reading and writing object specific data.
 *
 * Frames are addressed by index; the file lists for RGB, depth and pose data
 * are collected once from the dataset directory.
 */
class Dataset
{
public:

    /**
     * @brief Default constructor. Creates an empty dataset.
     */
    Dataset()
    {
    }

    /**
     * @brief Constructor.
     *
     * @param basePath The directory with subdirectories "rgb", "depth" and "poses".
     */
    Dataset(const std::string& basePath)
    {
        readFileNames(basePath);
    }

    /**
     * @brief Size of the dataset (number of frames).
     *
     * @return size_t Size.
     */
    size_t size() const
    {
        return bgrFiles.size();
    }

    /**
     * @brief Return the RGB image file name of the given frame number.
     *
     * @param i Frame number.
     * @return std::string File name.
     */
    std::string getFileName(size_t i) const
    {
        return bgrFiles[i];
    }

    /**
     * @brief Get ground truth pose for the given frame.
     *
     * @param i Frame number.
     * @param pose Output parameter. Ground truth pose.
     * @return bool Returns false if there is no valid ground truth for this frame.
     */
    bool getPose(size_t i, jp::cv_trans_t& pose) const
    {
        if(infoFiles.empty())
            return false;
        if(!readData(infoFiles[i], pose))
            return false;
        return true;
    }

    /**
     * @brief Get the RGB image of the given frame.
     *
     * The image is rescaled to the CNN input size if necessary and zero-padded
     * (for random shifting in training mode) to the full image dimensions.
     *
     * @param i Frame number.
     * @param img Output parameter. RGB image.
     * @return void
     */
    void getBGR(size_t i, jp::img_bgr_t& img) const
    {
        std::string bgrFile = bgrFiles[i];
        readData(bgrFile, img);

        GlobalProperties* gp = GlobalProperties::getInstance();
        int imgWidth = gp->dP.imageWidth;
        int imgHeight = gp->dP.imageHeight;
        int imgPadding = gp->dP.imgPadding;

        // add zero padding to image for random shifting in training mode
        int realImgWidth = gp->getCNNInputDimX();
        int realImgHeight = gp->getCNNInputDimY();

        // rescale input image to the expected CNN input dimensions
        if((img.cols != realImgWidth) || (img.rows != realImgHeight))
            cv::resize(img, img, cv::Size(realImgWidth, realImgHeight));

        jp::img_bgr_t imgPadded = jp::img_bgr_t::zeros(imgHeight, imgWidth);
        img.copyTo(imgPadded.colRange(imgPadding, imgPadding + img.cols).rowRange(imgPadding, imgPadding + img.rows));

        img = imgPadded;
    }

    /**
     * @brief Get the depth image of the given frame.
     *
     * If the constD parameter has a positive value, the depth channel will be
     * filled with this constant value (constD is in meters, stored in mm).
     *
     * @param i Frame number.
     * @param img Output parameter. Depth image.
     * @return void
     */
    void getDepth(size_t i, jp::img_depth_t& img) const
    {
        if(GlobalProperties::getInstance()->dP.constD > 0)
        {
            // return constant depth channel (m -> mm)
            img = jp::img_depth_t::ones(
                GlobalProperties::getInstance()->dP.imageHeight,
                GlobalProperties::getInstance()->dP.imageWidth);
            img *= GlobalProperties::getInstance()->dP.constD * 1000;
        }
        else
        {
            std::string dFile = depthFiles[i];
            readData(dFile, img);

            // zero pad image for random shifting in training mode
            int imgPadding = GlobalProperties::getInstance()->dP.imgPadding;

            jp::img_depth_t imgPadded = jp::img_depth_t::zeros(img.rows + 2 * imgPadding, img.cols + 2 * imgPadding);
            img.copyTo(imgPadded.colRange(imgPadding, imgPadding + img.cols).rowRange(imgPadding, imgPadding + img.rows));

            img = imgPadded;
        }
    }

    /**
     * @brief Get the RGB-D image of the given frame.
     *
     * @param i Frame number.
     * @param img Output parameter. RGB-D image.
     * @return void
     */
    void getBGRD(size_t i, jp::img_bgrd_t& img) const
    {
        getBGR(i, img.bgr);
        getDepth(i, img.depth);
    }

    /**
     * @brief Get the ground truth object coordinate image of the given frame.
     *
     * Object coordinates are generated from the depth image and the ground
     * truth pose: each valid depth pixel is back-projected to a camera
     * coordinate and then transformed into the object frame.
     *
     * @param i Frame number.
     * @param img Output parameter. Object coordinate image.
     * @throws std::runtime_error If no valid ground truth pose exists for the frame.
     * @return void
     */
    void getObj(size_t i, jp::img_coord_t& img) const
    {
        jp::img_depth_t depthData;
        getDepth(i, depthData);

        // get ground truth pose; object coordinates are undefined without it,
        // so fail loudly instead of running cv::Rodrigues on garbage
        jp::cv_trans_t poseData;
        if(!getPose(i, poseData))
            throw std::runtime_error("No ground truth pose available for frame: " + bgrFiles[i]);

        cv::Mat rot;
        cv::Rodrigues(poseData.first, rot);

        img = jp::img_coord_t(depthData.rows, depthData.cols);

        // signed loop variables: required by OpenMP 2.x 'parallel for' and
        // avoids signed/unsigned comparison with cv::Mat dimensions (int)
        #pragma omp parallel for
        for(int x = 0; x < img.cols; x++)
        for(int y = 0; y < img.rows; y++)
        {
            if(depthData(y, x) == 0)
            {
                // depth hole: mark with the invalid (zero) coordinate
                img(y, x) = jp::coord3_t(0, 0, 0);
                continue;
            }

            // transform depth to camera coordinate (depth map is in mm)
            cv::Mat_<double> eye = pxToEye(x, y, depthData(y, x) / 1000.0);

            // transform camera coordinate to object coordinate (inverse pose)
            eye = eye - poseData.second;
            eye = rot.t() * eye;

            img(y, x) = jp::coord3_t(eye(0, 0), eye(1, 0), eye(2, 0));
        }
    }

private:

    /**
     * @brief Reads all file names in the various sub-folders of a dataset.
     *
     * @param basePath Folder where all data sub folders lie.
     * @return void
     */
    void readFileNames(const std::string& basePath)
    {
        std::cout << "Reading file names... " << std::endl;

        std::string bgrPath = "/rgb/", bgrSuf = ".png";
        std::string dPath = "/depth/", dSuf = ".png";
        std::string infoPath = "/poses/", infoSuf = ".txt";

        bgrFiles = getFiles(basePath + bgrPath, bgrSuf, true);
        if(bgrFiles.empty())
            bgrFiles = getFiles(basePath + bgrPath, ".jpg");
        depthFiles = getFiles(basePath + dPath, dSuf);
        if(depthFiles.empty())
            depthFiles = getFiles(basePath + dPath, ".tiff");
        infoFiles = getFiles(basePath + infoPath, infoSuf, true);

        // optional subsampling of images
        int subSample = GlobalProperties::getInstance()->dP.imageSubSample;
        if(subSample < 1)
            subSample = 1; // guard: a step of 0 would loop forever

        std::vector<std::string> bgrFilesTemp;
        std::vector<std::string> depthFilesTemp;
        std::vector<std::string> infoFilesTemp;

        for(size_t i = 0; i < bgrFiles.size(); i += subSample)
        {
            bgrFilesTemp.push_back(bgrFiles[i]);
            // depth/pose lists may be shorter than (or empty relative to) the
            // RGB list; guard the index instead of reading out of bounds
            if(i < depthFiles.size()) depthFilesTemp.push_back(depthFiles[i]);
            if(i < infoFiles.size()) infoFilesTemp.push_back(infoFiles[i]);
        }

        bgrFiles = bgrFilesTemp;
        depthFiles = depthFilesTemp;
        infoFiles = infoFilesTemp;
    }

    // image data files
    std::vector<std::string> bgrFiles;   // RGB image files
    std::vector<std::string> depthFiles; // depth image files
    // groundtruth data files
    std::vector<std::string> infoFiles;  // ground truth pose files
};
}
|
main.c | /***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
#include "../../common/parboil.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "convert_dataset.h"
#include "file.h"
#include "BenchmarksUtil.h"
#define ERROR_THRESHOLD 0.05
double t_start, t_end, t_start_GPU, t_end_GPU;
float *h_Ax_vector_GPU, *h_Ax_vector_CPU;
int N;
typedef float DATA_TYPE;
/* Compares the CPU and GPU result vectors element-wise and reports how many
 * entries differ by more than ERROR_THRESHOLD percent. Returns the mismatch
 * count (0 means the outputs agree within tolerance). */
int compareResults(DATA_TYPE *A, DATA_TYPE *A_GPU) {
  int mismatches = 0;
  int idx;

  for (idx = 0; idx < N; idx++) {
    if (percentDiff(A[idx], A_GPU[idx]) > ERROR_THRESHOLD)
      mismatches++;
  }

  /* print results */
  printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f "
         "Percent: %d\n",
         ERROR_THRESHOLD, mismatches);

  return mismatches;
}
/* Fills x_vector with dim pseudo-random floats in [0, 1]. The generator is
 * re-seeded with a fixed constant so every run produces the same vector.
 * Always returns 0. */
static int generate_vector(float *x_vector, int dim) {
  int idx;

  srand(54321);
  for (idx = 0; idx < dim; idx++)
    x_vector[idx] = rand() / (float)RAND_MAX;

  return 0;
}
/*
void jdsmv(int height, int len, float* value, int* perm, int* jds_ptr, int*
col_index, float* vector,
float* result){
int i;
int col,row;
int row_index =0;
int prem_indicator=0;
for (i=0; i<len; i++){
if (i>=jds_ptr[prem_indicator+1]){
prem_indicator++;
row_index=0;
}
if (row_index<height){
col = col_index[i];
row = perm[row_index];
result[row]+=value[i]*vector[col];
}
row_index++;
}
return;
}
*/
/*
 * Offloaded (OpenMP target) JDS sparse matrix-vector multiply benchmark.
 *
 * Loads a matrix from inpFiles[0] (converted from COO to JDS format) and a
 * vector from inpFiles[1], then computes Ax = A*x 50 times on the device.
 * The result vector is published via the globals h_Ax_vector_GPU and N so
 * main() can compare it against the CPU run (ownership passes to main, which
 * frees it). Returns the kernel wall-clock time in seconds; file I/O and the
 * format conversion are excluded from the timing.
 */
double spmvGPU(int argc, char **argv) {
  // struct pb_TimerSet timers;
  struct pb_Parameters *parameters;
  // printf("CPU-based sparse matrix vector multiplication****\n");
  // printf("Original version by Li-Wen Chang <lchang20@illinois.edu> and
  //Shengzhao Wu<wu14@illinois.edu>\n");
  // printf("This version maintained by Chris Rodrigues ***********\n");
  parameters = pb_ReadParameters(&argc, argv);
  if ((parameters->inpFiles[0] == NULL) || (parameters->inpFiles[1] == NULL)) {
    fprintf(stderr, "Expecting two input filenames\n");
    exit(-1);
  }
  // pb_InitializeTimerSet(&timers);
  // pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
  // parameters declaration
  int len;        // number of stored JDS values (length of h_data / h_indices)
  int depth;      // number of jagged diagonals (maximum row length)
  int dim;        // matrix dimension == vector length
  int pad = 1;    // warp-size padding passed to the JDS conversion
  int nzcnt_len;  // length of the per-row nonzero-count array h_nzcnt
  // host memory allocation
  // matrix
  float *h_data;
  int *h_indices;
  int *h_ptr;
  int *h_perm;
  int *h_nzcnt;
  // vector
  float *h_Ax_vector;
  float *h_x_vector;
  // load matrix from files
  // pb_SwitchToTimer(&timers, pb_TimerID_IO);
  // inputData(parameters->inpFiles[0], &len, &depth, &dim,&nzcnt_len,&pad,
  // &h_data, &h_indices, &h_ptr,
  // &h_perm, &h_nzcnt);
  int col_count;
  coo_to_jds(parameters->inpFiles[0], // bcsstk32.mtx, fidapm05.mtx, jgl009.mtx
             1, // row padding
             pad, // warp size
             1, // pack size
             1, // is mirrored?
             0, // binary matrix
             0, // debug level [0:2]
             &h_data, &h_ptr, &h_nzcnt, &h_indices, &h_perm, &col_count, &dim,
             &len, &nzcnt_len, &depth);
  h_Ax_vector = (float *)malloc(sizeof(float) * dim);
  h_x_vector = (float *)malloc(sizeof(float) * dim);
  // generate_vector(h_x_vector, dim);
  input_vec(parameters->inpFiles[1], h_x_vector, dim);
  // pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
  int p, i;
  t_start_GPU = rtclock();
  // main execution
  // NOTE(review): h_nzcnt is indexed by row i (0..dim-1) below, but only
  // nzcnt_len elements are mapped; verify nzcnt_len >= dim for the inputs used.
#pragma omp target map(to : h_nzcnt[ : nzcnt_len], h_ptr[ : col_count], h_indices[ : len], h_data[ : len], h_perm[ : col_count], h_x_vector[ : dim]) map(from : h_Ax_vector[ : dim])
#pragma omp teams distribute
  for (p = 0; p < 50; p++) {  // 50 repetitions of the same SpMV (benchmark workload)
#pragma omp parallel for
    for (i = 0; i < dim; i++) {
      int k;
      float sum = 0.0f;
      // int bound = h_nzcnt[i / 32];
      int bound = h_nzcnt[i];  // nonzeros in (length-sorted) row i
      for (k = 0; k < bound; k++) {
        int j = h_ptr[k] + i;  // JDS: element of row i in the k-th jagged diagonal
        int in = h_indices[j]; // column index of that nonzero
        float d = h_data[j];
        float t = h_x_vector[in];
        sum += d * t;
      }
      // #pragma omp critical
      h_Ax_vector[h_perm[i]] = sum;  // scatter back to the original row order
    }
  }
  t_end_GPU = rtclock();
  h_Ax_vector_GPU = h_Ax_vector;  // published for main(); freed there
  N = dim;
  // if (parameters->outFile) {
  // pb_SwitchToTimer(&timers, pb_TimerID_IO);
  // outputData(parameters->outFile,h_Ax_vector,dim);
  // }
  // pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
  free(h_data);
  free(h_indices);
  free(h_ptr);
  free(h_perm);
  free(h_nzcnt);
  free(h_x_vector);
  // pb_SwitchToTimer(&timers, pb_TimerID_NONE);
  // pb_PrintTimerSet(&timers);
  pb_FreeParameters(parameters);
  return t_end_GPU - t_start_GPU;
}
/*
 * Sequential reference run of the same JDS sparse matrix-vector multiply as
 * spmvGPU(): loads the matrix (inpFiles[0]) and vector (inpFiles[1]) and
 * computes Ax = A*x 50 times on the host. Publishes the result through the
 * globals h_Ax_vector_CPU and N (ownership passes to main, which frees it)
 * and returns the elapsed compute time in seconds. The inner loop mirrors
 * the offloaded kernel so the outputs can be compared element-wise.
 */
double spmvCPU(int argc, char **argv) {
  // struct pb_TimerSet timers;
  struct pb_Parameters *parameters;
  // printf("CPU-based sparse matrix vector multiplication****\n");
  // printf("Original version by Li-Wen Chang <lchang20@illinois.edu> and
  //Shengzhao Wu<wu14@illinois.edu>\n");
  // printf("This version maintained by Chris Rodrigues ***********\n");
  parameters = pb_ReadParameters(&argc, argv);
  if ((parameters->inpFiles[0] == NULL) || (parameters->inpFiles[1] == NULL)) {
    fprintf(stderr, "Expecting two input filenames\n");
    exit(-1);
  }
  // pb_InitializeTimerSet(&timers);
  // pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
  // parameters declaration
  int len;        // number of stored JDS values
  int depth;      // number of jagged diagonals
  int dim;        // matrix dimension == vector length
  int pad = 1;    // warp-size padding passed to the JDS conversion
  int nzcnt_len;  // length of the per-row nonzero-count array h_nzcnt
  // host memory allocation
  // matrix
  float *h_data;
  int *h_indices;
  int *h_ptr;
  int *h_perm;
  int *h_nzcnt;
  // vector
  float *h_Ax_vector;
  float *h_x_vector;
  // load matrix from files
  // pb_SwitchToTimer(&timers, pb_TimerID_IO);
  // inputData(parameters->inpFiles[0], &len, &depth, &dim,&nzcnt_len,&pad,
  // &h_data, &h_indices, &h_ptr,
  // &h_perm, &h_nzcnt);
  int col_count;
  coo_to_jds(parameters->inpFiles[0], // bcsstk32.mtx, fidapm05.mtx, jgl009.mtx
             1, // row padding
             pad, // warp size
             1, // pack size
             1, // is mirrored?
             0, // binary matrix
             0, // debug level [0:2]
             &h_data, &h_ptr, &h_nzcnt, &h_indices, &h_perm, &col_count, &dim,
             &len, &nzcnt_len, &depth);
  h_Ax_vector = (float *)malloc(sizeof(float) * dim);
  h_x_vector = (float *)malloc(sizeof(float) * dim);
  // generate_vector(h_x_vector, dim);
  input_vec(parameters->inpFiles[1], h_x_vector, dim);
  // pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
  int p, i;
  // main execution
  t_start = rtclock();
  for (p = 0; p < 50; p++) {  // same 50-repetition workload as spmvGPU()
    for (i = 0; i < dim; i++) {
      int k;
      float sum = 0.0f;
      // int bound = h_nzcnt[i / 32];
      int bound = h_nzcnt[i];  // nonzeros in (length-sorted) row i
      for (k = 0; k < bound; k++) {
        int j = h_ptr[k] + i;  // JDS: element of row i in the k-th jagged diagonal
        int in = h_indices[j]; // column index of that nonzero
        float d = h_data[j];
        float t = h_x_vector[in];
        sum += d * t;
      }
      // #pragma omp critical
      h_Ax_vector[h_perm[i]] = sum;  // scatter back to the original row order
    }
  }
  t_end = rtclock();
  h_Ax_vector_CPU = h_Ax_vector;  // published for main(); freed there
  N = dim;
  // if (parameters->outFile) {
  // pb_SwitchToTimer(&timers, pb_TimerID_IO);
  // outputData(parameters->outFile,h_Ax_vector,dim);
  // }
  // pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
  free(h_data);
  free(h_indices);
  free(h_ptr);
  free(h_perm);
  free(h_nzcnt);
  free(h_x_vector);
  // pb_SwitchToTimer(&timers, pb_TimerID_NONE);
  // pb_PrintTimerSet(&timers);
  pb_FreeParameters(parameters);
  return t_end - t_start;
}
/*
 * Entry point: runs the GPU benchmark, and (only when built with -DRUN_TEST)
 * the CPU reference run plus an element-wise comparison. Returns the number
 * of mismatching outputs (0 on success / when the comparison is skipped).
 */
int main(int argc, char **argv) {
  int fail = 0;

  double t_GPU = spmvGPU(argc, argv);
  fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_GPU);

#ifdef RUN_TEST
  /* t_CPU is scoped here so non-test builds get no unused-variable warning */
  double t_CPU = spmvCPU(argc, argv);
  fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_CPU);
  fail = compareResults(h_Ax_vector_GPU, h_Ax_vector_CPU);
#endif

  free(h_Ax_vector_GPU);
  free(h_Ax_vector_CPU); /* NULL when RUN_TEST is off; free(NULL) is a no-op */

  return fail;
}
|
GB_binop__bset_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bset_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__bset_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__bset_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__bset_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_int8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bset_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__bset_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_int8)
// C=scalar+B GB (_bind1st__bset_int8)
// C=scalar+B' GB (_bind1st_tran__bset_int8)
// C=A+scalar GB (_bind2nd__bset_int8)
// C=A'+scalar GB (_bind2nd_tran__bset_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = GB_BITSET (aij, bij, int8_t, 8)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_BITSET (x, y, int8_t, 8) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_INT8 || GxB_NO_BSET_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled stub: the dense C += A+B kernel is only generated for a fixed set
// of arithmetic operators, which does not include BSET.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; no mask, no accumulator.
// All of the work happens in the shared template, driven by the GB_* macros
// defined above for this operator/type combination.
GrB_Info GB (_Cdense_ewise3_noaccum__bset_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse: accumulate each entry of B into
// the corresponding position of C, using the B_ek_slicing task schedule.
GrB_Info GB (_Cdense_accumB__bset_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar (passed type-erased via p_bwork).
GrB_Info GB (_Cdense_accumb__bset_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the inner block already returned (auto-generated pattern)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no C = A*D (column-scale) kernel is generated for the BSET
// operator; the generic fallback handles it instead.
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled stub: no C = D*B (row-scale) kernel is generated for the BSET
// operator; the generic fallback handles it instead.
// Placeholder name fixed from "((node))" to the generator's "((none))"
// convention (cf. the colscale stub above).
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (or C<M>=A+B / C<!M>=A+B); the pattern of C is the union
// of the patterns of A and B, computed by the shared add template.
GrB_Info GB (_AaddB__bset_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace for slicing M, A, and B; allocated by the template as needed
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult method 01: C = A.*B (or masked); the pattern of C is the
// intersection of the patterns of A and B.
GrB_Info GB (_AemultB_01__bset_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. flipxy selects whether the operator arguments are swapped.
GrB_Info GB (_AemultB_02__bset_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 03: C<M> = A.*B where M is sparse/hyper and both A and B
// are bitmap/full; iterates over the entries of M.
GrB_Info GB (_AemultB_03__bset_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is bitmap.
GrB_Info GB (_AemultB_bitmap__bset_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = op (x, Bx [k]) for every entry present in B, with the scalar x
// bound as the first operand. Entries absent from the bitmap Bb are skipped.
GrB_Info GB (_bind1st__bset_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int8_t x = (*((int8_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // apply the operator only where B has an entry
        if (GBB (Bb, k))
        {
            int8_t bij = Bx [k] ;
            Cx [k] = GB_BITSET (x, bij, int8_t, 8) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = op (Ax [k], y) for every entry present in A, with the scalar y
// bound as the second operand. Entries absent from the bitmap Ab are skipped.
GrB_Info GB (_bind2nd__bset_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // apply the operator only where A has an entry
        if (GBB (Ab, k))
        {
            int8_t aij = Ax [k] ;
            Cx [k] = GB_BITSET (aij, y, int8_t, 8) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_BITSET (x, aij, int8_t, 8) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar x bound
// as the first operand; GB_CAST_OP (defined just above) does the per-entry work.
GrB_Info GB (_bind1st_tran__bset_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (same definition; generated for symmetry)
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_BITSET (aij, y, int8_t, 8) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar y bound
// as the second operand; GB_CAST_OP (defined just above) does the per-entry work.
GrB_Info GB (_bind2nd_tran__bset_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
cg.c | /*--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - CG
This benchmark is an OpenMP C version of the NPB CG code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
Authors: M. Yarrow
C. Kuszmaul
OpenMP C version: S. Satoh
--------------------------------------------------------------------*/
/*
c---------------------------------------------------------------------
c Note: please observe that in the routine conj_grad three
c implementations of the sparse matrix-vector multiply have
c been supplied. The default matrix-vector multiply is not
c loop unrolled. The alternate implementations are unrolled
c to a depth of 2 and unrolled to a depth of 8. Please
c experiment with these to find the fastest for your particular
c architecture. If reporting timing results, any of these three may
c be used without penalty.
c---------------------------------------------------------------------
*/
#include "npb-C.h"
#include "npbparams.h"
#include "openacc.h"
#define NZ NA*(NONZER+1)*(NONZER+1)+NA*(NONZER+2)
#ifdef _OPENARC_
#pragma openarc #define NZ \NA*(\NONZER+1)*(\NONZER+1)+\NA*(\NONZER+2)
#endif
/* global variables */
/* common /partit_size/ */
/* Matrix partition bounds.  In this single-process version firstrow/
   firstcol are 1 and lastrow/lastcol are NA (set once in main). */
static int naa;
static int nzz;
static int firstrow;
static int lastrow;
static int firstcol;
static int lastcol;
/* common /main_int_mem/ */
/* Fixed-size work arrays used only during matrix generation (makea). */
//static int colidx[NZ+1]; /* colidx[1:NZ] */
//static int rowstr[NA+1+1]; /* rowstr[1:NA+1] */
static int iv[2*NA+1+1]; /* iv[1:2*NA+1] */
static int arow[NZ+1]; /* arow[1:NZ] */
static int acol[NZ+1]; /* acol[1:NZ] */
/* CSR index arrays; allocated via acc_create_unified() in main so the
   device kernels and host share one copy. */
static int *colidx;
static int *rowstr;
/* common /main_flt_mem/ */
static float v[NA+1+1]; /* v[1:NA+1] */
static float aelt[NZ+1]; /* aelt[1:NZ] */
//static float a[NZ+1]; /* a[1:NZ] */
//static float x[NA+2+1]; /* x[1:NA+2] */
//static float z[NA+2+1]; /* z[1:NA+2] */
//static float p[NA+2+1]; /* p[1:NA+2] */
//static float q[NA+2+1]; /* q[1:NA+2] */
//static float r[NA+2+1]; /* r[1:NA+2] */
//static float w[NA+2+1]; /* w[1:NA+2] */
/* CSR values plus the CG iteration vectors; also unified allocations. */
static float *a;
static float *x;
static float *z;
static float *p;
static float *q;
static float *r;
static float *w;
/* common /urando/ */
/* State (tran) and multiplier (amult) of the NPB linear-congruential
   random stream consumed by randlc(); shared by sprnvc(). */
static float amult;
static float tran;
// Static variables used in conj_grad().
// NOTE(review): these file-scope scalars act as shared accumulators for
// the inlined CG kernels in main(); they are not thread/gang private.
static float d, sum, rho, rho0, alpha, beta;
/* function declarations */
static void conj_grad (int colidx[NZ+1], int rowstr[NA+1+1], float x[NA+2+1], float z[NA+2+1],
float a[NZ+1], float p[NA+2+1], float q[NA+2+1], float r[NA+2+1],
float w[NA+2+1], float *rnorm);
static void makea(int n, int nz, float a[NZ+1], int colidx[NZ+1], int rowstr[NA+1+1],
int nonzer, int firstrow, int lastrow, int firstcol,
int lastcol, float rcond, int arow[NZ+1], int acol[NZ+1],
float aelt[NZ+1], float v[NA+1+1], int iv[2*NA+1+1], float shift );
static void sparse(float a[NZ+1], int colidx[NZ+1], int rowstr[NA+1+1], int n,
int arow[NZ+1], int acol[NZ+1], float aelt[NZ+1],
int firstrow, int lastrow,
float x[NA+1+1], boolean mark[NA+1], int nzloc[NA+1], int nnza);
static void sprnvc(int n, int nz, float v[], int iv[], int nzloc[],
int mark[]);
static int icnvrt(float x, int ipwr2);
static void vecset(int n, float v[], int iv[], int *nzv, int i, float val);
/*--------------------------------------------------------------------
program cg
--------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c  CG benchmark driver.  Builds the sparse test matrix (makea), then
c  runs the inverse power method: one untimed warm-up iteration of the
c  conjugate-gradient solve followed by NITER timed iterations.  The
c  body of conj_grad() has been manually inlined (twice) so its loops
c  can sit inside one OpenACC data region; the original call sites are
c  kept as comments.  Verification compares the final eigenvalue
c  estimate (zeta) against single-precision reference values.
c-------------------------------------------------------------------*/
int main(int argc, char **argv) {
int i_main, j_main, k_main, it;
int nthreads = 1;
float zeta;
float rnorm;
float norm_temp11;
float norm_temp12;
float t, mflops;
char classT = 'U';
boolean verified;
float zeta_verify_value, epsilon;
////////////////////////////////////
// Used for inlining conj_grad(). //
////////////////////////////////////
int i, j, k;
int cgit, cgitmax = 25;
firstrow = 1;
lastrow  = NA;
firstcol = 1;
lastcol  = NA;
/* Map the compile-time problem parameters to a benchmark class; the
   reference zeta values were re-measured for this single-precision
   port (the original double-precision values are kept as comments). */
if (NA == 1400 && NONZER == 7 && NITER == 15 && SHIFT == 10.0) {
classT = 'S';
// zeta_verify_value = 8.5971775078648;
zeta_verify_value = 8.379274368286; //serial version value with Single Precision
} else if (NA == 7000 && NONZER == 8 && NITER == 15 && SHIFT == 12.0) {
classT = 'W';
// zeta_verify_value = 10.362595087124;
zeta_verify_value = 10.11725139618; //serial version value with Single Precision
} else if (NA == 14000 && NONZER == 11 && NITER == 15 && SHIFT == 20.0) {
classT = 'A';
// zeta_verify_value = 17.130235054029;
zeta_verify_value = 18.62915039062; //serial version value with Single Precision
} else if (NA == 75000 && NONZER == 13 && NITER == 75 && SHIFT == 60.0) {
classT = 'B';
// zeta_verify_value = 22.712745482631;
zeta_verify_value = 62.42129135132; //serial version value with Single Precision
} else if (NA == 150000 && NONZER == 15 && NITER == 75 && SHIFT == 110.0) {
classT = 'C';
// zeta_verify_value = 28.973605592845;
zeta_verify_value = 115.1209869385; //serial version value with Single Precision
} else {
classT = 'U';
}
printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
" - CG Benchmark\n");
printf(" Size: %10d\n", NA);
printf(" Iterations: %5d\n", NITER);
naa = NA;
nzz = NZ;
/* Timer 2: total run; timer 3: device-data-region setup; timer 4: makea. */
timer_clear(2);
timer_clear(3);
timer_clear(4);
timer_start(2);
/*--------------------------------------------------------------------
c Initialize random number generator
c-------------------------------------------------------------------*/
// Initial numbers are changed for single precision
// tran = 314159265.0;
// amult = 1220703125.0;
tran = 28183.0f;
amult = 390625.0f;
zeta = randlc( &tran, amult );
// Allocate the main data structures.
/*
colidx = (int *)malloc(sizeof(int)*(NZ+1));
rowstr = (int *)malloc(sizeof(int)*(NA+1+1));
a = (float *)malloc(sizeof(float)*(NZ+1));
x = (float *)malloc(sizeof(float)*(NA+2+1));
z = (float *)malloc(sizeof(float)*(NA+2+1));
p = (float *)malloc(sizeof(float)*(NA+2+1));
q = (float *)malloc(sizeof(float)*(NA+2+1));
r = (float *)malloc(sizeof(float)*(NA+2+1));
w = (float *)malloc(sizeof(float)*(NA+2+1));
*/
/* Unified (host+device visible) allocations replace the plain mallocs
   above; acc_create_unified is an OpenARC extension — the first
   argument (host pointer) is NULL so the runtime picks the address. */
colidx = (int *)acc_create_unified(NULL, sizeof(int)*(NZ+1));
rowstr = (int *)acc_create_unified(NULL, sizeof(int)*(NA+1+1));
a = (float *)acc_create_unified(NULL, sizeof(float)*(NZ+1));
x = (float *)acc_create_unified(NULL, sizeof(float)*(NA+2+1));
z = (float *)acc_create_unified(NULL, sizeof(float)*(NA+2+1));
p = (float *)acc_create_unified(NULL, sizeof(float)*(NA+2+1));
q = (float *)acc_create_unified(NULL, sizeof(float)*(NA+2+1));
r = (float *)acc_create_unified(NULL, sizeof(float)*(NA+2+1));
w = (float *)acc_create_unified(NULL, sizeof(float)*(NA+2+1));
/*--------------------------------------------------------------------
c
c-------------------------------------------------------------------*/
timer_start(4);
makea(naa, nzz, a, colidx, rowstr, NONZER,
firstrow, lastrow, firstcol, lastcol,
RCOND, arow, acol, aelt, v, iv, SHIFT);
timer_stop(4);
timer_start(3);
/*---------------------------------------------------------------------
c Note: as a result of the above call to makea:
c values of j used in indexing rowstr go from 1 --> lastrow-firstrow+1
c values of colidx which are col indexes go from firstcol --> lastcol
c So:
c Shift the col index vals from actual (firstcol --> lastcol )
c to local, i.e., (1 --> lastcol-firstcol+1)
c---------------------------------------------------------------------*/
/* One data region spans the warm-up and all NITER timed iterations:
   the matrix (a, colidx, rowstr) is copied in once; the CG vectors are
   device-resident scratch (create).  Results come back via the shared
   scalar statics, not via copyout. */
#pragma acc data \
create(x[0:NA+3]) \
create(z[0:NA+3]) \
create(p[0:NA+3]) \
create(q[0:NA+3]) \
create(r[0:NA+3]) \
create(w[0:NA+3]) \
copyin(a[0:NZ+1]) \
copyin(colidx[0:NZ+1]) \
copyin(rowstr[0:NA+2])
{
timer_stop(3);
// R/O Shared scalar: lastrow, firstrow, firstcol
// R/O Shared arrays: rowstr[NA+1+1]
// R/W Shared arrays: colidx[NZ+1]
// R/W Private scalar: j_main, k_main
#pragma acc kernels loop gang worker
for (j_main = 1; j_main <= lastrow - firstrow + 1; j_main++) {
for (k_main = rowstr[j_main]; k_main < rowstr[j_main+1]; k_main++) {
colidx[k_main] = colidx[k_main] - firstcol + 1;
}
}
/*--------------------------------------------------------------------
c set starting vector to (1, 1, .... 1)
c-------------------------------------------------------------------*/
// R/W Shared arrays: x[NA+2+1]
// R/W Private scalar: i_main
#pragma acc kernels loop gang worker
for (i_main = 1; i_main <= NA+1; i_main++) {
x[i_main] = 1.0f;
}
// R/W Shared scalar: zeta
zeta = 0.0f;
/*-------------------------------------------------------------------
c---->
c Do one iteration untimed to init all code and data page tables
c----> (then reinit, start timing, to niter its)
c-------------------------------------------------------------------*/
for (it = 1; it <= 1; it++) {
/*--------------------------------------------------------------------
c The call to the conjugate gradient routine:
c-------------------------------------------------------------------*/
//conj_grad (colidx, rowstr, x, z, a, p, q, r, w, &rnorm);
cgitmax = 25;
// R/W Shared scalars: rho (function-static)
rho = 0.0f;
/*--------------------------------------------------------------------
c Initialize the CG algorithm:
c-------------------------------------------------------------------*/
// R/W Shared arrays: x[NA+2+1], r[NA+2+1]
// R/W Shared arrays: q[NA+2+1], z[NA+2+1], r[NA+2+1], p[NA+2+1], w[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= NA+1; j++) {
q[j] = 0.0f;
z[j] = 0.0f;
r[j] = x[j];
p[j] = r[j];
w[j] = 0.0f;
}
/*--------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: x[NA+2+1]
// R/W Shared scalars: rho (function-static)
// R/W Private scalars: j
// NOTE(review): this (and the other accumulations into rho/d/sum/
// norm_temp1* below) has no explicit reduction clause; it relies on
// the compiler recognizing the reduction inside the kernels region —
// verify the generated code, otherwise this is a device-side race.
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
rho = rho + x[j]*x[j];
}
/*--------------------------------------------------------------------
c---->
c The conj grad iteration loop
c---->
c-------------------------------------------------------------------*/
for (cgit = 1; cgit <= cgitmax; cgit++) {
// R/W Shared scalars: d, rho, rho0 (function-static)
{
rho0 = rho;
d = 0.0f;
rho = 0.0f;
} /* end single */
/*--------------------------------------------------------------------
c q = A.p
c The partition submatrix-vector multiply: use workspace w
c---------------------------------------------------------------------
C
C NOTE: this version of the multiply is actually (slightly: maybe %5)
C faster on the sp2 on 16 nodes than is the unrolled-by-2 version
C below. On the Cray t3d, the reverse is true, i.e., the
C unrolled-by-two version is some 10% faster.
C The unrolled-by-8 version below is significantly faster
C on the Cray t3d - overall speed of code is 1.5 times faster.
*/
/* rolled version */
// R/O Shared scalars: lastrow, firstrow
// R/O Shared arrays: rowstr[NA+1+1], a[NZ+1], p[NA+2+1], colidx[NZ+1],
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j, k, sum
#pragma acc kernels loop gang worker independent private(sum)
for (j = 1; j <= lastrow-firstrow+1; j++) {
sum = 0.0f;
for (k = rowstr[j]; k < rowstr[j+1]; k++) {
sum = sum + a[k]*p[colidx[k]];
}
w[j] = sum;
}
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: w[NA+2+1]
// R/W Shared arrays: q[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
q[j] = w[j];
}
/*--------------------------------------------------------------------
c Clear w for reuse...
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j
/*--------------------------------------------------------------------
c Obtain p.q
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: p[NA+2+1], q[NA+2+1]
// R/W Shared scalars: d (function-static)
// R/W Private scalars: j
/* The w-clear and the p.q dot product are fused into one kernel. */
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
w[j] = 0.0f;
d = d + p[j]*q[j];
}
/*--------------------------------------------------------------------
c Obtain alpha = rho / (p.q)
c-------------------------------------------------------------------*/
// R/O Shared scalars: rho0, d (function-static)
// R/W Shared scalars: alpha (function-static)
alpha = rho0 / d;
/*--------------------------------------------------------------------
c Save a temporary of rho
c-------------------------------------------------------------------*/
/* rho0 = rho;*/
/*---------------------------------------------------------------------
c Obtain z = z + alpha*p
c and r = r - alpha*q
c---------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared scalars: alpha (function-static)
// R/O Shared arrays: p[NA+2+1], q[NA+2+1]
// R/W Shared arrays: z[NA+2+1], r[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
z[j] = z[j] + alpha*p[j];
r[j] = r[j] - alpha*q[j];
}
/*---------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c---------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: r[NA+2+1]
// R/W Shared scalars: rho (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
rho = rho + r[j]*r[j];
}
/*--------------------------------------------------------------------
c Obtain beta:
c-------------------------------------------------------------------*/
// R/O Shared scalars: rho0, rho (function-static)
// R/W Shared scalars: beta (function-static)
beta = rho / rho0;
/*--------------------------------------------------------------------
c p = r + beta*p
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared scalars: beta (function-static)
// R/O Shared arrays: r[NA+2+1]
// R/W Shared arrays: p[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
p[j] = r[j] + beta*p[j];
}
} /* end of do cgit=1,cgitmax */
/*---------------------------------------------------------------------
c Compute residual norm explicitly: ||r|| = ||x - A.z||
c First, form A.z
c The partition submatrix-vector multiply
c---------------------------------------------------------------------*/
// R/W Shared scalars: sum (function-static)
sum = 0.0f;
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: rowstr[NA+1+1], a[NZ+1], colidx[NZ+1], z[NA+2+1]
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j,d,k
#pragma acc kernels loop gang worker independent private(d)
for (j = 1; j <= lastrow-firstrow+1; j++) {
d = 0.0f;
for (k = rowstr[j]; k <= rowstr[j+1]-1; k++) {
d = d + a[k]*z[colidx[k]];
}
w[j] = d;
}
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: w[NA+2+1]
// R/W Shared arrays: r[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
r[j] = w[j];
}
/*--------------------------------------------------------------------
c At this point, r contains A.z
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: r[NA+2+1], x[NA+2+1]
// R/W Shared scalars: d, sum (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker independent private(d)
for (j = 1; j <= lastcol-firstcol+1; j++) {
d = x[j] - r[j];
sum = sum + d*d;
}
// R/O Shared scalars: sum (function-static)
// R/W Shared scalars: rnorm
{
//(*rnorm) = sqrtf(sum);
rnorm = sqrtf(sum);
} /* end single */
/*--------------------------------------------------------------------
c zeta = shift + 1/(x.z)
c So, first: (x.z)
c Also, find norm of z
c So, first: (z.z)
c-------------------------------------------------------------------*/
// R/W Shared scalars: norm_temp11, norm_temp12
{
norm_temp11 = 0.0f;
norm_temp12 = 0.0f;
} /* end single */
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: x[NA+2+1], z[NA+2+1]
// R/W Shared scalars: norm_temp11, norm_temp12
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j_main = 1; j_main <= lastcol-firstcol+1; j_main++) {
norm_temp11 = norm_temp11 + x[j_main]*z[j_main];
norm_temp12 = norm_temp12 + z[j_main]*z[j_main];
}
// R/w Shared scalars: norm_temp12
norm_temp12 = 1.0f / sqrtf( norm_temp12 );
/*--------------------------------------------------------------------
c Normalize z to obtain x
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol, norm_temp12
// R/O Shared arrays: z[NA+2+1]
// R/W Shared arrays: x[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j_main = 1; j_main <= lastcol-firstcol+1; j_main++) {
x[j_main] = norm_temp12*z[j_main];
}
} /* end of do one iteration untimed */
/*--------------------------------------------------------------------
c set starting vector to (1, 1, .... 1)
c-------------------------------------------------------------------*/
// R/W Shared arrays: x[NA+2+1]
// R/W Private scalars: i_main
#pragma acc kernels loop gang worker
for (i_main = 1; i_main <= NA+1; i_main++) {
x[i_main] = 1.0f;
}
// R/W Shared scalars: zeta
zeta = 0.0f;
// } /* end parallel */
timer_clear( 1 );
timer_start( 1 );
/*--------------------------------------------------------------------
c---->
c Main Iteration for inverse power method
c---->
c-------------------------------------------------------------------*/
//#pragma omp parallel private(it,i_main,j_main,k_main)
// {
/* Timed phase: this loop body is a second verbatim copy of the
   inlined conj_grad() above, plus the per-iteration zeta update and
   progress printout. */
for (it = 1; it <= NITER; it++) {
/*--------------------------------------------------------------------
c The call to the conjugate gradient routine:
c-------------------------------------------------------------------*/
//conj_grad(colidx, rowstr, x, z, a, p, q, r, w, &rnorm);
cgitmax = 25;
// R/W Shared scalars: rho (function-static)
rho = 0.0f;
/*--------------------------------------------------------------------
c Initialize the CG algorithm:
c-------------------------------------------------------------------*/
// R/W Shared arrays: x[NA+2+1], r[NA+2+1]
// R/W Shared arrays: q[NA+2+1], z[NA+2+1], r[NA+2+1], p[NA+2+1], w[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= NA+1; j++) {
q[j] = 0.0f;
z[j] = 0.0f;
r[j] = x[j];
p[j] = r[j];
w[j] = 0.0f;
}
/*--------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: x[NA+2+1]
// R/W Shared scalars: rho (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
rho = rho + x[j]*x[j];
}
/*--------------------------------------------------------------------
c---->
c The conj grad iteration loop
c---->
c-------------------------------------------------------------------*/
for (cgit = 1; cgit <= cgitmax; cgit++) {
// R/W Shared scalars: d, rho, rho0 (function-static)
{
rho0 = rho;
d = 0.0f;
rho = 0.0f;
} /* end single */
/*--------------------------------------------------------------------
c q = A.p
c The partition submatrix-vector multiply: use workspace w
c---------------------------------------------------------------------
C
C NOTE: this version of the multiply is actually (slightly: maybe %5)
C faster on the sp2 on 16 nodes than is the unrolled-by-2 version
C below. On the Cray t3d, the reverse is true, i.e., the
C unrolled-by-two version is some 10% faster.
C The unrolled-by-8 version below is significantly faster
C on the Cray t3d - overall speed of code is 1.5 times faster.
*/
/* rolled version */
// R/O Shared scalars: lastrow, firstrow
// R/O Shared arrays: rowstr[NA+1+1], a[NZ+1], p[NA+2+1], colidx[NZ+1],
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j, k, sum
#pragma acc kernels loop gang worker independent private(sum)
for (j = 1; j <= lastrow-firstrow+1; j++) {
sum = 0.0f;
for (k = rowstr[j]; k < rowstr[j+1]; k++) {
sum = sum + a[k]*p[colidx[k]];
}
w[j] = sum;
}
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: w[NA+2+1]
// R/W Shared arrays: q[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
q[j] = w[j];
}
/*--------------------------------------------------------------------
c Clear w for reuse...
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j
/*--------------------------------------------------------------------
c Obtain p.q
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: p[NA+2+1], q[NA+2+1]
// R/W Shared scalars: d (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
w[j] = 0.0f;
d = d + p[j]*q[j];
}
/*--------------------------------------------------------------------
c Obtain alpha = rho / (p.q)
c-------------------------------------------------------------------*/
// R/O Shared scalars: rho0, d (function-static)
// R/W Shared scalars: alpha (function-static)
alpha = rho0 / d;
/*--------------------------------------------------------------------
c Save a temporary of rho
c-------------------------------------------------------------------*/
/* rho0 = rho;*/
/*---------------------------------------------------------------------
c Obtain z = z + alpha*p
c and r = r - alpha*q
c---------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared scalars: alpha (function-static)
// R/O Shared arrays: p[NA+2+1], q[NA+2+1]
// R/W Shared arrays: z[NA+2+1], r[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
z[j] = z[j] + alpha*p[j];
r[j] = r[j] - alpha*q[j];
}
/*---------------------------------------------------------------------
c rho = r.r
c Now, obtain the norm of r: First, sum squares of r elements locally...
c---------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: r[NA+2+1]
// R/W Shared scalars: rho (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
rho = rho + r[j]*r[j];
}
/*--------------------------------------------------------------------
c Obtain beta:
c-------------------------------------------------------------------*/
// R/O Shared scalars: rho0, rho (function-static)
// R/W Shared scalars: beta (function-static)
beta = rho / rho0;
/*--------------------------------------------------------------------
c p = r + beta*p
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared scalars: beta (function-static)
// R/O Shared arrays: r[NA+2+1]
// R/W Shared arrays: p[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
p[j] = r[j] + beta*p[j];
}
} /* end of do cgit=1,cgitmax */
/*---------------------------------------------------------------------
c Compute residual norm explicitly: ||r|| = ||x - A.z||
c First, form A.z
c The partition submatrix-vector multiply
c---------------------------------------------------------------------*/
// R/W Shared scalars: sum (function-static)
sum = 0.0f;
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: rowstr[NA+1+1], a[NZ+1], colidx[NZ+1], z[NA+2+1]
// R/W Shared arrays: w[NA+2+1]
// R/W Private scalars: j,d,k
#pragma acc kernels loop gang worker independent private(d)
for (j = 1; j <= lastrow-firstrow+1; j++) {
d = 0.0f;
for (k = rowstr[j]; k <= rowstr[j+1]-1; k++) {
d = d + a[k]*z[colidx[k]];
}
w[j] = d;
}
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: w[NA+2+1]
// R/W Shared arrays: r[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j = 1; j <= lastcol-firstcol+1; j++) {
r[j] = w[j];
}
/*--------------------------------------------------------------------
c At this point, r contains A.z
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: r[NA+2+1], x[NA+2+1]
// R/W Shared scalars: d, sum (function-static)
// R/W Private scalars: j
#pragma acc kernels loop gang worker independent private(d)
for (j = 1; j <= lastcol-firstcol+1; j++) {
d = x[j] - r[j];
sum = sum + d*d;
}
// R/O Shared scalars: sum (function-static)
// R/W Shared scalars: rnorm
{
//(*rnorm) = sqrtf(sum);
rnorm = sqrtf(sum);
} /* end single */
/*--------------------------------------------------------------------
c zeta = shift + 1/(x.z)
c So, first: (x.z)
c Also, find norm of z
c So, first: (z.z)
c-------------------------------------------------------------------*/
// R/W Shared scalars: norm_temp11, norm_temp12
{
norm_temp11 = 0.0f;
norm_temp12 = 0.0f;
} /* end single */
// R/O Shared scalars: lastcol, firstcol
// R/O Shared arrays: x[NA+2+1], z[NA+2+1]
// R/W Shared scalars: norm_temp11, norm_temp12
// R/W Private scalars: j_main
#pragma acc kernels loop gang worker
for (j_main = 1; j_main <= lastcol-firstcol+1; j_main++) {
norm_temp11 = norm_temp11 + x[j_main]*z[j_main];
norm_temp12 = norm_temp12 + z[j_main]*z[j_main];
}
// R/O Shared scalars: norm_temp11
// R/W Shared scalars: norm_temp12, zeta
{
norm_temp12 = 1.0f / sqrtf( norm_temp12 );
zeta = SHIFT + 1.0f / norm_temp11;
} /* end single */
{
if( it == 1 ) {
printf(" iteration ||r|| zeta\n");
}
printf(" %5d %20.14e%20.13e\n", it, rnorm, zeta);
} /* end master */
/*--------------------------------------------------------------------
c Normalize z to obtain x
c-------------------------------------------------------------------*/
// R/O Shared scalars: lastcol, firstcol, norm_temp12
// R/O Shared arrays: z[NA+2+1]
// R/W Shared arrays: x[NA+2+1]
// R/W Private scalars: j
#pragma acc kernels loop gang worker
for (j_main = 1; j_main <= lastcol-firstcol+1; j_main++) {
x[j_main] = norm_temp12*z[j_main];
}
} /* end of main iter inv pow meth */
#if defined(_OPENMP)
nthreads = omp_get_num_threads();
#endif /* _OPENMP */
} /* end parallel */
timer_stop( 1 );
timer_stop( 2 );
/*--------------------------------------------------------------------
c End of timed section
c-------------------------------------------------------------------*/
t = timer_read( 1 );
printf(" Benchmark completed\n");
//epsilon = 1.0e-10;
//New value for single precision
epsilon = 1.0e-6;
/* Verification: compare the converged zeta against the class
   reference value within the single-precision tolerance. */
if (classT != 'U') {
if (fabs(zeta - zeta_verify_value) <= epsilon) {
verified = TRUE;
printf(" VERIFICATION SUCCESSFUL\n");
printf(" Zeta is %20.12e\n", zeta);
printf(" Error is %20.12e\n", zeta - zeta_verify_value);
} else {
verified = FALSE;
printf(" VERIFICATION FAILED\n");
printf(" Zeta %20.12e\n", zeta);
printf(" The correct zeta is %20.12e\n", zeta_verify_value);
}
} else {
verified = FALSE;
printf(" Problem size unknown\n");
printf(" NO VERIFICATION PERFORMED\n");
}
/* Standard NPB CG flop-count formula, scaled by the timed interval. */
if ( t != 0.0 ) {
mflops = (2.0*NITER*NA)
* (3.0+(NONZER*(NONZER+1)) + 25.0*(5.0+(NONZER*(NONZER+1))) + 3.0 )
/ t / 1000000.0;
} else {
mflops = 0.0;
}
c_print_results("CG", classT, NA, 0, 0, NITER, nthreads, t,
mflops, " floating point",
verified, NPBVERSION, COMPILETIME,
CS1, CS2, CS3, CS4, CS5, CS6, CS7);
printf("makea() execution time = %12.4f\n", timer_read(4));
printf("CUDA Initialization time = %12.4f\n", timer_read(3));
printf("Total execution time = %12.4f\n", timer_read(2));
return 0;
}
/*---------------------------------------------------------------------
c generate the test problem for benchmark 6
c makea generates a sparse matrix with a
c prescribed sparsity distribution
c
c parameter type usage
c
c input
c
c n i number of cols/rows of matrix
c nz i nonzeros as declared array size
c rcond r*8 condition number
c shift r*8 main diagonal shift
c
c output
c
c a r*8 array for nonzeros
c colidx i col indices
c rowstr i row pointers
c
c workspace
c
c iv, arow, acol i
c v, aelt r*8
c---------------------------------------------------------------------*/
/* Generate the CG test matrix: n outer products of sparse random
   vectors (geometrically decaying weights, ratio derived from rcond),
   plus (rcond - shift) added on the local diagonal, then assembled
   into CSR by sparse().  The randlc stream order (via sprnvc) is part
   of the benchmark definition — do not reorder the generation loops. */
static void makea(
int n,
int nz,
float a[NZ+1], /* a[1:nz] */
int colidx[NZ+1], /* colidx[1:nz] */
int rowstr[NA+1+1], /* rowstr[1:n+1] */
int nonzer,
int firstrow,
int lastrow,
int firstcol,
int lastcol,
float rcond,
int arow[NZ+1], /* arow[1:nz] */
int acol[NZ+1], /* acol[1:nz] */
float aelt[NZ+1], /* aelt[1:nz] */
float v[NA+1+1], /* v[1:n+1] */
int iv[2*NA+1+1], /* iv[1:2*n+1] */
float shift )
{
int i, nnza, iouter, ivelt, ivelt1, irow, nzv;
/*--------------------------------------------------------------------
c nonzer is approximately (int(sqrt(nnza /n)));
c-------------------------------------------------------------------*/
float size, ratio, scale;
int jcol;
/* size decays geometrically from 1 so that the generated spectrum has
   condition number ~rcond after n outer iterations. */
size = 1.0f;
ratio = pow(rcond, (1.0f / (float)n));
nnza = 0;
/*---------------------------------------------------------------------
c Initialize colidx(n+1 .. 2n) to zero.
c Used by sprnvc to mark nonzero positions
c---------------------------------------------------------------------*/
// R/O Shared scalars: n
// R/W Shared arrays: colidx[NZ+1]
// R/W Private scalars: i
// NOTE(review): colidx[n+1..2n] doubles as sprnvc's mark[] scratch
// here, before colidx is overwritten with real column indices by
// sparse() below.
#pragma acc kernels loop gang worker pcopyout(colidx)
for (i = 1; i <= n; i++) {
colidx[n+i] = 0;
}
for (iouter = 1; iouter <= n; iouter++) {
nzv = nonzer;
/* Random sparse vector in v/iv; colidx[0..] is nzloc scratch and
   colidx[n..] is the mark array cleared above. */
sprnvc(n, nzv, v, iv, &(colidx[0]), &(colidx[n]));
/* Force a 0.5 entry at position iouter (the diagonal of the outer
   product), appending it if sprnvc did not generate it. */
vecset(n, v, iv, &nzv, iouter, 0.5);
for (ivelt = 1; ivelt <= nzv; ivelt++) {
jcol = iv[ivelt];
if (jcol >= firstcol && jcol <= lastcol) {
scale = size * v[ivelt];
for (ivelt1 = 1; ivelt1 <= nzv; ivelt1++) {
irow = iv[ivelt1];
if (irow >= firstrow && irow <= lastrow) {
nnza = nnza + 1;
if (nnza > nz) {
printf("Space for matrix elements exceeded in"
" makea\n");
printf("nnza, nzmax = %d, %d\n", nnza, nz);
printf("iouter = %d\n", iouter);
exit(1);
}
/* Record the triple (irow, jcol, value); duplicates are
   summed later by sparse(). */
acol[nnza] = jcol;
arow[nnza] = irow;
aelt[nnza] = v[ivelt1] * scale;
}
}
}
}
size = size * ratio;
}
/*---------------------------------------------------------------------
c ... add the identity * rcond to the generated matrix to bound
c the smallest eigenvalue from below by rcond
c---------------------------------------------------------------------*/
for (i = firstrow; i <= lastrow; i++) {
if (i >= firstcol && i <= lastcol) {
iouter = n + i;
nnza = nnza + 1;
if (nnza > nz) {
printf("Space for matrix elements exceeded in makea\n");
printf("nnza, nzmax = %d, %d\n", nnza, nz);
printf("iouter = %d\n", iouter);
exit(1);
}
acol[nnza] = i;
arow[nnza] = i;
aelt[nnza] = rcond - shift;
}
}
/*---------------------------------------------------------------------
c ... make the sparse matrix from list of elements with duplicates
c (v and iv are used as workspace)
c---------------------------------------------------------------------*/
sparse(a, colidx, rowstr, n, arow, acol, aelt,
firstrow, lastrow, v, &(iv[0]), &(iv[n]), nnza);
}
/*---------------------------------------------------
c generate a sparse matrix from a list of
c [col, row, element] tri
c---------------------------------------------------*/
/* Assemble the CSR matrix (a, colidx, rowstr) from the nnza triples
   (arow, acol, aelt), summing duplicate (row, col) entries.  Four
   phases: count per-row triples, prefix-sum into rowstr, bucket-sort
   triples by row, then merge duplicates row by row using x[]/mark[]
   as a dense accumulator.  rowstr is deliberately shifted and reused
   between phases — statement order here is load-bearing. */
static void sparse(
float a[NZ+1], /* a[1:*] */
int colidx[NZ+1], /* colidx[1:*] */
int rowstr[NA+1+1], /* rowstr[1:*] */
int n,
int arow[NZ+1], /* arow[1:*] */
int acol[NZ+1], /* acol[1:*] */
float aelt[NZ+1], /* aelt[1:*] */
int firstrow,
int lastrow,
float x[NA+1+1], /* x[1:n] */
boolean mark[NA+1], /* mark[1:n] */
int nzloc[NA+1], /* nzloc[1:n] */
int nnza)
/*---------------------------------------------------------------------
c rows range from firstrow to lastrow
c the rowstr pointers are defined for nrows = lastrow-firstrow+1 values
c---------------------------------------------------------------------*/
{
int nrows;
int i, j, jajp1, nza, k, nzrow;
float xi;
/*--------------------------------------------------------------------
c how many rows of result
c-------------------------------------------------------------------*/
nrows = lastrow - firstrow + 1;
/*--------------------------------------------------------------------
c ...count the number of triples in each row
c-------------------------------------------------------------------*/
// R/O Shared scalars: n
// R/W Shared arrays: rowstr[NA+1+1], mark[n]
// R/W Private scalars: j
#pragma acc kernels loop gang worker independent \
pcopyout(rowstr[0:NA+1+1]) create(mark[0:NA+1])
for (j = 1; j <= n; j++) {
rowstr[j] = 0;
mark[j] = FALSE;
}
rowstr[n+1] = 0;
/* Count triples per row, offset by one slot so the prefix sum below
   yields row START positions directly. */
for (nza = 1; nza <= nnza; nza++) {
j = (arow[nza] - firstrow + 1) + 1;
rowstr[j] = rowstr[j] + 1;
}
rowstr[1] = 1;
for (j = 2; j <= nrows+1; j++) {
rowstr[j] = rowstr[j] + rowstr[j-1];
}
/*---------------------------------------------------------------------
c ... rowstr(j) now is the location of the first nonzero
c of row j of a
c---------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c ... do a bucket sort of the triples on the row index
c-------------------------------------------------------------------*/
/* Each placement advances rowstr[j], so after this pass rowstr[j]
   points to the start of row j+1 (undone by the shift loop below). */
for (nza = 1; nza <= nnza; nza++) {
j = arow[nza] - firstrow + 1;
k = rowstr[j];
a[k] = aelt[nza];
colidx[k] = acol[nza];
rowstr[j] = rowstr[j] + 1;
}
/*--------------------------------------------------------------------
c ... rowstr(j) now points to the first element of row j+1
c-------------------------------------------------------------------*/
/* Shift pointers back down by one row to restore row-start semantics. */
for (j = nrows; j >= 1; j--) {
rowstr[j+1] = rowstr[j];
}
rowstr[1] = 1;
/*--------------------------------------------------------------------
c ... generate the actual output rows by adding elements
c-------------------------------------------------------------------*/
nza = 0;
// R/O Shared scalars: n
// R/W Shared arrays: x[NA+2+1], mark[n]
// R/W Private scalars: i
#pragma acc kernels loop gang worker pcopyout(x, mark)
for (i = 1; i <= n; i++) {
x[i] = 0.0f;
mark[i] = FALSE;
}
/* Compact each row in place: accumulate duplicates into the dense
   scratch x[], then write back only the nonzero columns.  nza counts
   the compacted entries; rowstr[j+1] is rewritten to the new CSR
   boundary as each row finishes. */
jajp1 = rowstr[1];
for (j = 1; j <= nrows; j++) {
nzrow = 0;
/*--------------------------------------------------------------------
c ...loop over the jth row of a
c-------------------------------------------------------------------*/
for (k = jajp1; k < rowstr[j+1]; k++) {
i = colidx[k];
x[i] = x[i] + a[k];
if ( mark[i] == FALSE && x[i] != 0.0f) {
mark[i] = TRUE;
nzrow = nzrow + 1;
nzloc[nzrow] = i;
}
}
/*--------------------------------------------------------------------
c ... extract the nonzeros of this row
c-------------------------------------------------------------------*/
for (k = 1; k <= nzrow; k++) {
i = nzloc[k];
mark[i] = FALSE;
xi = x[i];
x[i] = 0.0f;
if (xi != 0.0f) {
nza = nza + 1;
a[nza] = xi;
colidx[nza] = i;
}
}
/* Remember the OLD start of row j+1 before overwriting it with the
   new compacted boundary. */
jajp1 = rowstr[j+1];
rowstr[j+1] = nza + rowstr[1];
}
}
/*---------------------------------------------------------------------
c  generate a sparse n-vector (v, iv) having nzv nonzeros
c
c  mark[i] is set to 1 while position i holds a nonzero and is reset
c  to 0 before returning, so the caller can reuse mark across all n
c  calls to sprnvc.  (Keeping mark clean here instead of clearing it
c  on entry fixes the performance bug reported by John G. Lewis.)
---------------------------------------------------------------------*/
static void sprnvc(
    int n,
    int nz,
    float v[],      /* v[1:*] */
    int iv[],       /* iv[1:*] */
    int nzloc[],    /* nzloc[1:n] */
    int mark[] )    /* mark[1:n] */
{
    int pow2;                    /* smallest power of two >= n (and >= 2) */
    int marked, stored, idx, pos;
    float val, loc;

    stored = 0;
    marked = 0;

    /* pow2 is the smallest power of two not less than n */
    for (pow2 = 2; pow2 < n; pow2 *= 2) ;

    while (stored < nz) {
        /* Draw a value, then a position in 1..n; both draws advance the
           shared random stream `tran`, in this exact order. */
        val = randlc(&tran, amult);
        loc = randlc(&tran, amult);
        pos = icnvrt(loc, pow2) + 1;
        if (pos > n) continue;   /* position out of range: redraw */

        /* accept only positions not generated already */
        if (mark[pos] == 0) {
            mark[pos] = 1;
            marked = marked + 1;
            nzloc[marked] = pos;
            stored = stored + 1;
            v[stored] = val;
            iv[stored] = pos;
        }
    }

    /* clear only the marks we set, leaving mark[] all zero on exit */
    for (idx = 1; idx <= marked; idx++) {
        mark[nzloc[idx]] = 0;
    }
}
/*---------------------------------------------------------------------
 * scale a float precision number x in (0,1) by a power of 2 (ipwr2)
 * and chop (truncate) the result to an integer
 *---------------------------------------------------------------------*/
static int icnvrt(float x, int ipwr2)
{
    const float scaled = ipwr2 * x;
    return (int) scaled;
}
/*--------------------------------------------------------------------
c  set the ith element of sparse vector (v, iv) with nzv nonzeros to
c  val: if index i is already present overwrite its value, otherwise
c  append (i, val) and increment *nzv.  Arrays are 1-based; n is
c  unused (kept for the historical NPB interface).
c-------------------------------------------------------------------*/
static void vecset(
    int n,
    float v[],   /* v[1:*] */
    int iv[],    /* iv[1:*] */
    int *nzv,
    int i,
    float val)
{
    int k;
    int set = 0;   /* nonzero once index i has been found */
    for (k = 1; k <= *nzv; k++) {
        if (iv[k] == i) {
            v[k] = val;
            set = 1;
            /* vecset itself never appends a duplicate index, so the
             * first hit is the only one: stop scanning. */
            break;
        }
    }
    if (!set) {
        *nzv = *nzv + 1;
        v[*nzv] = val;
        iv[*nzv] = i;
    }
}
|
testingPolicies.h | #pragma once
#include <iostream>
#include "timeHandler.h"
#include "datatypes.h"
#include "cxxopts.hpp"
#include "operators.h"
#include "locationTypesFormat.h"
#include "smallTools.h"
// Null-object testing policy: satisfies the Simulation testing-policy
// interface while never testing anyone and keeping no state.
template<typename SimulationType>
class NoTesting {
public:
    // add program parameters if we need any, this function got called already from Simulation
    static void addProgramParameters(cxxopts::Options& options) {}
    // No-op: this policy consumes no command-line options.
    void initializeArgs(const cxxopts::ParseResult& result) {}
    // No-op: no location-type information is needed.
    void init(const parser::LocationTypes& data) {}
    // No-op: called once per timestep, performs no tests.
    void performTests(Timehandler simTime, unsigned timeStep) {}
    // Stats tuple of three counters (cf. DetailedTesting::performTests:
    // tests performed, positives from testing, positives diagnosed
    // elsewhere) -- always all zero for this policy.
    auto getStats() { return thrust::make_tuple(0u, 0u, 0u); }
};
namespace DetailedTestingOps {
// Plain-data parameter bundle passed by value to the flagLocations /
// doTesting device functions below (trivially copyable, so it can be
// handed to a CUDA kernel directly).
template<typename PPState, typename LocationType>
struct TestingArguments {
    HD TestingArguments() {}
    PPState* agentStatesPtr;              // per-agent disease state
    AgentStats* agentStatsPtr;            // per-agent timestamps/statistics
    unsigned long* locationOffsetPtr;     // per-agent offsets into the possible* arrays
    unsigned* possibleLocationsPtr;       // candidate location IDs per agent
    unsigned* possibleTypesPtr;           // type of each candidate location
    unsigned* locationQuarantineUntilPtr; // per-location quarantine-end timestamp (0 = never quarantined)
    unsigned hospitalType;                // location-type IDs (filled from parser::LocationTypes)
    unsigned homeType;
    unsigned publicPlaceType;
    unsigned doctorType;
    unsigned schoolType;
    unsigned classroomType;
    unsigned nurseryhomeType;
    unsigned workType;
    unsigned timeStep;                    // minutes per simulation step (24*60/timeStep = steps per day)
    unsigned timestamp;                   // current simulation time, in steps
    unsigned tracked;                     // agent ID that gets verbose printf tracing
    LocationType* locationTypePtr;        // type of each location
    unsigned* lastTestPtr;                // per-agent timestamp of last test (unsigned max = never tested)
    bool* locationFlagsPtr;               // per-location "someone here was recently diagnosed" flag
    bool* diagnosedPtr;                   // per-agent diagnosed flag
    double testingRandom;                 // testing probabilities, see DetailedTesting defaults
    double testingHome;
    double testingWork;
    double testingSchool;
    double testingRandomHospital;
    double testingNurseryHome;
    unsigned testingDelay;                // minimum days between two tests of the same agent
    unsigned quarantineLength;            // quarantine duration in days
    bool usePCR;                          // choose PCR vs. antigen accuracy when evaluating a test
};
// Flag the locations of agent i if the agent was diagnosed within the
// last simulated day; doTesting() later boosts the test probability of
// every agent whose home/work/school/classroom carries such a flag.
// Work, school and classroom are flagged only when the location is either
// not quarantined, entered quarantine within the last day, or its
// quarantine already expired -- so a long-standing quarantine is not
// re-flagged.
template<typename PPState, typename LocationType>
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
__device__
#endif
    void
    flagLocations(unsigned i, TestingArguments<PPState, LocationType>& a) {
    // If diagnosed in the last 24 hours.
    // NOTE(review): the subtraction is unsigned and wraps during the first
    // simulated day -- presumably harmless since nobody can have been
    // diagnosed yet; confirm.
    if (a.agentStatsPtr[i].diagnosedTimestamp > a.timestamp - 24 * 60 / a.timeStep) {
        // Mark home (findActualLocationForType returns unsigned max when the
        // agent has no location of the requested type).
        unsigned home = RealMovementOps::findActualLocationForType(i,
            a.homeType,
            a.locationOffsetPtr,
            a.possibleLocationsPtr,
            a.possibleTypesPtr,
            a.homeType,
            a.schoolType,
            a.workType,
            0,
            nullptr);
        if (home != std::numeric_limits<unsigned>::max()) a.locationFlagsPtr[home] = true;
        // Mark work
        unsigned work = RealMovementOps::findActualLocationForType(i,
            a.workType,
            a.locationOffsetPtr,
            a.possibleLocationsPtr,
            a.possibleTypesPtr,
            a.homeType,
            a.schoolType,
            a.workType,
            0,
            nullptr);
        if (work != std::numeric_limits<unsigned>::max()
            && (a.locationQuarantineUntilPtr[work] == 0 ||// Should test if it was not quarantined, OR
                (a.locationQuarantineUntilPtr[work] != 0
                    &&// It has been quarantined - either in last 24 hours, OR it's already over
                    (a.locationQuarantineUntilPtr[work] - a.quarantineLength * 24 * 60 / a.timeStep
                            >= a.timestamp - 24 * 60 / a.timeStep
                        || a.locationQuarantineUntilPtr[work] < a.timestamp))))
            a.locationFlagsPtr[work] = true;
        // Mark school
        unsigned school = RealMovementOps::findActualLocationForType(i,
            a.schoolType,
            a.locationOffsetPtr,
            a.possibleLocationsPtr,
            a.possibleTypesPtr,
            a.homeType,
            a.schoolType,
            a.workType,
            0,
            nullptr);
        unsigned classroom = std::numeric_limits<unsigned>::max();
        if (school != std::numeric_limits<unsigned>::max()
            && (a.locationQuarantineUntilPtr[school] == 0 ||// Should test if it was not quarantined, OR
                (a.locationQuarantineUntilPtr[school] != 0
                    &&// It has been quarantined - either in last 24 hours, OR it's already over
                    (a.locationQuarantineUntilPtr[school] - a.quarantineLength * 24 * 60 / a.timeStep
                            >= a.timestamp - 24 * 60 / a.timeStep
                        || a.locationQuarantineUntilPtr[school] < a.timestamp)))) {
            a.locationFlagsPtr[school] = true;
            // Mark classroom too (only looked up when the school qualified)
            classroom = RealMovementOps::findActualLocationForType(i,
                a.classroomType,
                a.locationOffsetPtr,
                a.possibleLocationsPtr,
                a.possibleTypesPtr,
                a.homeType,
                a.schoolType,
                a.workType,
                0,
                nullptr);
            if (classroom != std::numeric_limits<unsigned>::max()
                && (a.locationQuarantineUntilPtr[classroom] == 0 ||// Should test if it was not quarantined, OR
                    (a.locationQuarantineUntilPtr[classroom] != 0
                        &&// It has been quarantined - either in last 24 hours, OR it's already over
                        (a.locationQuarantineUntilPtr[classroom] - a.quarantineLength * 24 * 60 / a.timeStep
                                >= a.timestamp - 24 * 60 / a.timeStep
                            || a.locationQuarantineUntilPtr[classroom] < a.timestamp))))
                a.locationFlagsPtr[classroom] = true;
        }
        // Debug trace for the tracked agent. (home is printed via %d without
        // the sentinel ternary; unsigned max reinterpreted as int prints -1,
        // so the output matches the other fields.)
        if (a.tracked == i) {
            printf("Testing: Agent %d was diagnosed in last 24 hours, marking home %d, work %d school %d classroom %d\n",
                i,
                home,
                work == std::numeric_limits<unsigned>::max() ? -1 : (int)work,
                school == std::numeric_limits<unsigned>::max() ? -1 : (int)school,
                classroom == std::numeric_limits<unsigned>::max() ? -1 : (int)classroom);
        }
    }
}
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
// CUDA entry point: one thread per agent, each delegating to
// DetailedTestingOps::flagLocations() for its agent index.
template<typename PPState, typename LocationType>
__global__ void flagLocationsDriver(TestingArguments<PPState, LocationType> a, unsigned numberOfAgents) {
    const unsigned agent = blockIdx.x * blockDim.x + threadIdx.x;
    if (agent >= numberOfAgents) { return; }
    DetailedTestingOps::flagLocations(agent, a);
}
#endif
// Decide whether agent i is tested this timestep and apply the outcome.
// The test probability is the sum of a random baseline plus boosts for
// flagged home/work/school/classroom (flags set by flagLocations()),
// hospital/doctor/nursery-home workplaces, and current non-COVID
// hospitalization. A positive test sets diagnosedPtr[i] and the diagnosis
// timestamp; a negative one may release the agent from a stale quarantine.
// FIX: classroomFlag previously became true whenever the agent merely HAD
// a classroom; it now reads a.locationFlagsPtr[classroom] like the home/
// work/school flags, so the 3x classroom boost applies only after a recent
// diagnosis in that classroom.
template<typename PPState, typename LocationType>
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
__device__
#endif
    void
    doTesting(unsigned i, TestingArguments<PPState, LocationType>& a) {
    // If recently tested, don't test again. The leading a.timestamp > ...
    // guard avoids unsigned wraparound of the subtraction in the first days.
    if (a.timestamp > a.testingDelay * 24 * 60 / a.timeStep && a.lastTestPtr[i] != std::numeric_limits<unsigned>::max()
        && a.lastTestPtr[i] > a.timestamp - a.testingDelay * 24 * 60 / a.timeStep)
        return;
    // Dead or already-diagnosed agents are never tested.
    if (a.agentStatesPtr[i].getWBState() == states::WBStates::D || a.diagnosedPtr[i]) return;
    // Check home (findActualLocationForType returns unsigned max when the
    // agent has no location of the requested type).
    unsigned home = RealMovementOps::findActualLocationForType(i,
        a.homeType,
        a.locationOffsetPtr,
        a.possibleLocationsPtr,
        a.possibleTypesPtr,
        a.homeType,
        a.schoolType,
        a.workType,
        0,
        nullptr);
    bool homeFlag = false;
    if (home != std::numeric_limits<unsigned>::max()) homeFlag = a.locationFlagsPtr[home];
    // Check work
    unsigned work = RealMovementOps::findActualLocationForType(i,
        a.workType,
        a.locationOffsetPtr,
        a.possibleLocationsPtr,
        a.possibleTypesPtr,
        a.homeType,
        a.schoolType,
        a.workType,
        0,
        nullptr);
    bool workFlag = false;
    if (work != std::numeric_limits<unsigned>::max()) workFlag = a.locationFlagsPtr[work];
    // Check school and, if the agent attends one, its classroom
    unsigned school = RealMovementOps::findActualLocationForType(i,
        a.schoolType,
        a.locationOffsetPtr,
        a.possibleLocationsPtr,
        a.possibleTypesPtr,
        a.homeType,
        a.schoolType,
        a.workType,
        0,
        nullptr);
    unsigned classroom = std::numeric_limits<unsigned>::max();
    bool schoolFlag = false;
    bool classroomFlag = false;
    if (school != std::numeric_limits<unsigned>::max()) {
        schoolFlag = a.locationFlagsPtr[school];
        classroom = RealMovementOps::findActualLocationForType(i,
            a.classroomType,
            a.locationOffsetPtr,
            a.possibleLocationsPtr,
            a.possibleTypesPtr,
            a.homeType,
            a.schoolType,
            a.workType,
            0,
            nullptr);
        // FIX: read the flag instead of the unconditional `true` of the
        // original, mirroring the home/work/school handling above.
        if (classroom != std::numeric_limits<unsigned>::max()) classroomFlag = a.locationFlagsPtr[classroom];
    }
    // Accumulate the test probability from all applicable boosts.
    double testingProbability = a.testingRandom;
    testingProbability += homeFlag * a.testingHome;
    testingProbability += workFlag * a.testingWork;
    testingProbability += schoolFlag * a.testingSchool;
    testingProbability += classroomFlag * 3.0 * a.testingSchool;
    // If agent works in hospital or doctor's office
    if (work != std::numeric_limits<unsigned>::max()
        && (a.locationTypePtr[work] == a.doctorType || a.locationTypePtr[work] == a.hospitalType)) {
        testingProbability += a.testingRandomHospital;
    }
    // If agent works in nursery home
    if (work != std::numeric_limits<unsigned>::max() && a.locationTypePtr[work] == a.nurseryhomeType) {
        testingProbability += a.testingNurseryHome;
    }
    // If agent is hospitalized for non-COVID
    if (a.agentStatsPtr[i].hospitalizedTimestamp <= a.timestamp
        && a.agentStatsPtr[i].hospitalizedUntilTimestamp > a.timestamp) {
        testingProbability += a.testingRandomHospital;
    }
    if (a.tracked == i && testingProbability > 0.0)
        printf("Testing: Agent %d testing probability: %g\n", i, testingProbability);
    // Do the test
    if (testingProbability > 1.0 || RandomGenerator::randomReal(1.0) < testingProbability) {
        a.lastTestPtr[i] = a.timestamp;
        if (a.agentStatesPtr[i].isInfected()) {
            // Test sensitivity depends on the configured method and the
            // agent's current state.
            float probability = a.usePCR ? a.agentStatesPtr[i].getAccuracyPCR() : a.agentStatesPtr[i].getAccuracyAntigen();
            if (probability > RandomGenerator::randomReal(1.0)) {
                a.diagnosedPtr[i] = true;
                a.agentStatsPtr[i].diagnosedTimestamp = a.timestamp;
                if (a.tracked == i) printf("\t Agent %d tested positive\n", i);
            } else {
                if (a.tracked == i) printf("\t Agent %d tested FALSE negative\n", i);
            }
        } else {
            // Release from quarantine if home is not quarantined
            if (a.agentStatsPtr[i].quarantinedUntilTimestamp > a.timestamp
                && (home != std::numeric_limits<unsigned>::max() && a.locationQuarantineUntilPtr[home] < a.timestamp)) {
                // Reduce number of days spent in quarantine
                if (a.agentStatsPtr[i].daysInQuarantine > 0)
                    a.agentStatsPtr[i].daysInQuarantine -=
                        (a.agentStatsPtr[i].quarantinedUntilTimestamp - a.timestamp) / (24 * 60 / a.timeStep);
                // End quarantine
                a.agentStatsPtr[i].quarantinedUntilTimestamp =
                    a.timestamp;// a.quarantinedPtr will be cleared by next movementPolicy
            }
            if (a.tracked == i) printf("\t Agent %d tested negative\n", i);
        }
    } else if (testingProbability > 0.0) {
        if (a.tracked == i) printf("\t Agent %d was not tested\n", i);
    }
}
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
// CUDA entry point: one thread per agent, each delegating to
// DetailedTestingOps::doTesting() for its agent index.
template<typename PPState, typename LocationType>
__global__ void doTestingDriver(TestingArguments<PPState, LocationType> a, unsigned numberOfAgents) {
    const unsigned agent = blockIdx.x * blockDim.x + threadIdx.x;
    if (agent >= numberOfAgents) { return; }
    DetailedTestingOps::doTesting(agent, a);
}
#endif
}// namespace DetailedTestingOps
// Detailed testing policy: each timestep it flags the home/work/school/
// classroom of every agent diagnosed in the last day, then tests agents
// with a probability built from a random baseline plus boosts for flagged
// locations and high-risk workplaces. Mixed into Simulation via CRTP:
// static_cast<SimulationType*>(this) grants access to agent/location data.
template<typename SimulationType>
class DetailedTesting {
    unsigned publicSpace;      // location-type IDs cached from parser::LocationTypes in init()
    unsigned home;
    unsigned hospital;
    unsigned doctor;
    unsigned tracked;          // agent ID that gets verbose printf tracing
    unsigned quarantineLength; // days; from --quarantineLength, defaulting to 14
    unsigned school;
    unsigned classroom;
    unsigned nurseryhome;
    unsigned work;
    // (tests performed, positives from this policy, positives diagnosed elsewhere)
    thrust::tuple<unsigned, unsigned, unsigned> stats;
    thrust::device_vector<unsigned> lastTest;       // per-agent timestamp of last test (max = never)
    thrust::device_vector<bool> locationFlags;      // per-location "recent diagnosis here" flag
    // Testing probabilities below are defaults, overridden by --testingProbabilities.
    double testingRandom = 0.005;
    double testingHome = 0.2;
    double testingWork = 0.1;
    double testingSchool = 0.1;
    double testingRandomHospital = 0.2;
    double testingNurseryHome = 0.3;
    unsigned testingDelay = 5;   // minimum days between two tests of one agent
    bool usePCR = true;          // PCR vs. antigen accuracy model

public:
    // add program parameters if we need any, this function got called already from Simulation
    static void addProgramParameters(cxxopts::Options& options) {
        // NOTE(review): "on antigen" in the testingMethod help text below
        // looks like a typo for "or antigen"; it is user-visible text and
        // deliberately left unchanged in this documentation pass.
        options.add_options()("testingProbabilities",
            "Testing probabilities for random, if someone else was diagnosed at home/work/school, and random for hospital "
            "workers: comma-delimited string random,home,work,school,hospital,nurseryHome",
            cxxopts::value<std::string>()->default_value("0.00005,0.01,0.0005,0.0005,0.005,0.05"))("testingRepeatDelay",
            "Minimum number of days between taking tests",
            cxxopts::value<unsigned>()->default_value(std::to_string(unsigned(5))))("testingMethod",
            "default method for testing. Can be PCR (default) on antigen. Accuracies are provided in progression json input",
            cxxopts::value<std::string>()->default_value("PCR"));
    }
    // Parse the options registered above (plus --quarantineLength, which may
    // belong to another policy and is therefore read inside a try/catch).
    void initializeArgs(const cxxopts::ParseResult& result) {
        testingDelay = result["testingRepeatDelay"].as<unsigned>();
        std::string probsString = result["testingProbabilities"].as<std::string>();
        std::vector<double> params = splitStringDouble(probsString, ',');
        // Missing trailing fields keep their compiled-in defaults.
        if (params.size() > 0) testingRandom = params[0];
        if (params.size() > 1) testingHome = params[1];
        if (params.size() > 2) testingWork = params[2];
        if (params.size() > 3) testingSchool = params[3];
        if (params.size() > 4) testingRandomHospital = params[4];
        if (params.size() > 5) testingNurseryHome = params[5];
        // printf("testing probabilities: %g %g %g %g %g\n", testingRandom, testingHome, testingWork, testingSchool,
        // testingRandomHospital);
        try {
            quarantineLength = result["quarantineLength"].as<unsigned>();
        } catch (std::exception& e) { quarantineLength = 14; }
        if (result["testingMethod"].as<std::string>().compare("PCR") == 0)
            usePCR = true;
        else if (result["testingMethod"].as<std::string>().compare("antigen") == 0)
            usePCR = false;
        else
            throw CustomErrors(
                "unrecognized testingMethod " + result["testingMethod"].as<std::string>() + " must be either PCR or antigen");
    }
    // Stats of the most recent performTests() call; see the member comment.
    auto getStats() { return stats; }
    // Cache the numeric location-type IDs used by the device functions.
    void init(const parser::LocationTypes& data) {
        publicSpace = data.publicSpace;
        home = data.home;
        hospital = data.hospital;
        doctor = data.doctor;
        school = data.school;
        work = data.work;
        classroom = data.classroom;
        nurseryhome = data.nurseryhome;
    }
    // Run one round of testing: flag locations of fresh diagnoses, test
    // agents, then compute the statistics tuple.
    void performTests(Timehandler simTime, unsigned timeStep) {
        // PROFILE_FUNCTION();
        auto realThis = static_cast<SimulationType*>(this);
        DetailedTestingOps::TestingArguments<typename SimulationType::PPState_t, typename SimulationType::TypeOfLocation_t> a;
        thrust::device_vector<unsigned>& agentLocations = realThis->agents->location;
        unsigned numberOfLocations = realThis->locs->locType.size();
        unsigned numberOfAgents = agentLocations.size();
        a.timestamp = simTime.getTimestamp();
        // Running for the first time - initialize arrays
        if (lastTest.size() == 0) {
            lastTest.resize(numberOfAgents);
            // max() marks "never tested"
            thrust::fill(lastTest.begin(), lastTest.end(), std::numeric_limits<unsigned>::max());
            locationFlags.resize(numberOfLocations);
            tracked = realThis->locs->tracked;
        }
        // Set all flags of all locations to false (no recent diagnoses)
        thrust::fill(locationFlags.begin(), locationFlags.end(), false);
        // Fill the POD argument bundle handed to the device functions.
        a.tracked = tracked;
        a.locationFlagsPtr = thrust::raw_pointer_cast(locationFlags.data());
        a.lastTestPtr = thrust::raw_pointer_cast(lastTest.data());
        a.hospitalType = hospital;
        a.homeType = home;
        a.publicPlaceType = publicSpace;
        a.doctorType = doctor;
        a.timeStep = timeStep;
        a.schoolType = school;
        a.classroomType = classroom;
        a.workType = work;
        a.testingHome = testingHome;
        a.testingWork = testingWork;
        a.testingSchool = testingSchool;
        a.testingRandomHospital = testingRandomHospital;
        a.testingRandom = testingRandom;
        a.testingDelay = testingDelay;
        a.quarantineLength = quarantineLength;
        a.testingNurseryHome = testingNurseryHome;
        a.usePCR = usePCR;
        // agent data
        thrust::device_vector<AgentStats>& agentStats = realThis->agents->agentStats;
        a.agentStatsPtr = thrust::raw_pointer_cast(agentStats.data());
        thrust::device_vector<typename SimulationType::PPState_t>& agentStates = realThis->agents->PPValues;
        a.agentStatesPtr = thrust::raw_pointer_cast(agentStates.data());
        thrust::device_vector<bool>& diagnosed = realThis->agents->diagnosed;
        a.diagnosedPtr = thrust::raw_pointer_cast(diagnosed.data());
        // primary location types
        thrust::device_vector<typename SimulationType::TypeOfLocation_t>& locationTypes = realThis->locs->locType;
        a.locationTypePtr = thrust::raw_pointer_cast(locationTypes.data());
        // Arrays storing actual location IDs for each agent, for each location type
        thrust::device_vector<unsigned long>& locationOffset = realThis->agents->locationOffset;
        a.locationOffsetPtr = thrust::raw_pointer_cast(locationOffset.data());
        thrust::device_vector<unsigned>& possibleLocations = realThis->agents->possibleLocations;
        a.possibleLocationsPtr = thrust::raw_pointer_cast(possibleLocations.data());
        thrust::device_vector<unsigned>& possibleTypes = realThis->agents->possibleTypes;
        a.possibleTypesPtr = thrust::raw_pointer_cast(possibleTypes.data());
        thrust::device_vector<unsigned>& locationQuarantineUntil = realThis->locs->quarantineUntil;
        a.locationQuarantineUntilPtr = thrust::raw_pointer_cast(locationQuarantineUntil.data());
        //
        // Step 1 - flag locations of anyone diagnosed yesterday
        //
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_OMP
#pragma omp parallel for
        for (unsigned i = 0; i < numberOfAgents; i++) { DetailedTestingOps::flagLocations(i, a); }
#elif THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
        DetailedTestingOps::flagLocationsDriver<<<(numberOfAgents - 1) / 256 + 1, 256>>>(a, numberOfAgents);
        cudaDeviceSynchronize();
#endif
        //
        // Step 2 - do the testing
        //
#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_OMP
#pragma omp parallel for
        for (unsigned i = 0; i < numberOfAgents; i++) { DetailedTestingOps::doTesting(i, a); }
#elif THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
        DetailedTestingOps::doTestingDriver<<<(numberOfAgents - 1) / 256 + 1, 256>>>(a, numberOfAgents);
        cudaDeviceSynchronize();
#endif
        //
        // Step 3 - calculate statistics
        //
        unsigned timestamp = simTime.getTimestamp();
        // Count up those who were tested just now
        unsigned tests = thrust::count(lastTest.begin(), lastTest.end(), timestamp);
        // TODO: count up tests performed in movementPolicy
        //...
        // Count up those who have just been diagnosed because of this testing policy
        unsigned positive1 = thrust::count_if(agentStats.begin(), agentStats.end(), [timestamp] HD(const AgentStats& s) {
            return s.diagnosedTimestamp == timestamp;
        });
        // Count up those who were diagnosed yesterday, because of a doctor/hospital visit (in movementPolicy)
        unsigned positive2 =
            thrust::count_if(agentStats.begin(), agentStats.end(), [timestamp, timeStep] HD(const AgentStats& s) {
                return s.diagnosedTimestamp < timestamp && s.diagnosedTimestamp > timestamp - 24 * 60 / timeStep;
            });
        stats = thrust::make_tuple(tests, positive1, positive2);
    }
};
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* timeval_subtract: store X - Y into RESULT.
 *
 * Y is normalized in place (its fields are adjusted to borrow/carry
 * microseconds), so callers must not rely on Y afterwards.
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y's microseconds so that
     * x->tv_usec >= y->tv_usec holds. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Conversely, fold an excess of microseconds back into seconds. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* With y normalized, the microsecond part of the difference is
     * certainly non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Negative exactly when x's normalized seconds are smaller. */
    return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver: reads the interior grid size (Nx Ny Nz) and the number
 * of timesteps (Nt) from the command line, allocates the double-buffered
 * grid A[2][Nz][Ny][Nx] and seven coefficient grids, fills them with
 * reproducible pseudo-random data, then times TESTS runs of the 7-point
 * variable-coefficient stencil and reports the best time.
 *
 * Fixes: Nx/Ny/Nz/Nt get defaults instead of being read uninitialized when
 * arguments are missing; the lowercase min() macro (never defined in this
 * file -- only MIN is) is replaced by MIN; the top-level A/coef/tile_size
 * allocations are freed.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* Defaults (32^3 interior, 50 steps); "+2" accounts for the halo. */
  Nx = Ny = Nz = 32 + 2;
  Nt = 50;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* allocate the arrays: A is double-buffered over time, coef holds one
   * grid per stencil point */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* tile size information, including extra element to decide the list
   * length (consumed by the source-to-source tiling transformation) */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  /* The list is modified here before source-to-source transformations */
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 8;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  /* for timekeeping */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;

  /* initialize grid and coefficients with reproducible pseudo-random
   * values in [0, BASE) */
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* MIN (uppercase): the lowercase min macro is not defined here */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays, including the top-level pointer arrays and
   * the tile-size list (previously leaked) */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Compute RESULT = X - Y on struct timeval values.
 *
 * Note that Y is used as scratch: it is normalized in place while
 * carrying microseconds between the fields.
 * Returns 1 when the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* If x has fewer microseconds, move whole seconds from y's tv_sec
     * into its tv_usec so the later subtraction cannot go negative. */
    if (x->tv_usec < y->tv_usec)
    {
        int adj = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * adj;
        y->tv_sec += adj;
    }
    /* If the microsecond difference exceeds a second, move the excess
     * back into the seconds field. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int adj = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * adj;
        y->tv_sec -= adj;
    }

    /* y is now normalized relative to x: subtract field by field. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver for the PLUTO/CLooG-tiled variant of the 7-point
 * variable-coefficient stencil. Reads grid size (Nx Ny Nz) and timestep
 * count (Nt) from the command line, allocates the double-buffered grid and
 * seven coefficient grids, then times TESTS runs of the tiled, OpenMP-
 * parallel loop nest and reports the best time.
 *
 * Fixes: Nx/Ny/Nz/Nt get defaults instead of being read uninitialized when
 * arguments are missing; the top-level A/coef/tile_size allocations are
 * freed; an accidentally inlined glibc header comment (residue of the
 * source-to-source preprocessing) was removed.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* Defaults (32^3 interior, 50 steps); "+2" accounts for the halo. */
  Nx = Ny = Nz = 32 + 2;
  Nt = 50;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  /* allocate the arrays: A is double-buffered over time, coef holds one
   * grid per stencil point */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* tile size information, including extra element to decide the list
   * length (consumed by the source-to-source tiling transformation) */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  /* The list is modified here before source-to-source transformations */
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 24;
  tile_size[3] = 128;
  tile_size[4] = -1;

  /* for timekeeping */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;

  /* initialize grid and coefficients with reproducible pseudo-random
   * values in [0, BASE) */
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* Auto-generated, time-skewed tiled loop nest; do not hand-edit the
     * bounds below -- they encode the polyhedral schedule. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
  for (t1=-1;t1<=floord(Nt-2,12);t1++) {
    lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
    ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
    for (t2=lbp;t2<=ubp;t2++) {
      for (t3=max(max(0,ceild(t1-1,2)),ceild(24*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(12*t1+Ny+21,24)),floord(24*t2+Ny+20,24)),floord(24*t1-24*t2+Nz+Ny+19,24));t3++) {
        for (t4=max(max(max(0,ceild(3*t1-31,32)),ceild(24*t2-Nz-124,128)),ceild(24*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(12*t1+Nx+21,128)),floord(24*t2+Nx+20,128)),floord(24*t3+Nx+20,128)),floord(24*t1-24*t2+Nz+Nx+19,128));t4++) {
          for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),24*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),24*t3+22),128*t4+126),24*t1-24*t2+Nz+21);t5++) {
            for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
              for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
                lbv=max(128*t4,t5+1);
                ubv=min(128*t4+127,t5+Nx-2);
#pragma ivdep
#pragma vector always
                for (t8=lbv;t8<=ubv;t8++) {
                  A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                }
              }
            }
          }
        }
      }
    }
  }
}
/* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays, including the top-level pointer arrays and
   * the tile-size list (previously leaked) */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
opencl_gpg_fmt_plug.c | /*
* Modified by Dhiru Kholia <dhiru at openwall.com> for GPG format.
*
* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* converted to use 'common' code, Feb29-Mar1 2016, JimF. Also, added
* CPU handling of all 'types' which we do not yet have in GPU.
*/
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_gpg;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_gpg);
#else
#include <string.h>
#include <openssl/aes.h>
#include <assert.h>
#include <openssl/blowfish.h>
#include <openssl/ripemd.h>
#include <openssl/cast.h>
#include "idea-JtR.h"
#include <openssl/bn.h>
#include <openssl/dsa.h>
#include <openssl/des.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "misc.h"
#include "md5.h"
#include "rc4.h"
#include "pdfcrack_md5.h"
#include "sha.h"
#include "common-opencl.h"
#include "options.h"
#include "sha2.h"
#include "stdint.h"
#include "gpg_common.h"
#define FORMAT_LABEL "gpg-opencl"
#define FORMAT_NAME "OpenPGP / GnuPG Secret Key"
#define ALGORITHM_NAME "SHA1 OpenCL"
#define SALT_SIZE sizeof(struct gpg_common_custom_salt*)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
extern volatile int bench_running;
/* Host-side mirror of the structs consumed/produced by the "gpg" OpenCL
 * kernel — field order and sizes must match the .cl side exactly.
 * NOTE(review): layouts presumably match kernels/gpg_kernel.cl — verify there. */
typedef struct {
	uint32_t length;                /* number of valid bytes in v[] */
	uint8_t v[PLAINTEXT_LENGTH];    /* candidate password (not NUL-terminated) */
} gpg_password;

typedef struct {
	uint8_t v[32];                  /* derived key material returned by the kernel */
} gpg_hash;

typedef struct {
	uint32_t length;                /* salt length in bytes (always SALT_LENGTH here) */
	uint32_t count;                 /* S2K iteration count */
	uint32_t key_len;               /* cipher key size for the current salt */
	uint8_t salt[SALT_LENGTH];      /* raw salt bytes */
} gpg_salt;
static int *cracked;
static int any_cracked;
static cl_int cl_error;
static gpg_password *inbuffer;
static gpg_hash *outbuffer;
static gpg_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
static struct fmt_main *self;
size_t insize, outsize, settingsize, cracked_size;
#define STEP 0
#define SEED 256
// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
/* Labels printed by the autotuner for the three profiled events:
 * host->device transfer, kernel run, device->host transfer. */
static const char * warn[] = {
	"xfer: ", ", crypt: ", ", xfer: "
};
/* ------- Helper functions ------- */
/* Largest work-group size the device allows for crypt_kernel (autotune helper). */
static size_t get_task_max_work_group_size()
{
	return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}
/*
 * Allocate host buffers and device buffers for `gws` keys and bind the
 * device buffers to the kernel arguments. Called by the autotuner for each
 * candidate global work size; release_clobj() is its counterpart.
 */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(gpg_password) * gws;
	outsize = sizeof(gpg_hash) * gws;
	settingsize = sizeof(gpg_salt);
	cracked_size = sizeof(*cracked) * gws;

	/* inbuffer/cracked are zeroed so unused slots are deterministic */
	inbuffer = mem_calloc(1, insize);
	outbuffer = mem_alloc(outsize);
	cracked = mem_calloc(1, cracked_size);

	/// Allocate memory
	mem_in =
		clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
		&cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting =
		clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
		NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out =
		clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
		&cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	/* Kernel signature: gpg(in, out, salt) */
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
		&mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
		&mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
		&mem_setting), "Error while setting mem_salt kernel argument");
}
/*
 * Undo create_clobj(): release the three device buffers and free the host
 * buffers. `cracked` doubles as the "allocated" flag, so a second call (or a
 * call before any allocation) is a harmless no-op.
 */
static void release_clobj(void)
{
	if (!cracked)
		return;

	HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
	HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
	HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

	MEM_FREE(inbuffer);
	MEM_FREE(outbuffer);
	MEM_FREE(cracked);
}
/* Format init hook: remember our fmt_main and prepare the selected device.
 * Kernel build is deferred to reset(), where salt-dependent sizes are known. */
static void init(struct fmt_main *_self)
{
	self = _self;
	opencl_prepare_dev(gpu_id);
}
/*
 * Reset hook: on first call, build the kernel with the compile-time buffer
 * sizes and run the autotuner to pick LWS/GWS. Subsequent calls are no-ops
 * (guarded by `autotuned`, which autotune_run() sets).
 */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts),
		         "-DPLAINTEXT_LENGTH=%d -DSALT_LENGTH=%d",
		         PLAINTEXT_LENGTH, SALT_LENGTH);
		opencl_init("$JOHN/kernels/gpg_kernel.cl",
		            gpu_id, build_opts);

		crypt_kernel = clCreateKernel(program[gpu_id], "gpg", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
		                       create_clobj, release_clobj,
		                       sizeof(gpg_password), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 1, 0, 300);
	}
}
/*
 * Teardown hook: release buffers, kernel and program built during reset().
 * Decrementing `autotuned` re-arms reset() for a later re-initialization.
 */
static void done(void)
{
	if (!autotuned)
		return;

	release_clobj();
	HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
	HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
	autotuned--;
}
/*
 * Install the current salt: unpack the dyna-salt pointer, fill the flat
 * gpg_salt struct the kernel expects, and push it to the device.
 *
 * BUG FIX: the write-buffer call previously passed `¤tsalt` — an HTML
 * entity mojibake of `&currentsalt` (`&curren;` renders as ¤) introduced by a
 * bad encode/decode round trip. That token does not compile; restore the
 * address-of expression.
 */
static void set_salt(void *salt)
{
	gpg_common_cur_salt = *(struct gpg_common_custom_salt **)salt;
	currentsalt.length = SALT_LENGTH;
	memcpy((char*)currentsalt.salt, gpg_common_cur_salt->salt, currentsalt.length);
	currentsalt.count = gpg_common_cur_salt->count;
	currentsalt.key_len = gpg_common_keySize(gpg_common_cur_salt->cipher_algorithm);

	/* Non-blocking write is safe: crypt_all() enqueues on the same queue */
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
		"Copy setting to gpu");
}
#undef set_key
/* Store one candidate password (clamped to PLAINTEXT_LENGTH) into the host
 * input buffer at slot `index`; the buffer is uploaded later by crypt_all(). */
static void set_key(char *key, int index)
{
	const size_t n = strlen(key);
	const uint32_t length =
		(n > PLAINTEXT_LENGTH) ? PLAINTEXT_LENGTH : (uint32_t)n;

	inbuffer[index].length = length;
	memcpy(inbuffer[index].v, key, length);
}
/* Return the candidate stored at `index` as a NUL-terminated C string.
 * Uses a static buffer, so the result is only valid until the next call. */
static char *get_key(int index)
{
	static char out[PLAINTEXT_LENGTH + 1];
	const uint32_t n = inbuffer[index].length;

	memcpy(out, inbuffer[index].v, n);
	out[n] = '\0';
	return out;
}
/*
 * Hash all `*pcount` queued candidates against the current salt.
 *
 * Two paths:
 *  - CPU fallback (OpenMP) for salt types the GPU kernel does not handle —
 *    only SPEC_ITERATED_SALTED with SHA-1 runs on the GPU;
 *  - GPU path: upload keys, run the "gpg" kernel, read derived keys back,
 *    then verify each with gpg_common_check() on the CPU.
 *
 * Results go into cracked[]/any_cracked for the cmp_* functions.
 * Returns the number of candidates processed (always count).
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	/* Round GWS up to a multiple of LWS as OpenCL requires */
	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

	/* Clear results from the previous batch */
	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

//	printf ("spec=%s pk_algorithm=%d hash_algorithm=%d cipher_algorithm=%d key_size=%d\n",
//		gpg_common_cur_salt->spec==SPEC_SIMPLE?"SIMPLE":(gpg_common_cur_salt->spec==SPEC_SALTED?"SALTED":"ISALTED"),
//		gpg_common_cur_salt->pk_algorithm,
//		gpg_common_cur_salt->hash_algorithm,
//		gpg_common_cur_salt->cipher_algorithm,
//		gpg_common_keySize(gpg_common_cur_salt->cipher_algorithm));

	// Ok, if a format we do NOT handle, then use CPU code.
	// Right now we ONLY handle 'simple' SHA1 spec-iterated-salt in GPU
	if (gpg_common_cur_salt->spec != SPEC_ITERATED_SALTED ||
	    gpg_common_cur_salt->hash_algorithm != HASH_SHA1/* ||
	    gpg_common_keySize(gpg_common_cur_salt->cipher_algorithm) > 20*/
	) {
		// Code taken straight from the CPU version. Since we do not
		// have 'special' GPU support, we simply fall back.
		static int warned_once = 0;
		int ks = gpg_common_keySize(gpg_common_cur_salt->cipher_algorithm);

		/* Warn (once, and not while benchmarking) that this salt runs on CPU */
		if (!warned_once && !bench_running) {
			fprintf(stderr, "[-] WARNING there are some input gpg hashes which"
			        " will be run using CPU code, not GPU code\n");
			warned_once = 1;
		}
#ifdef _OPENMP
#pragma omp parallel for
		for (index = 0; index < count; index++)
#endif
		{
			int res;
			char pass[128];
			unsigned char keydata[64];

			/* Re-terminate the key (inbuffer stores length + raw bytes) */
			memcpy(pass, inbuffer[index].v, inbuffer[index].length);
			pass[inbuffer[index].length] = '\0';
			/* Salt-specific string-to-key derivation, then verification */
			gpg_common_cur_salt->s2kfun(pass, keydata, ks);
			res = gpg_common_check(keydata, ks);
			if (res) {
				cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
				any_cracked |= 1;
			}
		}
		return count;
	}

	/// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
		"Copy data to gpu");

	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL,
		multi_profilingEvent[1]),
		"Run kernel");

	/// Read the result back (blocking read also flushes the queue)
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]),
		"Copy result back");

	/* During autotuning we only measure transfer/kernel time */
	if (ocl_autotune_running)
		return count;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
		if (gpg_common_check(outbuffer[index].v, gpg_common_keySize(gpg_common_cur_salt->cipher_algorithm)))
		{
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}

	return count;
}
/* Any hit in the whole batch? (set by crypt_all) */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}
/* Did candidate `index` crack the current salt? */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* Full verification already happened in gpg_common_check(); nothing left to do. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/*
* Report gpg --s2k-count n as 1st tunable cost,
* hash algorithm as 2nd tunable cost,
* cipher algorithm as 3rd tunable cost.
*/
/* Format descriptor: parameters, shared (gpg_common_*) helpers, and the
 * method table wired to the functions above. */
struct fmt_main fmt_opencl_gpg = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		/* FMT_DYNA_SALT: salts are pointers to variable-size structs */
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_DYNA_SALT,
		{
			"s2k-count", /* only for gpg --s2k-mode 3, see man gpg, option --s2k-count n */
			"hash algorithm [1:MD5 2:SHA1 3:RIPEMD160 8:SHA256 9:SHA384 10:SHA512 11:SHA224]",
			"cipher algorithm [1:IDEA 2:3DES 3:CAST5 4:Blowfish 7:AES128 8:AES192 9:AES256]",
		},
		{ FORMAT_TAG },
		gpg_common_gpg_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		gpg_common_valid,
		fmt_default_split,
		fmt_default_binary,
		gpg_common_get_salt,
		{
			/* tunable-cost accessors, in the order documented above */
			gpg_common_gpg_s2k_count,
			gpg_common_gpg_hash_algorithm,
			gpg_common_gpg_cipher_algorithm,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
GB_unaryop__lnot_int16_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int16_fp32
// op(A') function: GB_tran__lnot_int16_fp32
// C type: int16_t
// A type: float
// cast: int16_t cij ; GB_CAST_SIGNED(cij,aij,16)
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
float
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
int16_t z ; GB_CAST_SIGNED(z,x,16) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT16 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = !(Ax [p] != 0) with float->int16 cast, element-wise over anz
 * entries. Auto-generated — the real work is in the GB_* macros above. */
GrB_Info GB_unop__lnot_int16_fp32
(
    int16_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;   // Cx [p] = !((int16_t) Ax [p] != 0)
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = lnot (cast (A')): transpose + typecast + unary op, via the shared
 * transpose template. Auto-generated; do not edit by hand. */
GrB_Info GB_tran__lnot_int16_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__lnot_int64_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int64_fp64
// op(A') function: GB_tran__lnot_int64_fp64
// C type: int64_t
// A type: double
// cast: int64_t cij ; GB_CAST_SIGNED(cij,aij,64)
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
double
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
int64_t z ; GB_CAST_SIGNED(z,aij,64) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT64 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = !(Ax [p] != 0) with double->int64 cast, element-wise over anz
 * entries. Auto-generated — the real work is in the GB_* macros above. */
GrB_Info GB_unop__lnot_int64_fp64
(
    int64_t *Cx,        // Cx and Ax may be aliased
    double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;   // Cx [p] = !((int64_t) Ax [p] != 0)
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = lnot (cast (A')): transpose + typecast + unary op, via the shared
 * transpose template. Auto-generated; do not edit by hand. */
GrB_Info GB_tran__lnot_int64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__div_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__div_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__div_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__div_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_uint16)
// A*D function (colscale): GB (_AxD__div_uint16)
// D*A function (rowscale): GB (_DxB__div_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__div_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__div_uint16)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_uint16)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_uint16)
// C=scalar+B GB (_bind1st__div_uint16)
// C=scalar+B' GB (_bind1st_tran__div_uint16)
// C=A+scalar GB (_bind2nd__div_uint16)
// C=A'+scalar GB (_bind2nd_tran__div_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 16)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_UNSIGNED (x, y, 16) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_UINT16 || GxB_NO_DIV_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
/* C += A+B where C, A and B are all dense; op is DIV on uint16.
 * Auto-generated wrapper around the dense ewise3-accum template. */
void GB (_Cdense_ewise3_accum__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
/* C = A+B where C, A and B are all dense; op is DIV on uint16.
 * Auto-generated wrapper around the dense ewise3-noaccum template. */
void GB (_Cdense_ewise3_noaccum__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
/* C += B: accumulate sparse B into dense C, sliced/threaded per B_ek_slicing. */
GrB_Info GB (_Cdense_accumB__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
/* C += b: accumulate a scalar into dense C. The template returns from inside
 * the inner block, so the trailing return is unreachable generator
 * boilerplate — harmless, keep as generated. */
GrB_Info GB (_Cdense_accumb__div_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = A*D: scale columns of A by diagonal matrix D (uint16 DIV semiring op). */
GrB_Info GB (_AxD__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
/* C = D*B: scale rows of B by diagonal matrix D (uint16 DIV semiring op). */
GrB_Info GB (_DxB__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
/* eWiseAdd / eWiseUnion: C = A+B (optionally masked). For eWiseUnion,
 * alpha/beta scalars substitute for entries missing from A or B. */
GrB_Info GB (_AaddB__div_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint16_t alpha_scalar ;
    uint16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // unpack the "fill" scalars only when doing eWiseUnion
        alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
        beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
/* eWiseMult method 08: C = A.*B (optionally masked), C sparse/hypersparse. */
GrB_Info GB (_AemultB_08__div_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult method 02: C = A.*B with A sparse/hyper and B bitmap/full.
 * For DIV, GB_BINOP_FLIP is 0: any flip was already rewritten as RDIV by the
 * caller, so only the unflipped template is instantiated. */
GrB_Info GB (_AemultB_02__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
/* eWiseMult method 04: C<M> = A.*B with M sparse/hyper, A and B bitmap/full. */
GrB_Info GB (_AemultB_04__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
/* eWiseMult where the result C is held in bitmap form. */
GrB_Info GB (_AemultB_bitmap__div_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
/* Cx [p] = x / Bx [p] (scalar bound as the FIRST operand).
 * Bb is B's bitmap (NULL when B is full); entries absent from B are skipped. */
GrB_Info GB (_bind1st__div_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        // GB_IDIV_UNSIGNED handles division by zero without trapping
        Cx [p] = GB_IDIV_UNSIGNED (x, bij, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
/* Cx [p] = Ax [p] / y (scalar bound as the SECOND operand).
 * Ab is A's bitmap (NULL when A is full); entries absent from A are skipped. */
GrB_Info GB (_bind2nd__div_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        // GB_IDIV_UNSIGNED handles division by zero without trapping
        Cx [p] = GB_IDIV_UNSIGNED (aij, y, 16) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 16) ; \
}
/* C = x / A': transpose A and apply DIV with the scalar as first operand,
 * using the redefined GB_CAST_OP above. Trailing GB_ATYPE redefinition is
 * generator boilerplate restoring the macro for subsequent code. */
GrB_Info GB (_bind1st_tran__div_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 16) ; \
}
/* C = A' / y: transpose A and apply DIV with the scalar as second operand,
 * using the redefined GB_CAST_OP above. */
GrB_Info GB (_bind2nd_tran__div_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
9.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <glib.h>
#include <omp.h>
// libgsl0-dev
#include <gsl/gsl_math.h>
#include <gsl/gsl_rng.h>
#include <sys/time.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_sort_double.h>
double pi(int n, int batch) {
const gsl_rng_type * T;
gsl_rng * r;
gsl_rng_env_setup();
T = gsl_rng_default;
r = gsl_rng_alloc (T);
int end = (n / batch);
double sum = 0;
int i, j;
// GRand * grand = g_rand_new ();
//g_rand_double(grand);
#pragma omp parallel for default(shared) firstprivate(r) private(j) reduction (+:sum)
for (i = 0; i < end; i++) {
for (j=0; j<batch; j++) {
double a = gsl_rng_uniform_pos (r);
double b = gsl_rng_uniform_pos (r);
double c = (a * a) + (b * b);
if (c <= 1) {
sum += c;
}
}
}
return 8 * sum / n;
}
/* Estimate pi with 10^8 samples in batches of 1000 and print it.
 * (C99: main implicitly returns 0.) */
int main () {
	printf("%0.15f\n", pi(100000000, 1000));
}
|
DRB015-outofbounds-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The outermost loop is parallelized.
But the inner loop has an out-of-bounds access for b[i][j-1] when j equals 0.
This causes a memory access to the previous row's last element.
For example, an array of 4x4:
j=0 1 2 3
i=0 x x x x
1 x x x x
2 x x x x
3 x x x x
outer loop: i=2,
inner loop: j=0
array element accessed b[i][j-1] becomes b[2][-1], which in turn is b[1][3]
due to linearized row-major storage of the 2-D array.
This causes loop-carried data dependence between i=2 and i=1.
Data race pair: b[i][j]@80:7 vs. b[i][j-1]@80:15
*/
#include "omprace.h"
#include <omp.h>
#include <stdlib.h>
/*
 * DataRaceBench DRB015: the out-of-bounds read b[i][j-1] at j==0 is
 * INTENTIONAL — it aliases the previous row's last element (row-major
 * layout), creating a loop-carried dependence across i and thus a data race
 * under the parallel-for. Do NOT "fix" this; it is the benchmark's defect.
 */
int main(int argc, char* argv[])
{
  omprace_init();
  int i,j;
  int len=100;

  if (argc>1)
    len = atoi(argv[1]);

  int n=len, m=len;
  double b[n][m];   /* VLA on the stack; size scales with argv[1] */

#pragma omp parallel for private(j)
  for (i=1;i<n;i++)
    for (j=0;j<m;j++) // Note there will be out of bound access
      b[i][j]=b[i][j-1];

  omprace_fini();
  return 0;
}
|
pr36790.c | /* PR middle-end/36790 */
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */
/* Empty sink for a char argument (part of the PR middle-end/36790
 * compile-only regression test; the bodies are intentionally minimal). */
void
foo (char b)
{
}
/* Regression scenario: a char parameter captured (shared) by an OpenMP
 * task after being passed on — this combination ICEd the middle end. */
void
bar (char b)
{
  foo (b);
#pragma omp task default (shared)
  b = 0;
}
/* Drive bar() once; the test passes if this translation unit compiles. */
int
main ()
{
  bar (0);
  return 0;
}
|
enhance.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE N N H H AAA N N CCCC EEEEE %
% E NN N H H A A NN N C E %
% EEE N N N HHHHH AAAAA N N N C EEE %
% E N NN H H A A N NN C E %
% EEEEE N N H H A A N N CCCC EEEEE %
% %
% %
% MagickCore Image Enhancement Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoGammaImage() extracts the 'mean' from the image and adjusts the image
% so that its gamma is set appropriately.
%
% The format of the AutoGammaImage method is:
%
% MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image,
  ExceptionInfo *exception)
{
  double
    gamma,
    log_mean,
    mean,
    sans;

  MagickStatusType
    status;

  register ssize_t
    i;

  /*
    Compute a gamma from the image mean so that the corrected mean maps to
    mid-gray (0.5), then apply it via LevelImage().
  */
  log_mean=log(0.5);
  if (image->channel_mask == DefaultChannels)
    {
      /*
        Apply gamma correction equally across all given channels.
      */
      (void) GetImageMean(image,&mean,&sans,exception);
      gamma=log(mean*QuantumScale)/log_mean;
      return(LevelImage(image,0.0,(double) QuantumRange,gamma,exception));
    }
  /*
    Auto-gamma each channel separately.
  */
  status=MagickTrue;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    ChannelType
      channel_mask;

    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits & UpdatePixelTrait) == 0)
      continue;
    channel_mask=SetImageChannelMask(image,(ChannelType) (1UL << i));
    /*
      Accumulate the status instead of overwriting it so a failure from
      GetImageMean() or LevelImage() on an earlier channel is not lost
      (plain assignment here discarded previously accumulated failures).
    */
    status&=GetImageMean(image,&mean,&sans,exception);
    gamma=log(mean*QuantumScale)/log_mean;
    status&=LevelImage(image,0.0,(double) QuantumRange,gamma,exception);
    (void) SetImageChannelMask(image,channel_mask);
    if (status == MagickFalse)
      break;
  }
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoLevelImage() adjusts the levels of a particular image channel by
% scaling the minimum and maximum values to the full quantum range.
%
% The format of the LevelImage method is:
%
% MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Thin wrapper: a min/max stretch with 0% black-point and 0% white-point
  clipping and unity gamma is exactly an auto-level operation.
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image,
  ExceptionInfo *exception)
{
  return(MinMaxStretchImage(image,0.0,0.0,1.0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B r i g h t n e s s C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BrightnessContrastImage() changes the brightness and/or contrast of an
% image. It converts the brightness and contrast parameters into slope and
% intercept and calls a polynomial function to apply to the image.
%
% The format of the BrightnessContrastImage method is:
%
% MagickBooleanType BrightnessContrastImage(Image *image,
% const double brightness,const double contrast,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o brightness: the brightness percent (-100 .. 100).
%
% o contrast: the contrast percent (-100 .. 100).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast,ExceptionInfo *exception)
{
#define BrightnessContastImageTag "BrightnessContast/Image"

  double
    gain,
    offset,
    polynomial[2];

  /*
    Map the brightness/contrast percentages onto a first-order polynomial
    q' = gain*q + offset, then delegate the pixel work to FunctionImage().
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Contrast -100..100 maps to a slope angle of 0..90 degrees; clamp the
     (theoretically unreachable) negative tangent to zero. */
  gain=tan((double) (MagickPI*(contrast/100.0+1.0)/4.0));
  if (gain < 0.0)
    gain=0.0;
  offset=brightness/100.0+((100-brightness)/200.0)*(1.0-gain);
  polynomial[0]=gain;
  polynomial[1]=offset;
  return(FunctionImage(image,PolynomialFunction,2,polynomial,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C L A H E I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CLAHEImage() is a variant of adaptive histogram equalization in which the
% contrast amplification is limited, so as to reduce this problem of noise
% amplification.
%
% Adapted from implementation by Karel Zuiderveld, karel@cv.ruu.nl in
% "Graphics Gems IV", Academic Press, 1994.
%
% The format of the CLAHEImage method is:
%
% MagickBooleanType CLAHEImage(Image *image,const size_t width,
% const size_t height,const size_t number_bins,const double clip_limit,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the tile divisions to use in horizontal direction.
%
% o height: the height of the tile divisions to use in vertical direction.
%
% o number_bins: number of bins for histogram ("dynamic range").
%
% o clip_limit: contrast limit for localised changes in contrast. A limit
% less than 1 results in standard non-contrast limited AHE.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Intensity range of the working image expressed in 16-bit gray levels
  (0..NumberCLAHEGrays-1); used to build and map the CLAHE lookup tables.
*/
typedef struct _RangeInfo
{
  unsigned short
    min,
    max;
} RangeInfo;
/*
  Clip each histogram bin at clip_limit and redistribute the clipped
  ("excess") counts evenly over the remaining bins, iterating until no
  excess is left or no progress is made.  This is the contrast-limiting
  step of CLAHE (Zuiderveld, Graphics Gems IV).
*/
static void ClipCLAHEHistogram(const double clip_limit,const size_t number_bins,
  size_t *histogram)
{
#define NumberCLAHEGrays (65536)

  register ssize_t
    i;

  size_t
    cumulative_excess,
    previous_excess,
    step;

  ssize_t
    excess;

  /*
    Compute total number of excess pixels.
  */
  cumulative_excess=0;
  for (i=0; i < (ssize_t) number_bins; i++)
  {
    excess=(ssize_t) histogram[i]-(ssize_t) clip_limit;
    if (excess > 0)
      cumulative_excess+=excess;
  }
  /*
    Clip histogram and redistribute excess pixels across all bins.
    Bins already within `excess` of the limit are topped up to the limit;
    the rest receive an equal `step` share.
  */
  step=cumulative_excess/number_bins;
  excess=(ssize_t) (clip_limit-step);
  for (i=0; i < (ssize_t) number_bins; i++)
  {
    if ((double) histogram[i] > clip_limit)
      histogram[i]=(size_t) clip_limit;
    else
      if ((ssize_t) histogram[i] > excess)
        {
          cumulative_excess-=histogram[i]-excess;
          histogram[i]=(size_t) clip_limit;
        }
      else
        {
          cumulative_excess-=step;
          histogram[i]+=step;
        }
  }
  /*
    Redistribute remaining excess one count at a time, visiting bins with
    a stride so the leftovers spread evenly; stop when done or when a full
    sweep makes no progress (all bins at the limit).
  */
  do
  {
    register size_t
      *p;

    size_t
      *q;

    previous_excess=cumulative_excess;
    p=histogram;
    q=histogram+number_bins;
    while ((cumulative_excess != 0) && (p < q))
    {
      step=number_bins/cumulative_excess;
      if (step < 1)
        step=1;  /* size_t: step==0 means more excess than bins */
      for (p=histogram; (p < q) && (cumulative_excess != 0); p+=step)
        if ((double) *p < clip_limit)
          {
            (*p)++;
            cumulative_excess--;
          }
      p++;
    }
  } while ((cumulative_excess != 0) && (cumulative_excess < previous_excess));
}
/*
  Build the gray-level histogram of one tile: `pixels` points at the
  tile's top-left pixel inside the padded CLAHE image, whose row stride
  is clahe_info->width; `lut` maps 16-bit intensities to bin indices.
*/
static void GenerateCLAHEHistogram(const RectangleInfo *clahe_info,
  const RectangleInfo *tile_info,const size_t number_bins,
  const unsigned short *lut,const unsigned short *pixels,size_t *histogram)
{
  register const unsigned short
    *p;

  register ssize_t
    i;

  /*
    Classify the pixels into a gray histogram.
  */
  for (i=0; i < (ssize_t) number_bins; i++)
    histogram[i]=0L;
  p=pixels;
  for (i=0; i < (ssize_t) tile_info->height; i++)
  {
    const unsigned short
      *q;

    q=p+tile_info->width;
    while (p < q)
      histogram[lut[*p++]]++;
    /* Advance p by one full padded-image row so it lands on the same
       x-offset in the tile's next pixel row. */
    q+=clahe_info->width;
    p=q-tile_info->width;
  }
}
/*
  Remap one tile-sized region of `pixels` (row stride clahe_info->width)
  by bilinearly blending the four surrounding tile mappings Q11/Q12/Q21/
  Q22; x and y count DOWN, so they are the weights of the left/top
  mappings respectively.  Eliminates visible tile-boundary artifacts.
*/
static void InterpolateCLAHE(const RectangleInfo *clahe_info,const size_t *Q12,
  const size_t *Q22,const size_t *Q11,const size_t *Q21,
  const RectangleInfo *tile,const unsigned short *lut,unsigned short *pixels)
{
  ssize_t
    y;

  unsigned short
    intensity;

  /*
    Bilinear interpolate four tiles to eliminate boundary artifacts.
  */
  for (y=(ssize_t) tile->height; y > 0; y--)
  {
    register ssize_t
      x;

    for (x=(ssize_t) tile->width; x > 0; x--)
    {
      intensity=lut[*pixels];
      /* Weighted average of the four mapped values, normalized by the
         tile area (PerceptibleReciprocal guards a zero-area tile). */
      *pixels++=(unsigned short ) (PerceptibleReciprocal((double) tile->width*
        tile->height)*(y*(x*Q12[intensity]+(tile->width-x)*Q22[intensity])+
        (tile->height-y)*(x*Q11[intensity]+(tile->width-x)*Q21[intensity])));
    }
    /* Skip the remainder of the padded row to reach the region's next row. */
    pixels+=(clahe_info->width-tile->width);
  }
}
/*
  Build a gray-level lookup table that maps every intensity in
  [range_info->min, range_info->max] onto a histogram bin index in
  [0, number_bins-1].
*/
static void GenerateCLAHELut(const RangeInfo *range_info,
  const size_t number_bins,unsigned short *lut)
{
  ssize_t
    intensity;

  unsigned short
    bin_width;

  /*
    Scale input image [intensity min,max] to [0,number_bins-1].
  */
  bin_width=(unsigned short) ((range_info->max-range_info->min)/number_bins+1);
  for (intensity=(ssize_t) range_info->min;
       intensity <= (ssize_t) range_info->max; intensity++)
    lut[intensity]=(unsigned short) ((intensity-range_info->min)/bin_width);
}
/*
  Turn the (clipped) tile histogram into a cumulative mapping scaled to
  [min-intensity .. max-intensity], clamped at the upper bound; the
  result is the tile's equalization transfer function.
*/
static void MapCLAHEHistogram(const RangeInfo *range_info,
  const size_t number_bins,const size_t number_pixels,size_t *histogram)
{
  double
    cumulative,
    scale;

  register ssize_t
    i;

  scale=(double) (range_info->max-range_info->min)/number_pixels;
  cumulative=0.0;
  for (i=0; i < (ssize_t) number_bins; i++)
  {
    size_t
      mapped;

    cumulative+=histogram[i];
    mapped=(size_t) (range_info->min+scale*cumulative);
    histogram[i]=(mapped > range_info->max) ? range_info->max : mapped;
  }
}
/*
  Core CLAHE driver: builds one clipped, mapped histogram per tile, then
  bilinearly interpolates between neighboring tile mappings for every
  pixel.  clahe_info->x/y hold the tile COUNTS, clahe_info->width/height
  the padded image dimensions in pixels.  Returns MagickFalse only on
  memory-allocation failure.
*/
static MagickBooleanType CLAHE(const RectangleInfo *clahe_info,
  const RectangleInfo *tile_info,const RangeInfo *range_info,
  const size_t number_bins,const double clip_limit,unsigned short *pixels)
{
  MemoryInfo
    *tile_cache;

  register unsigned short
    *p;

  size_t
    limit,
    *tiles;

  ssize_t
    y;

  unsigned short
    lut[NumberCLAHEGrays];

  /*
    Contrast limited adaptive histogram equalization.
  */
  if (clip_limit == 1.0)
    return(MagickTrue);
  tile_cache=AcquireVirtualMemory((size_t) clahe_info->x*clahe_info->y,
    number_bins*sizeof(*tiles));
  if (tile_cache == (MemoryInfo *) NULL)
    return(MagickFalse);
  tiles=(size_t *) GetVirtualMemoryBlob(tile_cache);
  /* Convert the relative clip limit into an absolute per-bin count. */
  limit=(size_t) (clip_limit*(tile_info->width*tile_info->height)/number_bins);
  if (limit < 1UL)
    limit=1UL;
  /*
    Generate greylevel mappings for each tile.
  */
  GenerateCLAHELut(range_info,number_bins,lut);
  p=pixels;
  for (y=0; y < (ssize_t) clahe_info->y; y++)
  {
    register ssize_t
      x;

    for (x=0; x < (ssize_t) clahe_info->x; x++)
    {
      size_t
        *histogram;

      histogram=tiles+(number_bins*(y*clahe_info->x+x));
      GenerateCLAHEHistogram(clahe_info,tile_info,number_bins,lut,p,histogram);
      ClipCLAHEHistogram((double) limit,number_bins,histogram);
      MapCLAHEHistogram(range_info,number_bins,tile_info->width*
        tile_info->height,histogram);
      p+=tile_info->width;  /* next tile in this tile row */
    }
    p+=clahe_info->width*(tile_info->height-1);  /* next tile row */
  }
  /*
    Interpolate greylevel mappings to get CLAHE image.  The loops run one
    past the tile counts so edge rows/columns interpolate over half tiles
    and corners over quarter tiles.
  */
  p=pixels;
  for (y=0; y <= (ssize_t) clahe_info->y; y++)
  {
    OffsetInfo
      offset;

    RectangleInfo
      tile;

    register ssize_t
      x;

    tile.height=tile_info->height;
    tile.y=y-1;
    offset.y=tile.y+1;
    if (y == 0)
      {
        /*
          Top row.
        */
        tile.height=tile_info->height >> 1;
        tile.y=0;
        offset.y=0;
      }
    else
      if (y == (ssize_t) clahe_info->y)
        {
          /*
            Bottom row.
          */
          tile.height=(tile_info->height+1) >> 1;
          tile.y=clahe_info->y-1;
          offset.y=tile.y;
        }
    for (x=0; x <= (ssize_t) clahe_info->x; x++)
    {
      tile.width=tile_info->width;
      tile.x=x-1;
      offset.x=tile.x+1;
      if (x == 0)
        {
          /*
            Left column.
          */
          tile.width=tile_info->width >> 1;
          tile.x=0;
          offset.x=0;
        }
      else
        if (x == (ssize_t) clahe_info->x)
          {
            /*
              Right column.
            */
            tile.width=(tile_info->width+1) >> 1;
            tile.x=clahe_info->x-1;
            offset.x=tile.x;
          }
      InterpolateCLAHE(clahe_info,
        tiles+(number_bins*(tile.y*clahe_info->x+tile.x)), /* Q12 */
        tiles+(number_bins*(tile.y*clahe_info->x+offset.x)), /* Q22 */
        tiles+(number_bins*(offset.y*clahe_info->x+tile.x)), /* Q11 */
        tiles+(number_bins*(offset.y*clahe_info->x+offset.x)), /* Q21 */
        &tile,lut,p);
      p+=tile.width;
    }
    p+=clahe_info->width*(tile.height-1);
  }
  tile_cache=RelinquishVirtualMemory(tile_cache);
  return(MagickTrue);
}
MagickExport MagickBooleanType CLAHEImage(Image *image,const size_t width,
  const size_t height,const size_t number_bins,const double clip_limit,
  ExceptionInfo *exception)
{
#define CLAHEImageTag "CLAHE/Image"

  CacheView
    *image_view;

  ColorspaceType
    colorspace;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MemoryInfo
    *pixel_cache;

  RangeInfo
    range_info;

  RectangleInfo
    clahe_info,
    tile_info;

  size_t
    n;

  ssize_t
    y;

  unsigned short
    *pixels;

  /*
    Configure CLAHE parameters.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  range_info.min=0;
  range_info.max=NumberCLAHEGrays-1;
  tile_info.width=width;
  if (tile_info.width == 0)
    tile_info.width=image->columns >> 3;
  if (tile_info.width == 0)
    tile_info.width=1;  /* images narrower than 8 pixels: avoid % and / by 0 */
  tile_info.height=height;
  if (tile_info.height == 0)
    tile_info.height=image->rows >> 3;
  if (tile_info.height == 0)
    tile_info.height=1;  /* images shorter than 8 pixels: avoid % and / by 0 */
  /*
    Pad the working image up to a whole number of tiles; the pad is split
    evenly around the image when pixels are gathered below.
  */
  tile_info.x=0;
  if ((image->columns % tile_info.width) != 0)
    tile_info.x=(ssize_t) tile_info.width-(image->columns % tile_info.width);
  tile_info.y=0;
  if ((image->rows % tile_info.height) != 0)
    tile_info.y=(ssize_t) tile_info.height-(image->rows % tile_info.height);
  clahe_info.width=image->columns+tile_info.x;
  clahe_info.height=image->rows+tile_info.y;
  clahe_info.x=(ssize_t) clahe_info.width/tile_info.width;
  clahe_info.y=(ssize_t) clahe_info.height/tile_info.height;
  pixel_cache=AcquireVirtualMemory(clahe_info.width,clahe_info.height*
    sizeof(*pixels));
  if (pixel_cache == (MemoryInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  pixels=(unsigned short *) GetVirtualMemoryBlob(pixel_cache);
  colorspace=image->colorspace;
  if (TransformImageColorspace(image,LabColorspace,exception) == MagickFalse)
    {
      pixel_cache=RelinquishVirtualMemory(pixel_cache);
      return(MagickFalse);
    }
  /*
    Initialize CLAHE pixels: gather the first (lightness) channel of the
    Lab image into the padded 16-bit working buffer.
  */
  image_view=AcquireVirtualCacheView(image,exception);
  progress=0;
  status=MagickTrue;
  n=0;
  for (y=0; y < (ssize_t) clahe_info.height; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-(tile_info.x >> 1),y-
      (tile_info.y >> 1),clahe_info.width,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) clahe_info.width; x++)
    {
      pixels[n++]=ScaleQuantumToShort(p[0]);
      p+=GetPixelChannels(image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /* NOTE(review): progress is reported against 2*channels rather
           than 2*rows; preserved as-is -- confirm against the monitor
           contract used elsewhere in this file. */
        proceed=SetImageProgress(image,CLAHEImageTag,progress,2*
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  status=CLAHE(&clahe_info,&tile_info,&range_info,number_bins == 0 ?
    (size_t) 128 : MagickMin(number_bins,256),clip_limit,pixels);
  if (status == MagickFalse)
    (void) ThrowMagickException(exception,GetMagickModule(),
      ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
  /*
    Push CLAHE pixels to CLAHE image.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
  n=clahe_info.width*(tile_info.y >> 1);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    n+=tile_info.x >> 1;  /* skip the left half of the padding */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      q[0]=ScaleShortToQuantum(pixels[n++]);
      q+=GetPixelChannels(image);
    }
    n+=(clahe_info.width-image->columns-(tile_info.x >> 1));
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CLAHEImageTag,progress,2*
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  pixel_cache=RelinquishVirtualMemory(pixel_cache);
  /* Restore the caller's original colorspace. */
  if (TransformImageColorspace(image,colorspace,exception) == MagickFalse)
    status=MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClutImage() replaces each color value in the given image, by using it as an
% index to lookup a replacement color value in a Color Look UP Table in the
% form of an image. The values are extracted along a diagonal of the CLUT
% image so either a horizontal or vertical gradient image can be used.
%
% Typically this is used to either re-color a gray-scale image according to a
% color gradient in the CLUT image, or to perform a freeform histogram
% (level) adjustment according to the (typically gray-scale) gradient in the
% CLUT image.
%
% When the 'channel' mask includes the matte/alpha transparency channel but
% one image has no such channel it is assumed that that image is a simple
% gray-scale image that will effect the alpha channel values, either for
% gray-scale coloring (with transparent or semi-transparent colors), or
% a histogram adjustment of existing alpha channel values. If both images
% have matte channels, direct and normal indexing is applied, which is rarely
% used.
%
% The format of the ClutImage method is:
%
% MagickBooleanType ClutImage(Image *image,Image *clut_image,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o clut_image: the color lookup table image for replacement color values.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ClutImageTag "Clut/Image"

  CacheView
    *clut_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    *clut_map;

  register ssize_t
    i;

  ssize_t adjust,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clut_image != (Image *) NULL);
  assert(clut_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* A gray image indexed by a colored CLUT must become full color. */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsGrayColorspace(clut_image->colorspace) == MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  clut_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*clut_map));
  if (clut_map == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Clut image.
  */
  status=MagickTrue;
  progress=0;
  /* Sample one less row/column unless integer interpolation is requested. */
  adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
  clut_view=AcquireVirtualCacheView(clut_image,exception);
  /* Precompute MaxMap+1 replacement colors along the CLUT image diagonal. */
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    GetPixelInfo(clut_image,clut_map+i);
    status=InterpolatePixelInfo(clut_image,clut_view,method,
      (double) i*(clut_image->columns-adjust)/MaxMap,(double) i*
      (clut_image->rows-adjust)/MaxMap,clut_map+i,exception);
    if (status == MagickFalse)
      break;
  }
  clut_view=DestroyCacheView(clut_view);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelTrait
        traits;

      /* Replace each channel flagged for update by its clut_map entry. */
      GetPixelInfoPixel(image,q,&pixel);
      traits=GetPixelChannelTraits(image,RedPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.red=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.red))].red;
      traits=GetPixelChannelTraits(image,GreenPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.green=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.green))].green;
      traits=GetPixelChannelTraits(image,BluePixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.blue=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.blue))].blue;
      traits=GetPixelChannelTraits(image,BlackPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.black=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.black))].black;
      traits=GetPixelChannelTraits(image,AlphaPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.alpha=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.alpha))].alpha;
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ClutImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(PixelInfo *) RelinquishMagickMemory(clut_map);
  /* If the CLUT carries alpha and alpha is updatable, enable it on image. */
  if ((clut_image->alpha_trait != UndefinedPixelTrait) &&
      ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r D e c i s i o n L i s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorDecisionListImage() accepts a lightweight Color Correction Collection
% (CCC) file which solely contains one or more color corrections and applies
% the correction to the image. Here is a sample CCC file:
%
% <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
% <ColorCorrection id="cc03345">
% <SOPNode>
% <Slope> 0.9 1.2 0.5 </Slope>
% <Offset> 0.4 -0.5 0.6 </Offset>
% <Power> 1.0 0.8 1.5 </Power>
% </SOPNode>
% <SATNode>
% <Saturation> 0.85 </Saturation>
% </SATNode>
% </ColorCorrection>
% </ColorCorrectionCollection>
%
% which includes the slope, offset, and power for each of the RGB channels
% as well as the saturation.
%
% The format of the ColorDecisionListImage method is:
%
% MagickBooleanType ColorDecisionListImage(Image *image,
% const char *color_correction_collection,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_correction_collection: the color correction collection in XML.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ColorDecisionListImage(Image *image,
  const char *color_correction_collection,ExceptionInfo *exception)
{
#define ColorDecisionListCorrectImageTag "ColorDecisionList/Image"

  /* Per-channel ASC-CDL parameters: out = (in*slope+offset)^power. */
  typedef struct _Correction
  {
    double
      slope,
      offset,
      power;
  } Correction;

  typedef struct _ColorCorrection
  {
    Correction
      red,
      green,
      blue;

    double
      saturation;
  } ColorCorrection;

  CacheView
    *image_view;

  char
    token[MagickPathExtent];

  ColorCorrection
    color_correction;

  const char
    *content,
    *p;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    *cdl_map;

  register ssize_t
    i;

  ssize_t
    y;

  XMLTreeInfo
    *cc,
    *ccc,
    *sat,
    *sop;

  /*
    Allocate and initialize cdl maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (color_correction_collection == (const char *) NULL)
    return(MagickFalse);
  ccc=NewXMLTree((const char *) color_correction_collection,exception);
  if (ccc == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  cc=GetXMLTreeChild(ccc,"ColorCorrection");
  if (cc == (XMLTreeInfo *) NULL)
    {
      ccc=DestroyXMLTree(ccc);
      return(MagickFalse);
    }
  /* Identity defaults if a node or value is absent from the CCC XML. */
  color_correction.red.slope=1.0;
  color_correction.red.offset=0.0;
  color_correction.red.power=1.0;
  color_correction.green.slope=1.0;
  color_correction.green.offset=0.0;
  color_correction.green.power=1.0;
  color_correction.blue.slope=1.0;
  color_correction.blue.offset=0.0;
  color_correction.blue.power=1.0;
  color_correction.saturation=0.0;
  sop=GetXMLTreeChild(cc,"SOPNode");
  if (sop != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *offset,
        *power,
        *slope;

      /* Each SOP element holds three whitespace/comma separated values,
         in red, green, blue order. */
      slope=GetXMLTreeChild(sop,"Slope");
      if (slope != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(slope);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.slope=StringToDouble(token,(char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.slope=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.slope=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
      offset=GetXMLTreeChild(sop,"Offset");
      if (offset != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(offset);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
      power=GetXMLTreeChild(sop,"Power");
      if (power != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(power);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.power=StringToDouble(token,(char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.power=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.power=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
    }
  sat=GetXMLTreeChild(cc,"SATNode");
  if (sat != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *saturation;

      saturation=GetXMLTreeChild(sat,"Saturation");
      if (saturation != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(saturation);
          p=(const char *) content;
          GetNextToken(p,&p,MagickPathExtent,token);
          color_correction.saturation=StringToDouble(token,(char **) NULL);
        }
    }
  ccc=DestroyXMLTree(ccc);
  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  Color Correction Collection:");
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.slope: %g",color_correction.red.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.offset: %g",color_correction.red.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.power: %g",color_correction.red.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.slope: %g",color_correction.green.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.offset: %g",color_correction.green.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.power: %g",color_correction.green.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.slope: %g",color_correction.blue.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.offset: %g",color_correction.blue.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.power: %g",color_correction.blue.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.saturation: %g",color_correction.saturation);
    }
  /* Precompute the per-channel SOP transfer function for every map entry. */
  cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
  if (cdl_map == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    cdl_map[i].red=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.red.slope*i/MaxMap+
      color_correction.red.offset,color_correction.red.power))));
    cdl_map[i].green=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.green.slope*i/MaxMap+
      color_correction.green.offset,color_correction.green.power))));
    cdl_map[i].blue=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
      color_correction.blue.offset,color_correction.blue.power))));
  }
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Apply transfer function to colormap.  Parenthesize the saturation
        blend as luma+sat*(cdl-luma) to match the per-pixel path below;
        without the parentheses the luma terms cancelled and the blend
        degenerated to sat*cdl.
      */
      double
        luma;

      luma=0.21267f*image->colormap[i].red+0.71526*image->colormap[i].green+
        0.07217f*image->colormap[i].blue;
      image->colormap[i].red=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma);
      image->colormap[i].green=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-
        luma);
      image->colormap[i].blue=luma+color_correction.saturation*(cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-luma);
    }
  /*
    Apply transfer function to image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      luma;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      luma=0.21267f*GetPixelRed(image,q)+0.71526*GetPixelGreen(image,q)+
        0.07217f*GetPixelBlue(image,q);
      SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q);
      SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q);
      SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
          progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastImage() enhances the intensity differences between the lighter and
% darker elements of the image.  Set sharpen to MagickTrue to increase the
% image contrast; otherwise the contrast is reduced.
%
% The format of the ContrastImage method is:
%
% MagickBooleanType ContrastImage(Image *image,
% const MagickBooleanType sharpen,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Apply a sine-curve (sigmoidal) push to the pixel's HSB brightness:
  sign > 0 sharpens (dark colors get darker, light colors lighter),
  sign < 0 flattens the contrast.  red/green/blue are updated in place.
*/
static void Contrast(const int sign,double *red,double *green,double *blue)
{
  double
    b,
    h,
    s;

  assert(red != (double *) NULL);
  assert(green != (double *) NULL);
  assert(blue != (double *) NULL);
  h=0.0;
  s=0.0;
  b=0.0;
  ConvertRGBToHSB(*red,*green,*blue,&h,&s,&b);
  /*
    Move brightness toward the sigmoid 0.5*(sin(pi*(b-0.5))+1), half way,
    in the direction given by sign; then clamp to [0,1].
  */
  b+=0.5*sign*(0.5*(sin((double) (MagickPI*(b-0.5)))+1.0)-b);
  if (b > 1.0)
    b=1.0;
  if (b < 0.0)
    b=0.0;
  ConvertHSBToRGB(h,s,b,red,green,blue);
}
/*
  ContrastImage(): apply the sigmoidal brightness adjustment in Contrast()
  to every pixel (and, for PseudoClass images, to the colormap as well).
  sharpen == MagickTrue increases contrast; otherwise contrast is reduced.
  Returns MagickFalse if any pixel row could not be read or synced; errors
  are reported through `exception`.
*/
MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen,ExceptionInfo *exception)
{
#define ContrastImageTag "Contrast/Image"

  CacheView
    *image_view;

  int
    sign;  /* +1 to sharpen, -1 to dull */

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Delegate to the OpenCL implementation when one is available.
  */
  if (AccelerateContrastImage(image,sharpen,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        double
          blue,
          green,
          red;

        red=(double) image->colormap[i].red;
        green=(double) image->colormap[i].green;
        blue=(double) image->colormap[i].blue;
        Contrast(sign,&red,&green,&blue);
        image->colormap[i].red=(MagickRealType) red;
        image->colormap[i].green=(MagickRealType) green;
        image->colormap[i].blue=(MagickRealType) blue;
      }
    }
  /*
    Contrast enhance image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      blue,
      green,
      red;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* a prior row failed: skip remaining rows (cannot break out of an
       OpenMP loop) */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      red=(double) GetPixelRed(image,q);
      green=(double) GetPixelGreen(image,q);
      blue=(double) GetPixelBlue(image,q);
      Contrast(sign,&red,&green,&blue);
      SetPixelRed(image,ClampToQuantum(red),q);
      SetPixelGreen(image,ClampToQuantum(green),q);
      SetPixelBlue(image,ClampToQuantum(blue),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ContrastImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastStretchImage() is a simple image enhancement technique that attempts
% to improve the contrast in an image by 'stretching' the range of intensity
% values it contains to span a desired range of values. It differs from the
% more sophisticated histogram equalization in that it can only apply a
% linear scaling function to the image pixel values. As a result the
% 'enhancement' is less harsh.
%
% The format of the ContrastStretchImage method is:
%
% MagickBooleanType ContrastStretchImage(Image *image,
% const double black_point,const double white_point,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o levels: Specify the levels where the black and white points have the
% range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.).
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ContrastStretchImage(): linearly rescale each updatable channel so that
  the histogram population below `black_point` maps to 0 and the population
  above `white_point` maps to QuantumRange.  black_point and white_point
  are expressed as pixel counts (0 .. columns*rows), not percentages.
  Returns MagickFalse if any pixel row could not be read or synced.
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define MaxRange(color) ((double) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag "ContrastStretch/Image"

  CacheView
    *image_view;

  double
    *black,        /* per-channel black level (histogram-map index) */
    *histogram,    /* (MaxMap+1) x channels population counts */
    *stretch_map,  /* (MaxMap+1) x channels output lookup table */
    *white;        /* per-channel white level (histogram-map index) */

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate histogram and stretch map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageGray(image,exception) != MagickFalse)
    (void) SetImageColorspace(image,GRAYColorspace,exception);
  black=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*black));
  white=(double *) AcquireQuantumMemory(MaxPixelChannels,sizeof(*white));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*histogram));
  stretch_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*stretch_map));
  if ((black == (double *) NULL) || (white == (double *) NULL) ||
      (histogram == (double *) NULL) || (stretch_map == (double *) NULL))
    {
      /* release whichever allocations succeeded before throwing */
      if (stretch_map != (double *) NULL)
        stretch_map=(double *) RelinquishMagickMemory(stretch_map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (white != (double *) NULL)
        white=(double *) RelinquishMagickMemory(white);
      if (black != (double *) NULL)
        black=(double *) RelinquishMagickMemory(black);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      /* default channel mask: bin the pixel's overall intensity;
         otherwise bin each channel by its own value */
      pixel=GetPixelIntensity(image,p);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        if (image->channel_mask != DefaultChannels)
          pixel=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(pixel))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black/white levels.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;

    register ssize_t
      j;

    black[i]=0.0;
    white[i]=MaxRange(QuantumRange);
    /* scan up from 0 until the cumulative count exceeds black_point */
    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > black_point)
        break;
    }
    black[i]=(double) j;
    /* scan down from MaxMap until the cumulative count exceeds
       (total pixels - white_point) */
    intensity=0.0;
    for (j=(ssize_t) MaxMap; j != 0; j--)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > ((double) image->columns*image->rows-white_point))
        break;
    }
    white[i]=(double) j;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Stretch the histogram to create the stretched image mapping.
  */
  (void) memset(stretch_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*stretch_map));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    register ssize_t
      j;

    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      double
        gamma;

      /* PerceptibleReciprocal guards the divide when black == white */
      gamma=PerceptibleReciprocal(white[i]-black[i]);
      if (j < (ssize_t) black[i])
        stretch_map[GetPixelChannels(image)*j+i]=0.0;
      else
        if (j > (ssize_t) white[i])
          stretch_map[GetPixelChannels(image)*j+i]=(double) QuantumRange;
        else
          if (black[i] != white[i])
            stretch_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum(
              (double) (MaxMap*gamma*(j-black[i])));
    }
  }
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        j;

      /*
        Stretch-contrast colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,RedPixelChannel);
            image->colormap[j].red=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+i];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,GreenPixelChannel);
            image->colormap[j].green=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+i];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,BluePixelChannel);
            image->colormap[j].blue=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+i];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,AlphaPixelChannel);
            image->colormap[j].alpha=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+i];
          }
      }
    }
  /*
    Stretch-contrast image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* degenerate range: leave the channel untouched */
        if (black[j] == white[j])
          continue;
        q[j]=ClampToQuantum(stretch_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ContrastStretchImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  stretch_map=(double *) RelinquishMagickMemory(stretch_map);
  white=(double *) RelinquishMagickMemory(white);
  black=(double *) RelinquishMagickMemory(black);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n h a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EnhanceImage() applies a digital filter that improves the quality of a
% noisy image.
%
% The format of the EnhanceImage method is:
%
% Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  EnhanceImage(): noise-reduce the image with a 5x5 weighted average in
  which a neighbor contributes only when its color is close enough to the
  center pixel (distance_squared < 0.069).  Returns a new image, or NULL
  on failure.
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
#define EnhanceImageTag "Enhance/Image"
/*
  Accumulate one neighbor at `r` into `aggregate`/`total_weight` if its
  weighted color distance from the center `pixel` is under threshold, then
  advance `r` to the next pixel.  Uses the enclosing loop's mean, distance,
  distance_squared locals.
*/
#define EnhancePixel(weight) \
  mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \
  distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \
  distance_squared=(4.0+mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \
  distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \
  distance_squared+=(7.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \
  distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \
  distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \
  distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \
  distance_squared+=(5.0-mean)*distance*distance; \
  if (distance_squared < 0.069) \
    { \
      aggregate.red+=(weight)*GetPixelRed(image,r); \
      aggregate.green+=(weight)*GetPixelGreen(image,r); \
      aggregate.blue+=(weight)*GetPixelBlue(image,r); \
      aggregate.black+=(weight)*GetPixelBlack(image,r); \
      aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \
      total_weight+=(weight); \
    } \
  r+=GetPixelChannels(image);

  CacheView
    *enhance_view,
    *image_view;

  Image
    *enhance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize enhanced image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  enhance_image=CloneImage(image,0,0,MagickTrue,exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass,exception) == MagickFalse)
    {
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,enhance_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    if (status == MagickFalse)
      continue;
    /* read a 5-row band, 2 pixels of virtual padding on each side */
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* offset of the center pixel (row 2, column 2) within the band */
    center=(ssize_t) GetPixelChannels(image)*(2*(image->columns+4)+2);
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        distance,
        distance_squared,
        mean,
        total_weight;

      PixelInfo
        aggregate;

      register const Quantum
        *magick_restrict r;

      GetPixelInfo(image,&aggregate);
      total_weight=0.0;
      GetPixelInfoPixel(image,p+center,&pixel);
      /* 5x5 kernel, weights 5-8-10-8-5 / ... / 10-40-80-40-10 / ... */
      r=p;
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
      EnhancePixel(8.0); EnhancePixel(5.0);
      r=p+GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
      EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+2*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0);
      EnhancePixel(40.0); EnhancePixel(10.0);
      r=p+3*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
      EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+4*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
      EnhancePixel(8.0); EnhancePixel(5.0);
      if (total_weight > MagickEpsilon)
        {
          /* rounded weighted average; if no neighbor qualified, the
             center pixel passes through unchanged */
          pixel.red=((aggregate.red+total_weight/2.0)/total_weight);
          pixel.green=((aggregate.green+total_weight/2.0)/total_weight);
          pixel.blue=((aggregate.blue+total_weight/2.0)/total_weight);
          pixel.black=((aggregate.black+total_weight/2.0)/total_weight);
          pixel.alpha=((aggregate.alpha+total_weight/2.0)/total_weight);
        }
      SetPixelViaPixelInfo(image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(enhance_image);
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EnhanceImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    enhance_image=DestroyImage(enhance_image);
  return(enhance_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E q u a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EqualizeImage() applies a histogram equalization to the image.
%
% The format of the EqualizeImage method is:
%
% MagickBooleanType EqualizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  EqualizeImage(): histogram-equalize every updatable channel of the image
  (and, for PseudoClass images, the colormap): the channel's cumulative
  histogram is rescaled so that intensities are redistributed across the
  full quantum range.  Returns MagickFalse if any pixel row could not be
  read or synced; errors are reported through `exception`.
*/
MagickExport MagickBooleanType EqualizeImage(Image *image,
  ExceptionInfo *exception)
{
#define EqualizeImageTag "Equalize/Image"

  CacheView
    *image_view;

  double
    black[CompositePixelChannel+1],   /* per-channel CDF minimum */
    *equalize_map,                    /* (MaxMap+1) x channels output LUT */
    *histogram,                       /* (MaxMap+1) x channels counts */
    *map,                             /* cumulative histogram (CDF) */
    white[CompositePixelChannel+1];   /* per-channel CDF maximum */

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize histogram arrays.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateEqualizeImage(image,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  equalize_map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*equalize_map));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*
    sizeof(*histogram));
  map=(double *) AcquireQuantumMemory(MaxMap+1UL,MaxPixelChannels*sizeof(*map));
  if ((equalize_map == (double *) NULL) || (histogram == (double *) NULL) ||
      (map == (double *) NULL))
    {
      /* release whichever allocations succeeded before throwing */
      if (map != (double *) NULL)
        map=(double *) RelinquishMagickMemory(map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (equalize_map != (double *) NULL)
        equalize_map=(double *) RelinquishMagickMemory(equalize_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) memset(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          intensity;

        intensity=(double) p[i];
        /* synced channels equalize on overall intensity, not per channel */
        if ((image->channel_mask & SyncChannels) != 0)
          intensity=GetPixelIntensity(image,p);
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(intensity))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Integrate the histogram to get the equalization map.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;

    register ssize_t
      j;

    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      map[GetPixelChannels(image)*j+i]=intensity;
    }
  }
  (void) memset(equalize_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*equalize_map));
  /*
    Zero the entire black/white arrays.  (The previous code used
    sizeof(*black)/sizeof(*white), which cleared only the first element
    of each array and left the rest uninitialized.)
  */
  (void) memset(black,0,sizeof(black));
  (void) memset(white,0,sizeof(white));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    register ssize_t
      j;

    black[i]=map[i];
    white[i]=map[GetPixelChannels(image)*MaxMap+i];
    /* a flat CDF (black == white) means the channel is constant: leave
       its equalize_map entries zero and skip the channel later */
    if (black[i] != white[i])
      for (j=0; j <= (ssize_t) MaxMap; j++)
        equalize_map[GetPixelChannels(image)*j+i]=(double)
          ScaleMapToQuantum((double) ((MaxMap*(map[
          GetPixelChannels(image)*j+i]-black[i]))/(white[i]-black[i])));
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  map=(double *) RelinquishMagickMemory(map);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        j;

      /*
        Equalize colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              RedPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].red=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+
                channel];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              GreenPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].green=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+
                channel];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              BluePixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].blue=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+
                channel];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel = GetPixelChannelChannel(image,
              AlphaPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].alpha=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+
                channel];
          }
      }
    }
  /*
    Equalize image.
  */
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j]))
          continue;
        q[j]=ClampToQuantum(equalize_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,EqualizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  equalize_map=(double *) RelinquishMagickMemory(equalize_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GammaImage() gamma-corrects a particular image channel. The same
% image viewed on different devices will have perceptual differences in the
% way the image's intensities are represented on the screen. Specify
% individual gamma levels for the red, green, and blue channels, or adjust
% all three with the gamma parameter. Values typically range from 0.8 to 2.3.
%
% You can also reduce the influence of a particular channel with a gamma
% value of 0.
%
% The format of the GammaImage method is:
%
% MagickBooleanType GammaImage(Image *image,const double gamma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o gamma: the image gamma; values typically range from 0.8 to 2.3.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double gamma_pow(const double value,const double gamma)
{
return(value < 0.0 ? value : pow(value,gamma));
}
/*
  GammaImage(): gamma-correct every updatable channel through a lookup
  table built from pow(i/MaxMap, 1/gamma).  gamma == 1.0 is a no-op;
  gamma == 0.0 maps everything to the table's zeroed entries.  The image's
  stored gamma attribute is multiplied by the applied value.  Returns
  MagickFalse if any pixel row could not be read or synced.
*/
MagickExport MagickBooleanType GammaImage(Image *image,const double gamma,
  ExceptionInfo *exception)
{
#define GammaCorrectImageTag "GammaCorrect/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;  /* (MaxMap+1)-entry transfer lookup table */

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (gamma == 1.0)
    return(MagickTrue);
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) memset(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  /* gamma == 0.0 leaves the map all zeros (channel is blacked out) */
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ScaleMapToQuantum((double) (MaxMap*pow((double) i/
        MaxMap,1.0/gamma)));
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Gamma-correct colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].red))];
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].green))];
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].blue))];
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].alpha))];
    }
  /*
    Gamma-correct image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=gamma_map[ScaleQuantumToMap(ClampToQuantum((MagickRealType)
          q[j]))];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GammaCorrectImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  /* track the cumulative gamma applied to the image */
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GrayscaleImage() converts the image to grayscale.
%
% The format of the GrayscaleImage method is:
%
% MagickBooleanType GrayscaleImage(Image *image,
% const PixelIntensityMethod method ,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the pixel intensity method.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GrayscaleImage(): convert the image to grayscale in place, computing each
  pixel's intensity with the requested PixelIntensityMethod and storing it
  via SetPixelGray.  On success the image type is set to GrayscaleType and
  the colorspace to GRAY (or LinearGRAY for the Luminance methods).
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method,ExceptionInfo *exception)
{
#define GrayscaleImageTag "Grayscale/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /* flatten the colormap into pixels before converting in place */
      if (SyncImage(image,exception) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
        return(MagickFalse);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Delegate to the OpenCL implementation when one is available.
  */
  if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse)
    {
      image->intensity=method;
      image->type=GrayscaleType;
      if ((method == Rec601LuminancePixelIntensityMethod) ||
          (method == Rec709LuminancePixelIntensityMethod))
        return(SetImageColorspace(image,LinearGRAYColorspace,exception));
      return(SetImageColorspace(image,GRAYColorspace,exception));
    }
#endif
  /*
    Grayscale image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        blue,
        green,
        red,
        intensity;

      red=(MagickRealType) GetPixelRed(image,q);
      green=(MagickRealType) GetPixelGreen(image,q);
      blue=(MagickRealType) GetPixelBlue(image,q);
      intensity=0.0;
      switch (method)
      {
        case AveragePixelIntensityMethod:
        {
          intensity=(red+green+blue)/3.0;
          break;
        }
        case BrightnessPixelIntensityMethod:
        {
          /* HSB brightness: max component */
          intensity=MagickMax(MagickMax(red,green),blue);
          break;
        }
        case LightnessPixelIntensityMethod:
        {
          /* HSL lightness: midpoint of min and max components */
          intensity=(MagickMin(MagickMin(red,green),blue)+
            MagickMax(MagickMax(red,green),blue))/2.0;
          break;
        }
        case MSPixelIntensityMethod:
        {
          intensity=(MagickRealType) (((double) red*red+green*green+
            blue*blue)/3.0);
          break;
        }
        case Rec601LumaPixelIntensityMethod:
        {
          /* Luma is computed on gamma-encoded (sRGB) components */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec601LuminancePixelIntensityMethod:
        {
          /* Luminance is computed on linear-light components */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec709LumaPixelIntensityMethod:
        default:
        {
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case Rec709LuminancePixelIntensityMethod:
        {
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case RMSPixelIntensityMethod:
        {
          intensity=(MagickRealType) (sqrt((double) red*red+green*green+
            blue*blue)/sqrt(3.0));
          break;
        }
      }
      SetPixelGray(image,ClampToQuantum(intensity),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,GrayscaleImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  image->intensity=method;
  image->type=GrayscaleType;
  if ((method == Rec601LuminancePixelIntensityMethod) ||
      (method == Rec709LuminancePixelIntensityMethod))
    return(SetImageColorspace(image,LinearGRAYColorspace,exception));
  return(SetImageColorspace(image,GRAYColorspace,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H a l d C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HaldClutImage() applies a Hald color lookup table to the image. A Hald
% color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
% Create it with the HALD coder. You can apply any color transformation to
% the Hald image and then use this method to apply the transform to the
% image.
%
% The format of the HaldClutImage method is:
%
% MagickBooleanType HaldClutImage(Image *image,Image *hald_image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o hald_image: the color lookup table image for replacement color values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType HaldClutImage(Image *image,
  const Image *hald_image,ExceptionInfo *exception)
{
#define HaldClutImageTag "Clut/Image"

  /*
    Fractional (x,y,z) position of a color inside one cell of the Hald
    color cube.
  */
  typedef struct _HaldInfo
  {
    double
      x,
      y,
      z;
  } HaldInfo;

  CacheView
    *hald_view,
    *image_view;

  double
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  size_t
    cube_size,
    length,
    level;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(hald_image != (Image *) NULL);
  assert(hald_image->signature == MagickCoreSignature);
  /*
    Pixels are rewritten in place, so the image must be DirectClass; an
    opaque alpha channel is added because the area blends below read
    per-pixel alpha.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Hald clut image.
  */
  status=MagickTrue;
  progress=0;
  /*
    A Hald image of order n is n*n*n pixels on a side; the loop recovers n
    from the smaller image dimension.  After squaring, 'level' is the
    number of CLUT entries per channel (n*n) and 'cube_size' is the linear
    span of one full blue slice of the unrolled cube (n^4).
  */
  length=(size_t) MagickMin((MagickRealType) hald_image->columns,
    (MagickRealType) hald_image->rows);
  for (level=2; (level*level*level) < length; level++) ;
  level*=level;
  cube_size=level*level;
  width=(double) hald_image->columns;
  GetPixelInfo(hald_image,&zero);
  hald_view=AcquireVirtualCacheView(hald_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        offset;

      HaldInfo
        point;

      PixelInfo
        pixel,
        pixel1,
        pixel2,
        pixel3,
        pixel4;

      /*
        Map the pixel's RGB to a (possibly fractional) coordinate in the
        color cube; 'offset' is the linear index of the cell origin in the
        unrolled Hald image.  The fractional x component stays inside
        'offset' and is resolved by sub-pixel interpolation below.
      */
      point.x=QuantumScale*(level-1.0)*GetPixelRed(image,q);
      point.y=QuantumScale*(level-1.0)*GetPixelGreen(image,q);
      point.z=QuantumScale*(level-1.0)*GetPixelBlue(image,q);
      offset=point.x+level*floor(point.y)+cube_size*floor(point.z);
      point.x-=floor(point.x);
      point.y-=floor(point.y);
      point.z-=floor(point.z);
      /*
        Trilinear interpolation: blend two green-adjacent samples along y
        (pixel3), repeat on the next blue slice (pixel4), then blend the
        two slice results along z.
      */
      pixel1=zero;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      if (status == MagickFalse)
        break;
      pixel2=zero;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      pixel3=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        point.y,&pixel3);
      offset+=cube_size;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      if (status == MagickFalse)
        break;
      status=InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      if (status == MagickFalse)
        break;
      pixel4=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        point.y,&pixel4);
      pixel=zero;
      CompositePixelInfoAreaBlend(&pixel3,pixel3.alpha,&pixel4,pixel4.alpha,
        point.z,&pixel);
      /*
        Write back only the channels flagged for update; black additionally
        requires CMYK, alpha requires a defined alpha trait.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,ClampToQuantum(pixel.red),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,ClampToQuantum(pixel.green),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,ClampToQuantum(pixel.black),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,HaldClutImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() adjusts the levels of a particular image channel by
% scaling the colors falling between specified white and black points to
% the full available quantum range.
%
% The parameters provided represent the black, and white points. The black
% point specifies the darkest color in the image. Colors darker than the
% black point are set to zero. White point specifies the lightest color in
% the image. Colors brighter than the white point are set to the maximum
% quantum value.
%
% If a '!' flag is given, map black and white colors to the given levels
% rather than mapping those levels to black and white. See
% LevelizeImage() below.
%
% Gamma specifies a gamma correction to apply to the image.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Map 'pixel' from the [black_point,white_point] interval onto the full
  quantum range, applying a 1/gamma power correction.  Values outside the
  interval extrapolate; callers clamp the result.
*/
static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const double pixel)
{
  double
    range_scale;

  range_scale=PerceptibleReciprocal(white_point-black_point);
  return(QuantumRange*gamma_pow(range_scale*((double) pixel-black_point),
    1.0/gamma));
}
MagickExport MagickBooleanType LevelImage(Image *image,const double black_point,
  const double white_point,const double gamma,ExceptionInfo *exception)
{
#define LevelImageTag "Level/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap: only channels flagged for update are touched.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].red));
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].green));
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].blue));
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].alpha));
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      /*
        Stretch every channel flagged for update from the
        [black_point,white_point] interval to the full quantum range.
      */
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (double) q[j]));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Clamp any over/underflow introduced by the stretch (relevant in HDRI
    builds where quantum values may leave the nominal range).
  */
  (void) ClampImage(image,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImage() applies the reversed LevelImage() operation to just
% the specific channels specified. It compresses the full range of color
% values, so that they lie between the given black and white points. Gamma is
% applied before the values are mapped.
%
% LevelizeImage() can be invoked by using a +level command line
% API option, or by using a '!' on a -level or LevelImage() geometry string.
%
% It can be used to de-contrast a greyscale image to the exact levels
% specified. Or by using specific levels for each channel of an image you
% can convert a gray-scale image to any linear color gradient, according to
% those levels.
%
% The format of the LevelizeImage method is:
%
% MagickBooleanType LevelizeImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma,
  ExceptionInfo *exception)
{
#define LevelizeImageTag "Levelize/Image"
/*
  Inverse of LevelPixel(): gamma-correct the normalized value, then
  compress the full quantum range into [black_point,white_point].
*/
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap: only channels flagged for update are touched.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) LevelizeValue(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) LevelizeValue(
          image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) LevelizeValue(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) LevelizeValue(
          image->colormap[i].alpha);
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      /*
        Compress every channel flagged for update into the
        [black_point,white_point] interval.
      */
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=LevelizeValue(q[j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,LevelizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageColors() maps the given color to "black" and "white" values,
% linearly spreading out the colors, and level values on a channel by channel
% basis, as per LevelImage(). The given colors allow you to specify
% different level ranges for each of the color channels separately.
%
% If the boolean 'invert' is set true the image values will be modified in the
% reverse direction. That is any existing "black" and "white" colors in the
% image will become the color values given, with all other values compressed
% appropriately. This effectively maps a greyscale gradient into the given
% color gradient.
%
% The format of the LevelImageColors method is:
%
% MagickBooleanType LevelImageColors(Image *image,
% const PixelInfo *black_color,const PixelInfo *white_color,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_color: The color to map black to/from
%
% o white_color: The color to map white to/from
%
% o invert: if true map the colors (levelize), rather than from (level)
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelImageColors(Image *image,
const PixelInfo *black_color,const PixelInfo *white_color,
const MagickBooleanType invert,ExceptionInfo *exception)
{
ChannelType
channel_mask;
MagickStatusType
status;
/*
Allocate and initialize levels map.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
(IsGrayColorspace(white_color->colorspace) == MagickFalse)))
(void) SetImageColorspace(image,sRGBColorspace,exception);
status=MagickTrue;
if (invert == MagickFalse)
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,RedChannel);
status&=LevelImage(image,black_color->red,white_color->red,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,GreenChannel);
status&=LevelImage(image,black_color->green,white_color->green,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,BlueChannel);
status&=LevelImage(image,black_color->blue,white_color->blue,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
{
channel_mask=SetImageChannelMask(image,BlackChannel);
status&=LevelImage(image,black_color->black,white_color->black,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
{
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=LevelImage(image,black_color->alpha,white_color->alpha,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
}
else
{
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,RedChannel);
status&=LevelizeImage(image,black_color->red,white_color->red,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,GreenChannel);
status&=LevelizeImage(image,black_color->green,white_color->green,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
channel_mask=SetImageChannelMask(image,BlueChannel);
status&=LevelizeImage(image,black_color->blue,white_color->blue,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
{
channel_mask=SetImageChannelMask(image,BlackChannel);
status&=LevelizeImage(image,black_color->black,white_color->black,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
{
channel_mask=SetImageChannelMask(image,AlphaChannel);
status&=LevelizeImage(image,black_color->alpha,white_color->alpha,1.0,
exception);
(void) SetImageChannelMask(image,channel_mask);
}
}
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i n e a r S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LinearStretchImage() discards any pixels below the black point and above
% the white point and levels the remaining pixels.
%
% The format of the LinearStretchImage method is:
%
% MagickBooleanType LinearStretchImage(Image *image,
% const double black_point,const double white_point,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define LinearStretchImageTag "LinearStretch/Image"

  CacheView
    *image_view;

  double
    *histogram,
    intensity;

  MagickBooleanType
    status;

  ssize_t
    black,
    white,
    y;

  /*
    Allocate histogram and linear map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Form histogram.
  */
  (void) memset(histogram,0,(MaxMap+1)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Bin each pixel by its intensity, scaled to the map range. */
      intensity=GetPixelIntensity(image,p);
      histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black and white point
    levels: black_point and white_point are cumulative pixel counts,
    scanned from the dark and light ends respectively.
  */
  intensity=0.0;
  for (black=0; black < (ssize_t) MaxMap; black++)
  {
    intensity+=histogram[black];
    if (intensity >= black_point)
      break;
  }
  intensity=0.0;
  for (white=(ssize_t) MaxMap; white != 0; white--)
  {
    intensity+=histogram[white];
    if (intensity >= white_point)
      break;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Stretch the surviving [black,white] intensity range to the full
    quantum range with unit gamma.
  */
  status=LevelImage(image,(double) ScaleMapToQuantum((MagickRealType) black),
    (double) ScaleMapToQuantum((MagickRealType) white),1.0,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d u l a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModulateImage() lets you control the brightness, saturation, and hue
% of an image. Modulate represents the brightness, saturation, and hue
% as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the
% modulation is lightness, saturation, and hue. For HWB, use blackness,
% whiteness, and hue. And for HCL, use chrome, luma, and hue.
%
% The format of the ModulateImage method is:
%
% MagickBooleanType ModulateImage(Image *image,const char *modulate,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulate: Define the percent change in brightness, saturation, and hue.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Modulate a color in HCL space: chroma and luma scale linearly with their
  percent arguments (100 == unchanged); hue rotates by at most half a turn
  in either direction.
*/
static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    chroma_value,
    hue_value,
    luma_value;

  ConvertRGBToHCL(*red,*green,*blue,&hue_value,&chroma_value,&luma_value);
  hue_value+=fmod(percent_hue-100.0,200.0)/200.0;
  chroma_value=(0.01*percent_chroma)*chroma_value;
  luma_value=(0.01*percent_luma)*luma_value;
  ConvertHCLToRGB(hue_value,chroma_value,luma_value,red,green,blue);
}
/*
  Modulate a color in HCLp space: chroma and luma scale linearly with their
  percent arguments (100 == unchanged); hue rotates by at most half a turn
  in either direction.
*/
static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    chroma_value,
    hue_value,
    luma_value;

  ConvertRGBToHCLp(*red,*green,*blue,&hue_value,&chroma_value,&luma_value);
  hue_value+=fmod(percent_hue-100.0,200.0)/200.0;
  chroma_value=(0.01*percent_chroma)*chroma_value;
  luma_value=(0.01*percent_luma)*luma_value;
  ConvertHCLpToRGB(hue_value,chroma_value,luma_value,red,green,blue);
}
/*
  Modulate a color in HSB space: saturation and brightness scale linearly
  with their percent arguments (100 == unchanged); hue rotates by at most
  half a turn in either direction.
*/
static inline void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,double *red,
  double *green,double *blue)
{
  double
    brightness_value,
    hue_value,
    saturation_value;

  ConvertRGBToHSB(*red,*green,*blue,&hue_value,&saturation_value,
    &brightness_value);
  hue_value+=fmod(percent_hue-100.0,200.0)/200.0;
  saturation_value=(0.01*percent_saturation)*saturation_value;
  brightness_value=(0.01*percent_brightness)*brightness_value;
  ConvertHSBToRGB(hue_value,saturation_value,brightness_value,red,green,blue);
}
/*
  Modulate a color in HSI space: saturation and intensity scale linearly
  with their percent arguments (100 == unchanged); hue rotates by at most
  half a turn in either direction.
*/
static inline void ModulateHSI(const double percent_hue,
  const double percent_saturation,const double percent_intensity,double *red,
  double *green,double *blue)
{
  double
    hue_value,
    intensity_value,
    saturation_value;

  ConvertRGBToHSI(*red,*green,*blue,&hue_value,&saturation_value,
    &intensity_value);
  hue_value+=fmod(percent_hue-100.0,200.0)/200.0;
  saturation_value=(0.01*percent_saturation)*saturation_value;
  intensity_value=(0.01*percent_intensity)*intensity_value;
  ConvertHSIToRGB(hue_value,saturation_value,intensity_value,red,green,blue);
}
/*
  Modulate a color in HSL space: saturation and lightness scale linearly
  with their percent arguments (100 == unchanged); hue rotates by at most
  half a turn in either direction.
*/
static inline void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,double *red,
  double *green,double *blue)
{
  double
    hue_value,
    lightness_value,
    saturation_value;

  ConvertRGBToHSL(*red,*green,*blue,&hue_value,&saturation_value,
    &lightness_value);
  hue_value+=fmod(percent_hue-100.0,200.0)/200.0;
  saturation_value=(0.01*percent_saturation)*saturation_value;
  lightness_value=(0.01*percent_lightness)*lightness_value;
  ConvertHSLToRGB(hue_value,saturation_value,lightness_value,red,green,blue);
}
/*
  Modulate a color in HSV space: saturation and value scale linearly with
  their percent arguments (100 == unchanged); hue rotates by at most half a
  turn in either direction.
*/
static inline void ModulateHSV(const double percent_hue,
  const double percent_saturation,const double percent_value,double *red,
  double *green,double *blue)
{
  double
    hue_value,
    saturation_value,
    value_channel;

  ConvertRGBToHSV(*red,*green,*blue,&hue_value,&saturation_value,
    &value_channel);
  hue_value+=fmod(percent_hue-100.0,200.0)/200.0;
  saturation_value=(0.01*percent_saturation)*saturation_value;
  value_channel=(0.01*percent_value)*value_channel;
  ConvertHSVToRGB(hue_value,saturation_value,value_channel,red,green,blue);
}
/*
  Modulate a color in HWB space: whiteness and blackness scale linearly
  with their percent arguments (100 == unchanged); hue rotates by at most
  half a turn in either direction.
*/
static inline void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,double *red,
  double *green,double *blue)
{
  double
    blackness_value,
    hue_value,
    whiteness_value;

  ConvertRGBToHWB(*red,*green,*blue,&hue_value,&whiteness_value,
    &blackness_value);
  hue_value+=fmod(percent_hue-100.0,200.0)/200.0;
  blackness_value=(0.01*percent_blackness)*blackness_value;
  whiteness_value=(0.01*percent_whiteness)*whiteness_value;
  ConvertHWBToRGB(hue_value,whiteness_value,blackness_value,red,green,blue);
}
/*
  Modulate a color in LCHab space: luma and chroma scale linearly with
  their percent arguments (100 == unchanged); hue rotates by at most half a
  turn in either direction.
*/
static inline void ModulateLCHab(const double percent_luma,
  const double percent_chroma,const double percent_hue,double *red,
  double *green,double *blue)
{
  double
    chroma_value,
    hue_value,
    luma_value;

  ConvertRGBToLCHab(*red,*green,*blue,&luma_value,&chroma_value,&hue_value);
  luma_value=(0.01*percent_luma)*luma_value;
  chroma_value=(0.01*percent_chroma)*chroma_value;
  hue_value+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertLCHabToRGB(luma_value,chroma_value,hue_value,red,green,blue);
}
/*
  Modulate a color in LCHuv space: luma and chroma scale linearly with
  their percent arguments (100 == unchanged); hue rotates by at most half a
  turn in either direction.
*/
static inline void ModulateLCHuv(const double percent_luma,
  const double percent_chroma,const double percent_hue,double *red,
  double *green,double *blue)
{
  double
    chroma_value,
    hue_value,
    luma_value;

  ConvertRGBToLCHuv(*red,*green,*blue,&luma_value,&chroma_value,&hue_value);
  luma_value=(0.01*percent_luma)*luma_value;
  chroma_value=(0.01*percent_chroma)*chroma_value;
  hue_value+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertLCHuvToRGB(luma_value,chroma_value,hue_value,red,green,blue);
}
MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate,
  ExceptionInfo *exception)
{
#define ModulateImageTag "Modulate/Image"

  CacheView
    *image_view;

  ColorspaceType
    colorspace;

  const char
    *artifact;

  double
    percent_brightness,
    percent_hue,
    percent_saturation;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize modulate table.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (modulate == (char *) NULL)
    return(MagickFalse);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Parse "brightness[,saturation[,hue]]"; omitted terms default to 100
    (no change).
  */
  flags=ParseGeometry(modulate,&geometry_info);
  percent_brightness=geometry_info.rho;
  percent_saturation=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    percent_saturation=100.0;
  percent_hue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    percent_hue=100.0;
  /*
    The "modulate:colorspace" artifact selects the working colorspace; the
    default (UndefinedColorspace) falls through to HSL in the switches
    below.
  */
  colorspace=UndefinedColorspace;
  artifact=GetImageArtifact(image,"modulate:colorspace");
  if (artifact != (const char *) NULL)
    colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions,
      MagickFalse,artifact);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      double
        blue,
        green,
        red;

      /*
        Modulate image colormap.
      */
      red=(double) image->colormap[i].red;
      green=(double) image->colormap[i].green;
      blue=(double) image->colormap[i].blue;
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        /*
          LCHColorspace is an alias for LCHuv; it was previously grouped
          with LCHab here while the DirectClass path below grouped it with
          LCHuv, so PseudoClass and DirectClass images diverged.
        */
        case LCHColorspace:
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      image->colormap[i].red=red;
      image->colormap[i].green=green;
      image->colormap[i].blue=blue;
    }
  /*
    Modulate image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateModulateImage(image,percent_brightness,percent_hue,
      percent_saturation,colorspace,exception) != MagickFalse)
    return(MagickTrue);
#endif
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        blue,
        green,
        red;

      red=(double) GetPixelRed(image,q);
      green=(double) GetPixelGreen(image,q);
      blue=(double) GetPixelBlue(image,q);
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        /*
          HSIColorspace was previously missing from this switch (it fell
          through to the HSL default), diverging from the colormap path
          above.
        */
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        case LCHColorspace:
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      SetPixelRed(image,ClampToQuantum(red),q);
      SetPixelGreen(image,ClampToQuantum(green),q);
      SetPixelBlue(image,ClampToQuantum(blue),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ModulateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e g a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NegateImage() negates the colors in the reference image. The grayscale
% option means that only grayscale values within the image are negated.
%
% The format of the NegateImage method is:
%
% MagickBooleanType NegateImage(Image *image,
% const MagickBooleanType grayscale,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o grayscale: If MagickTrue, only negate grayscale pixels within the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NegateImage(Image *image,
  const MagickBooleanType grayscale,ExceptionInfo *exception)
{
#define NegateImageTag  "Negate/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Replace each updatable channel value q with QuantumRange-q.  When
    grayscale is MagickTrue, only pixels (or colormap entries) whose
    red, green and blue components are all equal are negated.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Negate colormap (skip non-gray entries in grayscale mode).
      */
      if (grayscale != MagickFalse)
        if ((image->colormap[i].red != image->colormap[i].green) ||
            (image->colormap[i].green != image->colormap[i].blue))
          continue;
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=QuantumRange-image->colormap[i].red;
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=QuantumRange-image->colormap[i].green;
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
    }
  /*
    Negate image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
  if (grayscale != MagickFalse)
    {
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            j;

          if (IsPixelGray(image,q) != MagickFalse)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,j);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[j]=QuantumRange-q[j];
          }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
            progress++;
            proceed=SetImageProgress(image,NegateImageTag,progress,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      /*
        BUG FIX: this path previously returned MagickTrue
        unconditionally, hiding pixel-cache and progress-monitor
        failures recorded in status.
      */
      return(status);
    }
  /*
    Negate image (all pixels, row-parallel when OpenMP is available).
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,j);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=QuantumRange-q[j];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,NegateImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N o r m a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The NormalizeImage() method enhances the contrast of a color image by
% mapping the darkest 2 percent of all pixel to black and the brightest
% 1 percent to white.
%
% The format of the NormalizeImage method is:
%
% MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NormalizeImage(Image *image,
  ExceptionInfo *exception)
{
  const double
    pixels = (double) image->columns*image->rows;

  /*
    Enhance contrast by delegating to ContrastStretchImage() with
    fixed black/white points expressed as pixel counts (fractions of
    the total pixel count: 0.15% for the black point, 99.95% for the
    white point).
  */
  return(ContrastStretchImage(image,pixels*0.0015,pixels*0.9995,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i g m o i d a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
% sigmoidal contrast algorithm. Increase the contrast of the image using a
% sigmoidal transfer function without saturating highlights or shadows.
% Contrast indicates how much to increase the contrast (0 is none; 3 is
% typical; 20 is pushing it); mid-point indicates where midtones fall in the
% resultant image (0 is white; 50% is middle-gray; 100% is black). Set
% sharpen to MagickTrue to increase the image contrast otherwise the contrast
% is reduced.
%
% The format of the SigmoidalContrastImage method is:
%
% MagickBooleanType SigmoidalContrastImage(Image *image,
% const MagickBooleanType sharpen,const char *levels,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o contrast: strength of the contrast, the larger the number the more
% 'threshold-like' it becomes.
%
% o midpoint: midpoint of the function as a color value 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
ImageMagick 6 has a version of this function which uses LUTs.
*/
/*
Sigmoidal function Sigmoidal with inflexion point moved to b and "slope
constant" set to a.
The first version, based on the hyperbolic tangent tanh, when combined with
the scaling step, is an exact arithmetic clone of the the sigmoid function
based on the logistic curve. The equivalence is based on the identity
1/(1+exp(-t)) = (1+tanh(t/2))/2
(http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the
scaled sigmoidal derivation is invariant under affine transformations of
the ordinate.
The tanh version is almost certainly more accurate and cheaper. The 0.5
factor in the argument is to clone the legacy ImageMagick behavior. The
reason for making the define depend on atanh even though it only uses tanh
has to do with the construction of the inverse of the scaled sigmoidal.
*/
#if defined(MAGICKCORE_HAVE_ATANH)
#define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) )
#else
#define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) )
#endif
/*
Scaled sigmoidal function:
( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) /
( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) )
See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and
http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit
of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by
zero. This is fixed below by exiting immediately when contrast is small,
leaving the image (or colormap) unmodified. This appears to be safe because
the series expansion of the logistic sigmoidal function around x=b is
1/2-a*(b-x)/4+...
so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh).
*/
#define ScaledSigmoidal(a,b,x) ( \
(Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \
(Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) )
/*
Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b
may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic
sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even
when creating a LUT from in gamut values, hence the branching. In
addition, HDRI may have out of gamut values.
InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal:
It is only a right inverse. This is unavoidable.
*/
/* Right inverse of ScaledSigmoidal (see comment block above). */
static inline double InverseScaledSigmoidal(const double a,const double b,
  const double x)
{
  const double sig0=Sigmoidal(a,b,0.0);
  const double sig1=Sigmoidal(a,b,1.0);
  /* Undo the scaling step of ScaledSigmoidal. */
  const double argument=(sig1-sig0)*x+sig0;
  /*
    Clamp the argument into the open interval on which the inverse is
    defined: (-1,1) for atanh, (0,1) for the logistic form.  The two
    preprocessor branches each complete the initializer started above
    and supply the matching return statement.
  */
  const double clamped=
    (
#if defined(MAGICKCORE_HAVE_ATANH)
      argument < -1+MagickEpsilon
      ?
      -1+MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  return(b+(2.0/a)*atanh(clamped));
#else
      argument < MagickEpsilon
      ?
      MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  return(b-log(1.0/clamped-1.0)/a);
#endif
}
MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
  const MagickBooleanType sharpen,const double contrast,const double midpoint,
  ExceptionInfo *exception)
{
#define SigmoidalContrastImageTag "SigmoidalContrast/Image"
/* Forward and inverse sigmoidal transfer applied per quantum. */
#define ScaledSig(x) ( ClampToQuantum(QuantumRange* \
  ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )
#define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \
  InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Convenience macros.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Side effect: may clamp values unless contrast<MagickEpsilon, in which
    case nothing is done.  (Tiny contrast would divide by ~0 in
    ScaledSigmoidal; the identity is the correct limit.)
  */
  if (contrast < MagickEpsilon)
    return(MagickTrue);
  /*
    Sigmoidal-contrast enhance colormap.
  */
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      if( sharpen != MagickFalse )
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) ScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) ScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) ScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) ScaledSig(
              image->colormap[i].alpha);
        }
      else
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) InverseScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) InverseScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) InverseScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) InverseScaledSig(
              image->colormap[i].alpha);
        }
    }
  /*
    Sigmoidal-contrast enhance image (row-parallel when OpenMP is on).
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if( sharpen != MagickFalse )
          q[i]=ScaledSig(q[i]);
        else
          q[i]=InverseScaledSig(q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two `struct timeval` values.
 * Y is used as scratch space and may be modified by the call.
 * Returns 1 when the difference is negative (X earlier than Y), 0
 * otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow seconds into y until x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Carry excess microseconds from y in the opposite direction. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/* Driver: allocate the 3D grids, run the order-1 variable-coefficient
 * 7-point stencil TESTS times, report the best wall-clock time, and
 * free everything. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* Grid size (+2 halo points per dimension) and number of time steps.
   * BUG FIX: defaults guard against using the variables uninitialized
   * when fewer than 3 (or 4) command-line arguments are supplied. */
  Nx = Ny = Nz = 32 + 2;
  Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // allocate the arrays: A holds two time levels, coef the 7 stencil
  // coefficient fields
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 8;
  tile_size[3] = 2048;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize with reproducible pseudo-random data (fixed seed)
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  /* NOTE(review): <omp.h> is not included; relies on the OpenMP
   * compiler making the prototype available -- confirm. */
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* BUG FIX: the macro defined above is MIN, not min. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  (void) ts_return;

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays.  FIX: also release the top-level pointer
  // tables (A, coef) and the tile-size list, which previously leaked.
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);

  return 0;
}
|
optimizer.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include "cint.h"
#include "cvhf.h"
#include "optimizer.h"
#include "np_helper/np_helper.h"
int int2e_sph();
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
/* Allocate a CVHFOpt and populate it with defaults: no q_cond/dm_cond
 * tables yet, a 1e-14 Schwarz cutoff, and no-op screening callbacks.
 * The caller owns the result and releases it via CVHFdel_optimizer. */
void CVHFinit_optimizer(CVHFOpt **opt, int *atm, int natm,
                        int *bas, int nbas, double *env)
{
        CVHFOpt *o = (CVHFOpt *)malloc(sizeof(CVHFOpt));
        o->nbas = nbas;
        o->direct_scf_cutoff = 1e-14;
        o->q_cond = NULL;
        o->dm_cond = NULL;
        o->fprescreen = &CVHFnoscreen;
        o->r_vkscreen = &CVHFr_vknoscreen;
        *opt = o;
}
/* Release an optimizer created by CVHFinit_optimizer and reset *opt
 * to NULL.  Safe to call with *opt == NULL. */
void CVHFdel_optimizer(CVHFOpt **opt)
{
        CVHFOpt *opt0 = *opt;
        if (!opt0) {
                return;
        }
        /* BUG FIX: the null checks were inverted (`if (!...)`), so the
         * q_cond/dm_cond buffers were never freed and leaked. */
        if (opt0->q_cond) {
                free(opt0->q_cond);
        }
        if (opt0->dm_cond) {
                free(opt0->dm_cond);
        }
        free(opt0);
        *opt = NULL;
}
/* Prescreen callback that never screens anything out: every shell
 * quartet is computed. */
int CVHFnoscreen(int *shls, CVHFOpt *opt,
                 int *atm, int *bas, double *env)
{
        (void)shls;
        (void)opt;
        (void)atm;
        (void)bas;
        (void)env;
        return 1;
}
/* Schwarz screening: keep shell quartet (i,j|k,l) only when the bound
 * q_cond[ij]*q_cond[kl] exceeds the direct-SCF cutoff.  A NULL opt
 * disables screening. */
int CVHFnr_schwarz_cond(int *shls, CVHFOpt *opt,
                        int *atm, int *bas, double *env)
{
        if (!opt) {
                return 1;
        }
        const int i = shls[0];
        const int j = shls[1];
        const int k = shls[2];
        const int l = shls[3];
        const size_t n = opt->nbas;
        assert(opt->q_cond);
        assert(i < n);
        assert(j < n);
        assert(k < n);
        assert(l < n);
        const double bound = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
        return bound > opt->direct_scf_cutoff;
}
/*
 * Prescreen a shell quartet (i,j|k,l) for the 8-fold symmetric JK
 * contraction.  The quartet survives when the Schwarz bound
 * q_cond[ij]*q_cond[kl] exceeds the cutoff AND at least one of the
 * six density blocks it contracts with is large enough to matter.
 * The ji/lk blocks are weighted by 4 (presumably the Coulomb-fold
 * factor from the 8-fold symmetry -- see the vj/vk variants below).
 */
int CVHFnrs8_prescreen(int *shls, CVHFOpt *opt,
                       int *atm, int *bas, double *env)
{
        if (!opt) {
                return 1; // no screen
        }
        int i = shls[0];
        int j = shls[1];
        int k = shls[2];
        int l = shls[3];
        size_t n = opt->nbas;
        double *q_cond = opt->q_cond;
        double *dm_cond = opt->dm_cond;
        assert(q_cond);
        assert(dm_cond);
        assert(i < n);
        assert(j < n);
        assert(k < n);
        assert(l < n);
        double qijkl = q_cond[i*n+j] * q_cond[k*n+l];
        double direct_scf_cutoff = opt->direct_scf_cutoff;
        return qijkl > direct_scf_cutoff
            &&((4*dm_cond[j*n+i]*qijkl > direct_scf_cutoff)
            || (4*dm_cond[l*n+k]*qijkl > direct_scf_cutoff)
            || ( dm_cond[j*n+k]*qijkl > direct_scf_cutoff)
            || ( dm_cond[j*n+l]*qijkl > direct_scf_cutoff)
            || ( dm_cond[i*n+k]*qijkl > direct_scf_cutoff)
            || ( dm_cond[i*n+l]*qijkl > direct_scf_cutoff));
}
/*
 * J-only variant of CVHFnrs8_prescreen: tests just the two 4-weighted
 * (ji, lk) density blocks that contribute to the Coulomb matrix.
 */
int CVHFnrs8_vj_prescreen(int *shls, CVHFOpt *opt,
                          int *atm, int *bas, double *env)
{
        if (!opt) {
                return 1; // no screen
        }
        int i = shls[0];
        int j = shls[1];
        int k = shls[2];
        int l = shls[3];
        size_t n = opt->nbas;
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(i < n);
        assert(j < n);
        assert(k < n);
        assert(l < n);
        double direct_scf_cutoff = opt->direct_scf_cutoff;
        double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
        return qijkl > direct_scf_cutoff
            &&((4*qijkl*opt->dm_cond[j*n+i] > direct_scf_cutoff)
            || (4*qijkl*opt->dm_cond[l*n+k] > direct_scf_cutoff));
}
/*
 * K-only variant of CVHFnrs8_prescreen: tests the four unweighted
 * cross (jk, jl, ik, il) density blocks that feed the exchange
 * matrix.
 */
int CVHFnrs8_vk_prescreen(int *shls, CVHFOpt *opt,
                          int *atm, int *bas, double *env)
{
        if (!opt) {
                return 1; // no screen
        }
        int i = shls[0];
        int j = shls[1];
        int k = shls[2];
        int l = shls[3];
        size_t n = opt->nbas;
        double *q_cond = opt->q_cond;
        double *dm_cond = opt->dm_cond;
        assert(q_cond);
        assert(dm_cond);
        assert(i < n);
        assert(j < n);
        assert(k < n);
        assert(l < n);
        double qijkl = q_cond[i*n+j] * q_cond[k*n+l];
        double direct_scf_cutoff = opt->direct_scf_cutoff;
        return qijkl > direct_scf_cutoff
            &&(( dm_cond[j*n+k]*qijkl > direct_scf_cutoff)
            || ( dm_cond[j*n+l]*qijkl > direct_scf_cutoff)
            || ( dm_cond[i*n+k]*qijkl > direct_scf_cutoff)
            || ( dm_cond[i*n+l]*qijkl > direct_scf_cutoff));
}
/* No-op vk screening: marks every density matrix as unconditioned
 * (NULL entry in dms_cond) and reports 0 as the guaranteed minimum.
 * The return value is the flag that decides whether to
 * transpose01324; always 1 here. */
int CVHFr_vknoscreen(int *shls, CVHFOpt *opt,
                     double **dms_cond, int n_dm, double *dm_atleast,
                     int *atm, int *bas, double *env)
{
        for (int idm = 0; idm < n_dm; idm++) {
                dms_cond[idm] = NULL;
        }
        *dm_atleast = 0;
        return 1;
}
/*
 * Density-fitting J pass 1 prescreen for 3-center-2-electron
 * integrals (ij|k): keep the triplet when the Schwarz bound weighted
 * by the ji density block survives the cutoff.  The auxiliary-shell
 * Schwarz factors are stored in the extra row q_cond[n*n + k].
 */
int CVHFnr3c2e_vj_pass1_prescreen(int *shls, CVHFOpt *opt,
                                  int *atm, int *bas, double *env)
{
        if (!opt) {
                return 1; // no screen
        }
        size_t n = opt->nbas;
        int i = shls[0];
        int j = shls[1];
// Be careful with the range of basis k, which is between nbas and
// nbas+nauxbas. See shls_slice in df_jk.get_j function.
        int k = shls[2] - n;
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(i < n);
        assert(j < n);
        assert(k < n);
        double direct_scf_cutoff = opt->direct_scf_cutoff;
        double qijkl = opt->q_cond[i*n+j] * opt->q_cond[n*n+k];
        return qijkl > direct_scf_cutoff
            && (4*qijkl*opt->dm_cond[j*n+i] > direct_scf_cutoff);
}
/*
 * Density-fitting J pass 2 prescreen: same Schwarz bound as pass 1,
 * but weighted by dm_cond[k] -- the per-auxiliary-function condition
 * (the intermediate fitted density) instead of the ji AO block.
 */
int CVHFnr3c2e_vj_pass2_prescreen(int *shls, CVHFOpt *opt,
                                  int *atm, int *bas, double *env)
{
        if (!opt) {
                return 1; // no screen
        }
        size_t n = opt->nbas;
        int i = shls[0];
        int j = shls[1];
// Be careful with the range of basis k, which is between nbas and
// nbas+nauxbas. See shls_slice in df_jk.get_j function.
        int k = shls[2] - n;
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(i < n);
        assert(j < n);
        assert(k < n);
        double direct_scf_cutoff = opt->direct_scf_cutoff;
        double qijkl = opt->q_cond[i*n+j] * opt->q_cond[n*n+k];
        return qijkl > direct_scf_cutoff
            && (4*qijkl*opt->dm_cond[k] > direct_scf_cutoff);
}
/*
 * Plain Schwarz condition for 3-center-2-electron integrals (ij|k):
 * no density weighting, only the integral-magnitude bound.
 */
int CVHFnr3c2e_schwarz_cond(int *shls, CVHFOpt *opt,
                            int *atm, int *bas, double *env)
{
        if (!opt) {
                return 1; // no screen
        }
        size_t n = opt->nbas;
        int i = shls[0];
        int j = shls[1];
// Be careful with the range of basis k, which is between nbas and
// nbas+nauxbas. See shls_slice in df_jk.get_j function.
        int k = shls[2] - n;
        assert(opt->q_cond);
        assert(opt->dm_cond);
        assert(i < n);
        assert(j < n);
        assert(k < n);
        double qijkl = opt->q_cond[i*n+j] * opt->q_cond[n*n+k];
        return qijkl > opt->direct_scf_cutoff;
}
/* Set the Schwarz-screening threshold used by the prescreen tests. */
void CVHFset_direct_scf_cutoff(CVHFOpt *opt, double threshold)
{
        opt->direct_scf_cutoff = threshold;
}
/* Read back the current Schwarz-screening threshold. */
double CVHFget_direct_scf_cutoff(CVHFOpt *opt)
{
        return opt->direct_scf_cutoff;
}
/*
 * (Re)build the Schwarz q_cond table for the optimizer using the
 * given integral function.  Any previous table is released first.
 */
void CVHFsetnr_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
                          int *ao_loc, int *atm, int natm,
                          int *bas, int nbas, double *env)
{
        /* This memory is released in void CVHFdel_optimizer, Don't know
         * why valgrind raises memory leak here */
        if (opt->q_cond) {
                free(opt->q_cond);
        }
        // nbas in the input arguments may different to opt->nbas.
        // Use opt->nbas because it is used in the prescreen function
        nbas = opt->nbas;
        opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas);
        CVHFset_int2e_q_cond(intor, cintopt, opt->q_cond, ao_loc,
                             atm, natm, bas, nbas, env);
}
/*
 * Non-relativistic 2-electron integrals.
 *
 * Fill the symmetric table q_cond[nbas*nbas] with the Schwarz factors
 * sqrt(max|(ij|ij)|) for every shell pair, in parallel over the lower
 * triangle of pairs.
 */
void CVHFset_int2e_q_cond(int (*intor)(), CINTOpt *cintopt, double *q_cond,
                          int *ao_loc, int *atm, int natm,
                          int *bas, int nbas, double *env)
{
        int shls_slice[] = {0, nbas};
        const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
                                                 atm, natm, bas, nbas, env);
#pragma omp parallel
{
        double qtmp, tmp;
        size_t ij, i, j, di, dj, ish, jsh;
        size_t Nbas = nbas;
        int shls[4];
        double *cache = malloc(sizeof(double) * cache_size);
        /* di = widest shell, used to size the per-thread integral buffer. */
        di = 0;
        for (ish = 0; ish < nbas; ish++) {
                dj = ao_loc[ish+1] - ao_loc[ish];
                di = MAX(di, dj);
        }
        double *buf = malloc(sizeof(double) * di*di*di*di);
#pragma omp for schedule(dynamic, 4)
        for (ij = 0; ij < Nbas*(Nbas+1)/2; ij++) {
                /* Decode the triangular pair index: ish is the row of
                 * ij in the lower triangle, jsh the column. */
                ish = (size_t)(sqrt(2*ij+.25) - .5 + 1e-7);
                jsh = ij - ish*(ish+1)/2;
                di = ao_loc[ish+1] - ao_loc[ish];
                dj = ao_loc[jsh+1] - ao_loc[jsh];
                shls[0] = ish;
                shls[1] = jsh;
                shls[2] = ish;
                shls[3] = jsh;
                /* Tiny floor keeps later products nonzero-safe. */
                qtmp = 1e-100;
                if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
                                  cintopt, cache)) {
                        /* Max over the diagonal elements (ii|jj) of the
                         * (ij|ij) block. */
                        for (i = 0; i < di; i++) {
                        for (j = 0; j < dj; j++) {
                                tmp = fabs(buf[i+di*j+di*dj*i+di*dj*di*j]);
                                qtmp = MAX(qtmp, tmp);
                        } }
                        qtmp = sqrt(qtmp);
                }
                q_cond[ish*nbas+jsh] = qtmp;
                q_cond[jsh*nbas+ish] = qtmp;
        }
        free(buf);
        free(cache);
}
}
/* Replace opt->q_cond with a private copy of the caller-supplied
 * len-element Schwarz table. */
void CVHFset_q_cond(CVHFOpt *opt, double *q_cond, int len)
{
        double *old = opt->q_cond;
        if (old) {
                free(old);
        }
        opt->q_cond = (double *)malloc(sizeof(double) * len);
        NPdcopy(opt->q_cond, q_cond, len);
}
/*
 * Build the shell-pair density condition table dm_cond[nbas*nbas]:
 * for each shell pair, the largest (symmetrized) density-matrix
 * element over all AO pairs in the shells and over all nset density
 * matrices.
 */
void CVHFsetnr_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc,
                             int *atm, int natm, int *bas, int nbas, double *env)
{
        if (opt->dm_cond) { // NOT reuse opt->dm_cond because nset may be diff in different call
                free(opt->dm_cond);
        }
        // nbas in the input arguments may different to opt->nbas.
        // Use opt->nbas because it is used in the prescreen function
        nbas = opt->nbas;
        opt->dm_cond = (double *)malloc(sizeof(double) * nbas*nbas);
        NPdset0(opt->dm_cond, ((size_t)nbas)*nbas);

        const size_t nao = ao_loc[nbas];
        double dmax, tmp;
        size_t i, j, ish, jsh, iset;
        double *pdm;
        for (ish = 0; ish < nbas; ish++) {
        for (jsh = 0; jsh <= ish; jsh++) {
                dmax = 0;
                for (iset = 0; iset < nset; iset++) {
                        pdm = dm + nao*nao*iset;
                        for (i = ao_loc[ish]; i < ao_loc[ish+1]; i++) {
                        for (j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) {
// symmetrize dm_cond because nrs8_prescreen only tests the lower (or upper)
// triangular part of dm_cond. Without the symmetrization, some integrals may be
// incorrectly skipped.
                                tmp = .5 * (fabs(pdm[i*nao+j]) + fabs(pdm[j*nao+i]));
                                dmax = MAX(dmax, tmp);
                        } }
                }
                opt->dm_cond[ish*nbas+jsh] = dmax;
                opt->dm_cond[jsh*nbas+ish] = dmax;
        } }
}
/* Replace opt->dm_cond with a private copy of the caller-supplied
 * len-element density condition table. */
void CVHFset_dm_cond(CVHFOpt *opt, double *dm_cond, int len)
{
        double *old = opt->dm_cond;
        if (old) {
                free(old);
        }
        opt->dm_cond = (double *)malloc(sizeof(double) * len);
        NPdcopy(opt->dm_cond, dm_cond, len);
}
/*
*************************************************
*/
/*
 * Convenience constructor: allocate an optimizer, install the 8-fold
 * symmetric prescreen callback, and fill its q_cond table.
 */
void CVHFnr_optimizer(CVHFOpt **vhfopt, int (*intor)(), CINTOpt *cintopt,
                      int *ao_loc, int *atm, int natm,
                      int *bas, int nbas, double *env)
{
        CVHFinit_optimizer(vhfopt, atm, natm, bas, nbas, env);
        (*vhfopt)->fprescreen = &CVHFnrs8_prescreen;
        CVHFsetnr_direct_scf(*vhfopt, intor, cintopt, ao_loc,
                             atm, natm, bas, nbas, env);
}
|
pnccopy.c | /**
* Copyright 2019 Scott Wales
*
* \author Scott Wales <scott.wales@unimelb.edu.au>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
#include "netcdf.h"
#include "hdf5.h"
#include "hdf5_hl.h"
#include "zlib.h"
#define ERR_IF(x) err_if_(x, #x, __FILE__, __LINE__)
/* Abort with "<file>:<line> <message>" on stderr when `err` is true;
 * no-op otherwise.  Normally invoked through the ERR_IF macro, which
 * stringifies the failing expression. */
void err_if_(bool err, const char * message, const char * file, size_t line) {
    if (!err)
        return;
    fprintf(stderr, "%s:%zu %s\n", file, line, message);
    exit(-1);
}
/* Like err_if_, but prints two message parts (typically the failing
 * expression plus a library-specific error string). */
void err_if_extra(bool err, const char * message1, const char * message2, const char * file, size_t line) {
    if (!err)
        return;
    fprintf(stderr, "%s:%zu %s %s\n", file, line, message1, message2);
    exit(-1);
}
#define NC_ERR(x) nc_err_(x, #x, __FILE__, __LINE__)
/* Check a netCDF status code: abort with the call text and the
 * nc_strerror() description unless it is NC_NOERR.  Invoked via the
 * NC_ERR macro. */
void nc_err_(int err, const char * message, const char * file, size_t line) {
    const char * nc_str = nc_strerror(err);
    err_if_extra(err != NC_NOERR, message, nc_str, file, line);
}
#define H5_ERR(x) h5_err_(x, #x, __FILE__, __LINE__)
/* Check an HDF5 return value: on a negative value print the HDF5
 * error stack and abort; otherwise pass the value through so calls
 * can be wrapped inline (e.g. hid_t id = H5_ERR(H5Dopen(...))). */
hid_t h5_err_(hid_t err, const char * message, const char * file, size_t line) {
    if (err < 0) {
        H5Eprint(H5Eget_current_stack(), stderr);
        err_if_(err < 0, message, file, line);
    }
    return err;
}
#define Z_ERR(x) z_err_(x, #x, __FILE__, __LINE__)
void z_err_(hid_t err, const char * message, const char * file, size_t line) {
err_if_extra(err == Z_BUF_ERROR, message, "Z_BUF_ERROR", file, line);
err_if_extra(err == Z_MEM_ERROR, message, "Z_MEM_ERROR", file, line);
err_if_extra(err == Z_STREAM_ERROR, message, "Z_STREAM_ERROR", file, line);
err_if_extra(err == Z_DATA_ERROR, message, "Z_DATA_ERROR", file, line);
}
/* Copy one dimension definition from ncidin to ncidout, asserting
 * that the output file assigns the same dimension id. */
void copy_dim(int dimid, int ncidin, int ncidout) {
    char dim_name[NC_MAX_NAME+1];
    size_t dim_len;
    int out_id;

    NC_ERR(nc_inq_dim(ncidin, dimid, dim_name, &dim_len));
    NC_ERR(nc_def_dim(ncidout, dim_name, dim_len, &out_id));
    ERR_IF(dimid != out_id);
}
/* Copy attribute `attid` of variable `varid` (NC_GLOBAL for file
 * attributes) from ncidin to ncidout, preserving name, type and
 * length. */
void copy_att(int varid, int attid, int ncidin, int ncidout) {
    char name[NC_MAX_NAME+1];
    nc_type xtype;
    size_t len;

    NC_ERR(nc_inq_attname(ncidin, varid, attid, name));
    NC_ERR(nc_inq_att(ncidin, varid, name, &xtype, &len));
    /* 8 bytes per element covers the widest numeric netCDF type
     * (double/int64).  NOTE(review): assumes no attribute type wider
     * than 8 bytes per element -- confirm for e.g. NC_STRING data. */
    char buffer[len * 8];
    NC_ERR(nc_get_att(ncidin, varid, name, buffer));
    NC_ERR(nc_put_att(ncidout, varid, name, xtype, len, buffer));
}
/* Copy the full data of a contiguous (unchunked) variable in one
 * read/write pair.  The buffer is sized as the product of the
 * dimension lengths times sizeof(double); NOTE(review): this assumes
 * no element type wider than double -- confirm. */
void copy_contiguous(int ndims, int dimids[], int varid, int ncidin, int ncidout) {
    size_t buffer_size = sizeof(double);
    for (int d=0; d<ndims; ++d) {
        size_t len;
        NC_ERR(nc_inq_dimlen(ncidin, dimids[d], &len));
        buffer_size *= len;
    }

    void * buffer = malloc(buffer_size);
    NC_ERR(nc_get_var(ncidin, varid, buffer));
    NC_ERR(nc_put_var(ncidout, varid, buffer));
    free(buffer);
}
/* Re-create variable `varid` in ncidout with the same name, type,
 * dimensions, shuffle/chunking settings and attributes, then copy its
 * data when it is stored contiguously (chunked data is handled by the
 * chunk-level path elsewhere in this tool). */
void copy_var(int varid, int ncidin, int ncidout) {
    char name[NC_MAX_NAME+1];
    int ndims;
    int natts;
    nc_type xtype;
    int varidout;

    NC_ERR(nc_inq_var(ncidin, varid, name, &xtype, &ndims, NULL, &natts));

    int dimids[ndims];
    int shuffle;
    int deflate;
    int deflate_level;
    int contiguous;
    size_t chunksizes[ndims];
    NC_ERR(nc_inq_var(ncidin, varid, NULL, NULL, NULL, dimids, NULL));
    NC_ERR(nc_inq_var_deflate(ncidin, varid, &shuffle, &deflate, &deflate_level));
    NC_ERR(nc_inq_var_chunking(ncidin, varid, &contiguous, chunksizes));

    NC_ERR(nc_def_var(ncidout, name, xtype, ndims, dimids, &varidout));
    /* NOTE(review): the output deflate level is hard-coded to 6,
     * ignoring the input's deflate_level -- confirm this is the
     * intended recompression target level. */
    NC_ERR(nc_def_var_deflate(ncidout, varidout, shuffle, deflate, 6));
    NC_ERR(nc_def_var_chunking(ncidout, varidout, contiguous, chunksizes));
    ERR_IF(varid != varidout);

    for (int a=0; a<natts; ++a) {
        copy_att(varid, a, ncidin, ncidout);
    }

    if (contiguous == NC_CONTIGUOUS) {
        copy_contiguous(ndims, dimids, varid, ncidin, ncidout);
    }
}
/* Replicate the structure of `pathin` into a freshly created NetCDF-4
 * file at `pathout`: all dimensions, all variable definitions (with
 * contiguous data copied in full), and all global attributes.
 * Chunked variable data is NOT copied here. */
void copy_structure(const char * pathin, const char * pathout) {
    int ncidin;
    NC_ERR(nc_open(pathin, NC_NOWRITE, &ncidin));

    int ncidout;
    NC_ERR(nc_create(pathout, NC_NOCLOBBER | NC_NETCDF4, &ncidout));

    int ndims, nvars, natts;
    NC_ERR(nc_inq(ncidin, &ndims, &nvars, &natts, NULL));
    printf("Dims %d, Vars %d, Atts %d\n", ndims, nvars, natts);

    for (int d=0; d<ndims; ++d) {
        copy_dim(d, ncidin, ncidout);
    }
    for (int v=0; v<nvars; ++v) {
        copy_var(v, ncidin, ncidout);
    }
    for (int a=0; a<natts; ++a) {
        copy_att(NC_GLOBAL, a, ncidin, ncidout);
    }

    NC_ERR(nc_close(ncidout));
    NC_ERR(nc_close(ncidin));
}
/* Re-deflate one chunk: inflate the read_size bytes in *buffer into
 * *tmp_buffer, then deflate them back into *buffer at `level`.
 * Either buffer may be grown (realloc'd) as a side effect; the caller
 * keeps ownership and should reuse them across chunks.  Returns the
 * number of compressed bytes now held in *buffer. */
size_t recompress_chunk(void ** buffer, size_t * buffer_size, size_t read_size, void ** tmp_buffer, size_t * tmp_buffer_size, int level) {
    // Uncompress buffer into tmp_buffer
    unsigned long destlen = *tmp_buffer_size;
    int err = uncompress(*tmp_buffer, &destlen, *buffer, read_size);

    if (err == Z_BUF_ERROR) {
        // Decompress buffer not big enough, reallocate (grow at least
        // to the compressed size, then by 4x) and retry recursively
        if (*tmp_buffer_size < *buffer_size) {
            *tmp_buffer_size = *buffer_size;
        }
        *tmp_buffer_size *= 4;
        *tmp_buffer = realloc(*tmp_buffer, *tmp_buffer_size);
        return recompress_chunk(buffer, buffer_size, read_size, tmp_buffer, tmp_buffer_size, level);
    }
    Z_ERR(err);

    // Compress tmp_buffer back into buffer, growing it to zlib's
    // worst-case bound for this payload first if needed
    size_t uncompressed_size = destlen;
    size_t min_size = compressBound(uncompressed_size);
    if (*buffer_size < min_size) {
        *buffer_size = min_size;
        *buffer = realloc(*buffer, *buffer_size);
    }
    destlen = *buffer_size;
    Z_ERR(compress2(*buffer, &destlen, *tmp_buffer, uncompressed_size, level));

    return destlen;
}
/*
 * Return the deflate (zlib) compression level configured on an HDF5
 * dataset, or -1 if the dataset has no deflate filter.
 */
int get_compression_level(hid_t data) {
    hid_t pwrite = H5_ERR(H5Dget_create_plist(data));
    unsigned int filter_flags;
    size_t filter_elements=1;
    unsigned int filter_values[filter_elements];  /* level is cd_values[0] */
    unsigned int filter_config;
    /* Deliberately not wrapped in H5_ERR: a negative return simply means
     * "no deflate filter present", not a fatal error. */
    int err = H5Pget_filter_by_id(pwrite, H5Z_FILTER_DEFLATE, &filter_flags, &filter_elements, filter_values, 0, NULL, &filter_config);
    H5_ERR(H5Pclose(pwrite));
    if (err < 0) {
        return -1;
    } else {
        return filter_values[0];
    }
}
/*
 * Copy all chunks of a chunked dataset from data_in to data_out at the
 * raw-chunk level (bypassing the filter pipeline), re-deflating each
 * chunk at the output's compression level when one is configured
 * (level >= 0). Prints a 3-character percentage progress indicator.
 */
void copy_chunks(int ndims, const hsize_t size[], const hsize_t chunksize[], hid_t data_in, hid_t data_out) {
    size_t nchunks = 1;
    size_t nchunks_d[ndims];  /* chunk count along each dimension */
    for (int d=0; d<ndims; ++d) {
        /* ceil-divide: partial chunks at the upper edge still count */
        nchunks_d[d] = (size_t)ceil(size[d] / (float)chunksize[d]);
        nchunks *= nchunks_d[d];
    }
    hid_t pread = H5_ERR(H5Pcreate(H5P_DATASET_XFER));
    int level = get_compression_level(data_out);  /* -1 = no deflate on output */
    int progress = 0;
    printf("% 3.0f", 0.0);
    #pragma omp parallel default(shared)
    {
        /* Per-thread scratch buffers, grown on demand. */
        size_t buffer_size = 0;
        void * buffer = NULL;
        size_t tmp_buffer_size = 1024;
        void * tmp_buffer = malloc(tmp_buffer_size);
        #pragma omp for
        for (size_t c=0; c<nchunks; ++c) {
            /* Decode the linear chunk index c into per-dimension offsets
             * (mixed-radix decomposition over nchunks_d). */
            hsize_t offset[ndims];
            size_t tmp = c;
            for (int d=0; d<ndims; ++d) {
                offset[d] = tmp % nchunks_d[d] * chunksize[d];
                tmp = tmp / nchunks_d[d];
            }
            hsize_t chunk_size;
            /* HDF5 is not thread-safe by default: serialize every HDF5
             * call; only the recompression below runs in parallel. */
            #pragma omp critical
            {
                H5_ERR(H5Dget_chunk_storage_size(data_in, offset, &chunk_size));
            }
            if (chunk_size > buffer_size) {
                /* NOTE(review): realloc result unchecked — an OOM here
                 * would crash in the chunk read below. */
                buffer_size = chunk_size;
                buffer = realloc(buffer, buffer_size);
            }
            uint32_t filter_mask = 0;
            #pragma omp critical
            {
                H5_ERR(H5DOread_chunk(data_in, pread, offset, &filter_mask, buffer));
            }
            if (level >= 0) {
                chunk_size = recompress_chunk(&buffer, &buffer_size, chunk_size, &tmp_buffer, &tmp_buffer_size, level);
            }
            #pragma omp critical
            {
                H5_ERR(H5DOwrite_chunk(data_out, pread, filter_mask, offset, chunk_size, buffer));
                ++progress;
                if (progress % 100 == 0) {
                    /* overwrite the 3-char percentage in place */
                    printf("\b\b\b% 3.0f", progress/(float)nchunks*100);
                    fflush(stdout);
                }
            }
        }
        free(buffer);
        free(tmp_buffer);
    }
    printf("\b\b\b% 3.0f", 100.0);
    H5_ERR(H5Pclose(pread));
}
/*
 * H5Ovisit callback: for every chunked dataset in the input file, copy
 * its raw chunks into the identically-named dataset of the output file
 * (passed via op_data as a hid_t*). Non-dataset objects and
 * non-chunked datasets are skipped (the latter were already copied by
 * the NetCDF structure pass).
 */
herr_t copy_object(hid_t oid, const char * name, const H5O_info_t * info, void * op_data) {
    if (info->type != H5O_TYPE_DATASET) {
        return 0;
    }
    hid_t src = H5_ERR(H5Dopen(oid, name, H5P_DEFAULT));
    hid_t plist = H5_ERR(H5Dget_create_plist(src));
    H5D_layout_t layout = H5_ERR(H5Pget_layout(plist));
    if (layout == H5D_CHUNKED) {
        printf("%s\t", name);
        /* Dataset extent */
        hid_t space = H5_ERR(H5Dget_space(src));
        int rank = H5_ERR(H5Sget_simple_extent_ndims(space));
        hsize_t extent[rank];
        H5_ERR(H5Sget_simple_extent_dims(space, extent, NULL));
        H5_ERR(H5Sclose(space));
        /* Chunk shape */
        hsize_t chunk[rank];
        H5_ERR(H5Pget_chunk(plist, rank, chunk));
        hid_t dst_file = *(hid_t*)op_data;
        hid_t dst = H5_ERR(H5Dopen(dst_file, name, H5P_DEFAULT));
        copy_chunks(rank, extent, chunk, src, dst);
        H5_ERR(H5Dclose(dst));
        printf("\n");
    }
    H5_ERR(H5Pclose(plist));
    H5_ERR(H5Dclose(src));
    return 0;
}
/*
 * Copy the raw chunk data of every chunked dataset from pathin into the
 * already-structured output file at pathout (see copy_structure).
 */
void copy_data(const char * pathin, const char * pathout) {
    hid_t src = H5_ERR(H5Fopen(pathin, H5F_ACC_RDONLY, H5P_DEFAULT));
    hid_t dst = H5_ERR(H5Fopen(pathout, H5F_ACC_RDWR, H5P_DEFAULT));
    /* Visit every object by name; copy_object filters for chunked datasets. */
    H5_ERR(H5Ovisit(src, H5_INDEX_NAME, H5_ITER_NATIVE, copy_object, &dst));
    H5_ERR(H5Fclose(dst));
    H5_ERR(H5Fclose(src));
}
/*
 * Usage: prog <input.nc> <output.nc>
 * Copies the NetCDF-4 structure of the input, then copies/recompresses
 * the chunked data via the HDF5 raw-chunk interface.
 */
int main(int argc, char ** argv) {
    /* The original dereferenced argv[1]/argv[2] without checking argc. */
    if (argc != 3) {
        fprintf(stderr, "Usage: %s <input> <output>\n", argv[0]);
        return 1;
    }
    /* omp_get_num_threads() is always 1 outside a parallel region; only
     * omp_get_max_threads() reflects the configured thread count. */
    fprintf(stderr, "Parallel with up to %d threads\n", omp_get_max_threads());
    copy_structure(argv[1], argv[2]);
    copy_data(argv[1], argv[2]);
    return 0;
}
|
device_utilities.h | /**
*
* OHIO STATE UNIVERSITY SOFTWARE DISTRIBUTION LICENSE
*
* Parallel CCD++ on GPU (the “Software”) Copyright (c) 2017, The Ohio State
* University. All rights reserved.
*
* The Software is available for download and use subject to the terms and
* conditions of this License. Access or use of the Software constitutes acceptance
* and agreement to the terms and conditions of this License. Redistribution and
* use of the Software in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the capitalized paragraph below.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the capitalized paragraph below in the documentation
* and/or other materials provided with the distribution.
*
* 3. The names of Ohio State University, or its faculty, staff or students may not
* be used to endorse or promote products derived from the Software without
* specific prior written permission.
*
* This software was produced with support from the National Science Foundation
* (NSF) through Award 1629548. Nothing in this work should be construed as
* reflecting the official policy or position of the Defense Department, the United
* States government, Ohio State University.
*
* THIS SOFTWARE HAS BEEN APPROVED FOR PUBLIC RELEASE, UNLIMITED DISTRIBUTION. THE
* SOFTWARE IS PROVIDED “AS IS” AND WITHOUT ANY EXPRESS, IMPLIED OR STATUTORY
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, WARRANTIES OF ACCURACY, COMPLETENESS,
* NONINFRINGEMENT, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. ACCESS OR USE OF THE SOFTWARE IS ENTIRELY AT THE USER’S RISK. IN
* NO EVENT SHALL OHIO STATE UNIVERSITY OR ITS FACULTY, STAFF OR STUDENTS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
* TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. THE SOFTWARE
* USER SHALL INDEMNIFY, DEFEND AND HOLD HARMLESS OHIO STATE UNIVERSITY AND ITS
* FACULTY, STAFF AND STUDENTS FROM ANY AND ALL CLAIMS, ACTIONS, DAMAGES, LOSSES,
* LIABILITIES, COSTS AND EXPENSES, INCLUDING ATTORNEYS’ FEES AND COURT COSTS,
* DIRECTLY OR INDIRECTLY ARISING OUT OF OR IN CONNECTION WITH ACCESS OR USE OF THE
* SOFTWARE.
*
*/
/**
*
* Author:
* Israt (nisa.1@osu.edu)
*
* Contacts:
* Israt (nisa.1@osu.edu)
* Aravind Sukumaran-Rajam (sukumaranrajam.1@osu.edu)
* P. (Saday) Sadayappan (sadayappan.1@osu.edu)
*
*/
#include "util.h"
// Base load factor used to derive the geometric bin bounds in
// binning()/tiled_binning(): UB[i] = (1 << i) * THREADLOAD (+1).
const int THREADLOAD = 2;
// Number of bins — and CPU threads — used by the binning routines.
int NUM_THRDS = 10;
// Record the start event on the given stream (pair with cuda_timerEnd).
void cuda_timerStart(cudaEvent_t start, cudaStream_t streamT) {
    cudaEventRecord(start, streamT);
}
/*
 * Record the stop event and return the elapsed time since `start` in
 * milliseconds. cudaDeviceSynchronize() first drains all pending device
 * work so the measurement covers everything launched since
 * cuda_timerStart, not just work on streamT.
 */
float cuda_timerEnd(cudaEvent_t start, cudaEvent_t stop, cudaStream_t streamT) {
    float mili = 0;
    cudaDeviceSynchronize();
    cudaEventRecord(stop, streamT);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&mili, start, stop);
    return mili;
}
/*
 * Snapshot the matrix's nonzero values into the flat array copy_R,
 * iterating column-by-column via the CSC column pointers.
 * NOTE(review): values are read through get_csr_val() but indexed with
 * CSC offsets — presumably csr_val aliases (or is laid out like) the CSC
 * value array here; confirm against the SparseMatrix implementation.
 */
void copy_R(SparseMatrix &R, DTYPE *copy_R) //R to R copy
{
    auto val_ptr = R.get_csr_val();
#pragma omp parallel for
    for (int c = 0; c < R.cols_; ++c) {
        for (int idx = R.get_csc_col_ptr()[c]; idx < R.get_csc_col_ptr()[c + 1];
                ++idx)
            copy_R[idx] = val_ptr[idx];
    }
}
/*
 * Inverse of copy_R: restore the matrix's nonzero values from the flat
 * snapshot copy_R, writing back through the CSC column ranges.
 * NOTE(review): as in copy_R, the value pointer comes from get_csr_val()
 * while offsets are CSC — presumably shared storage; confirm.
 */
void copy_R1(DTYPE *copy_R, SparseMatrix &R) {
    auto val_ptr = R.get_csr_val();
#pragma omp parallel for
    for (int c = 0; c < R.cols_; ++c) {
        for (int idx = R.get_csc_col_ptr()[c]; idx < R.get_csc_col_ptr()[c + 1];
                ++idx)
            val_ptr[idx] = copy_R[idx];
    }
}
/*
 * Build per-column tile boundaries: tiled_bin[t][c] holds the index into
 * the CSC arrays of the first nonzero of column c whose row is >= t*TS.
 * Row indices within a column are assumed sorted ascending.
 */
void make_tile(SparseMatrix &R, MatInt &tiled_bin, const int TS) {
#pragma omp parallel for
    for (int c = 0; c < R.cols_; ++c) {
        long idx = R.get_csc_col_ptr()[c];
        tiled_bin[0][c] = idx;
        for (int tile = TS; tile < (R.rows_ + TS - 1); tile += TS) {
            int tile_no = tile / TS; // - 1;
            /* Bound-check idx BEFORE dereferencing csc_row_indx: the
             * original order read csc_row_indx[idx] one element past the
             * end of the array when idx reached the last column's end. */
            while (idx < R.get_csc_col_ptr()[c + 1]
                    && R.get_csc_row_indx()[idx] < tile) {
                idx++;
            }
            tiled_bin[tile_no][c] = idx;
        }
    }
}
/*
 * Variant of make_tile with tile boundaries shifted by half a tile (the
 * first boundary sits at TS + TS/2), used for the offset tiling pass.
 */
void make_tile_odd(SparseMatrix &R, MatInt &tiled_bin, const int TS) {
#pragma omp parallel for
    for (int c = 0; c < R.cols_; ++c) {
        long idx = R.get_csc_col_ptr()[c];
        tiled_bin[0][c] = idx;
        for (int tile = TS + (TS / 2); tile < (R.rows_ + (TS + (TS / 2)) - 1);
                tile += TS) {
            int tile_no = tile / TS; // - 1;
            /* Bound-check idx BEFORE dereferencing csc_row_indx: the
             * original order read one element past the end of the row
             * index array at the last column position. */
            while (idx < R.get_csc_col_ptr()[c + 1]
                    && R.get_csc_row_indx()[idx] < tile) {
                idx++;
            }
            tiled_bin[tile_no][c] = idx;
        }
    }
}
/*
 * Prepare per-bin column lists for one tile pass. The NNZ-binned variant
 * is disabled (kept below as commented-out reference code); as committed,
 * every column of R is appended to bin 6 — a single non-binned group —
 * and count[6] receives the column count.
 * NOTE(review): tile_no and tiled_bin are unused on the active path.
 */
void tiled_binning(SparseMatrix &R, int *host_rowGroupPtr, int *LB, int *UB,
        int *count, MatInt &tiled_bin, const int tile_no) {
    /* Geometric bin bounds: UB[i] = 2^i * THREADLOAD, LB[i] = UB[i]/2. */
    for (int i = 0; i < NUM_THRDS; i++) {
        count[i] = 0;
        UB[i] = (1 << i) * THREADLOAD;
        LB[i] = UB[i] >> 1;
    }
    LB[0] = 0;
    UB[NUM_THRDS - 1] = R.max_col_nnz_ + 1;  /* last bin is open-ended */
    // // // // //***********binned
    // omp_set_num_threads(NUM_THRDS); // create as many CPU threads as there are # of bins
    // #pragma omp parallel
    // {
    // unsigned int cpu_thread_id = omp_get_thread_num();
    // int i = cpu_thread_id; count[i] = 0;
    // for (int col = 0; col < R.cols; col++){
    // //for (int col = tile_no_c*5*tileSize_H; col < ((tile_no_c+1)*5*tileSize_H) && col < R.cols ; col++){
    // int NNZ = tiled_bin[tile_no+1][col] - tiled_bin[tile_no][col]; // R.col_ptr[col + 1] - R.col_ptr[col];
    // if (NNZ >= LB[i] && NNZ < UB[i]){
    // host_rowGroupPtr[R.cols * i + count[i]++] = col;
    // }
    // }
    // }
    //*********non-binned
    int i = 6;
    count[i] = 0;
    for (int col = 0; col < R.cols_; col++) {
        host_rowGroupPtr[R.cols_ * i + count[i]++] = col;
    }
    //*********non-binned
    // int i = 6;
    // count[i] = 0;
    // for (int col = 0; col < R.cols; col++){
    // int NNZ = R.col_ptr[col+1] - R.col_ptr[col];
    // host_rowGroupPtr[R.cols * i + count[i]++] = col;
    // printf("%d %d\n",col, NNZ );
    // }
    // printf("done for R\n");
}
/*
 * Distribute columns into NUM_THRDS bins by nonzero count. Each of the
 * NUM_THRDS OpenMP threads owns exactly one bin i and scans every column,
 * recording in host_rowGroupPtr[R.cols_ * i ...] the columns whose NNZ
 * lies in (LB[i], UB[i]); count[i] returns each bin's size.
 * Columns with zero nonzeros land in no bin (NNZ > LB[0] = 0 fails).
 */
void binning(SparseMatrix &R, int *host_rowGroupPtr, int *LB, int *UB,
        int *count) {
    /* Geometric bin bounds: UB[i] = 2^i * THREADLOAD + 1, LB[i] = UB[i]/2. */
    for (int i = 0; i < NUM_THRDS; i++) {
        count[i] = 0;
        UB[i] = (1 << i) * THREADLOAD + 1;
        LB[i] = UB[i] >> 1;
    }
    LB[0] = 0;
    UB[NUM_THRDS - 1] = R.max_col_nnz_ + 1;  /* last bin is open-ended */
    omp_set_num_threads(NUM_THRDS); // create as many CPU threads as there are # of bins
#pragma omp parallel
    {
        unsigned int cpu_thread_id = omp_get_thread_num();
        int i = cpu_thread_id;
        for (int col = 0; col < R.cols_; col++) {
            int NNZ = R.get_csc_col_ptr()[col + 1] - R.get_csc_col_ptr()[col];
            if (NNZ > LB[i] && NNZ < UB[i]) {
                host_rowGroupPtr[R.cols_ * i + count[i]++] = col; ////changed
            }
        }
    }
}
/*
 * One thread per column c: H[c,:] = temp_H[c,:] / nnz(c), where nnz(c)
 * is taken from the CSC column pointers. Columns with no nonzeros are
 * left untouched. m = number of columns, k = latent dimension.
 */
__global__ void weighted_H_all(int const* __restrict__ R_colPtr,
        DTYPE * __restrict__ H, DTYPE * __restrict__ temp_H, int m, int k) {
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (c < m) {
        int nnz = R_colPtr[c + 1] - R_colPtr[c];
        if (nnz != 0) {
            for (int t = 0; t < k; ++t)
                H[c * k + t] = temp_H[c * k + t] / nnz;
        }
    }
}
/*
 * Tiled variant of weighted_H_all: nnz counts only the entries between
 * R_colPtr[c] and the tile-limited bound R_rowLim[c] (instead of the
 * full column). Columns with no nonzeros in the tile are left untouched.
 */
__global__ void weighted_H(int const* __restrict__ R_colPtr,
        int const* __restrict__ R_rowLim, DTYPE * __restrict__ H,
        DTYPE * __restrict__ temp_H, int m, int k) {
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (c < m) {
        int nnz = R_rowLim[c] - R_colPtr[c]; ////////////-R_colPtr[c];
        if (nnz != 0) {
            for (int t = 0; t < k; ++t)
                H[c * k + t] = temp_H[c * k + t] / nnz;
        }
    }
}
/*
 * One thread per element c: v[c] = g[c] / h[c], writing 0 when h[c] == 0
 * to guard against division by zero.
 * NOTE(review): the `lambda` parameter and R_colPtr are unused here —
 * regularization is presumably already folded into g/h by the caller.
 */
__global__ void assignment(int const* __restrict__ R_colPtr,
        DTYPE * __restrict__ v, DTYPE * __restrict__ g, DTYPE *__restrict__ h,
        DTYPE lambda, int m) {
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (c < m) {
        DTYPE gc = g[c], hc = h[c];
        if (hc == 0)
            v[c] = 0; //
        else
            v[c] = gc / hc;
    }
}
/*
 * One thread per test entry c: accumulate the prediction W^T H for the
 * 1-based (row, col) pair of test triple c into pred_v[c], then store
 * the squared error against test_val[c] in rmse[c].
 * W and H are stored feature-major: W[t * rows + row], H[t * cols + col].
 */
__global__ void GPU_rmse(int const* __restrict__ test_row,
        int const * __restrict__ test_col, DTYPE const * __restrict__ test_val,
        DTYPE * __restrict__ pred_v, DTYPE * __restrict__ rmse,
        DTYPE const * __restrict__ W, DTYPE const * __restrict__ H, int m,
        int k, int rows, int cols) {
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (c < m) {
        /* Hoist loop-invariant loads out of the k-loop: the original
         * re-read test_row[c]/test_col[c] on every iteration. */
        int i = test_row[c];
        int j = test_col[c];
        DTYPE pred = pred_v[c];  /* keep the original accumulate-into semantics */
        for (int t = 0; t < k; t++) {
            pred += W[t * rows + (i - 1)] * H[t * cols + (j - 1)];
        }
        pred_v[c] = pred;
        DTYPE diff = pred - test_val[c];
        rmse[c] = diff * diff;
    }
}
|
vla-2.c | // { dg-do compile }
/* { dg-require-effective-target alloca } */
/* Compile-only test: a variable-length array must be accepted in an
   OpenMP private() clause, giving each thread its own n-sized copy of A. */
void foo(int n, int i)
{
  int A[n];
#pragma omp parallel private(A)
  {
    A[i] = 0;
  }
}
|
wino_conv_kernel_x86.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: haoluo@openailab.com
*/
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include "wino_conv_kernel_x86.h"
#define TILE 4
#define ELEM_SIZE ((TILE + 2) * (TILE + 2))
#define WINO_MAX(a, b) ((a) > (b) ? (a) : (b))
#define WINO_MIN(a, b) ((a) < (b) ? (a) : (b))
/* Apply ReLU in place over `size` floats; when activation > 0 the result
 * is additionally clamped to [0, activation] (ReLU-N, e.g. ReLU6). */
static void relu(float* data, int size, int activation)
{
    for (int idx = 0; idx < size; idx++)
    {
        float v = data[idx] > 0.0f ? data[idx] : 0.0f;
        if (activation > 0 && v > (float)activation)
        {
            v = (float)activation;
        }
        data[idx] = v;
    }
}
/*
 * Bytes of scratch memory needed for the transformed (Winograd-domain)
 * kernel: output_c * input_c * ELEM_SIZE floats, plus 128 bytes of slack.
 * NOTE(review): the product is computed as unsigned long but returned
 * through an int — very large layers could truncate; confirm the callers'
 * size ranges before relying on this for allocation.
 */
static int get_private_mem_size(struct ir_tensor* filter, struct conv_param* param)
{
    int output_c = filter->dims[0];
    int input_c = filter->dims[1];
    int trans_ker_size = (unsigned long)output_c * input_c * ELEM_SIZE * sizeof(float);
    return trans_ker_size + 128; // caution
}
/* Copy an m x n float matrix into an m_align x n_align destination at
 * row offset pad_h / column offset pad_w. The caller is expected to have
 * zero-filled dst; only the copied region is written. If the source is
 * already at least as large as the destination in both dimensions, a
 * straight bulk copy is performed (pad offsets are ignored on that path). */
static void pad_0_align_2D(float* dst, float* src, int m, int n, int m_align, int n_align, int pad_h, int pad_w)
{
    if (m >= m_align && n >= n_align)
    {
        memcpy(dst, src, (unsigned long)m * n * sizeof(float));
        return;
    }
    for (int row = 0; row < m; ++row)
    {
        float* dst_row = dst + (row + pad_h) * n_align + pad_w;
        memcpy(dst_row, src + row * n, n * sizeof(float));
    }
}
/* Per-channel wrapper around pad_0_align_2D: pads each of the c planes
 * (m x n) of src into the m_align x n_align planes of dst, or falls back
 * to a single bulk copy when no padding is required. */
static void pad_0_align_3D(float* dst, float* src, int m, int n, int m_align, int n_align, int c, int pad_h, int pad_w)
{
    if (m >= m_align && n >= n_align)
    {
        memcpy(dst, src, (unsigned long)c * m * n * sizeof(float));
        return;
    }
    for (int ch = 0; ch < c; ++ch)
    {
        pad_0_align_2D(dst + ch * m_align * n_align, src + ch * m * n, m, n, m_align, n_align, pad_h, pad_w);
    }
}
/* Inverse of pad_0_align_2D: extract the m x n region starting at
 * (pad_h, pad_w) inside an m_align x n_align source into a packed dst,
 * or bulk-copy when no trimming is needed. */
static void delete_0_2D(float* dst, float* src, int m_align, int n_align, int m, int n, int pad_h, int pad_w)
{
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, (unsigned long)m * n * sizeof(float));
        return;
    }
    for (int row = 0; row < m; ++row)
    {
        memcpy(dst + row * n, src + (row + pad_h) * n_align + pad_w, n * sizeof(float));
    }
}
/* Per-channel inverse of pad_0_align_3D: trims each m_align x n_align
 * plane of src down to m x n in dst via delete_0_2D (the original
 * comment here was a stale copy of the padding variant's). */
static void delete_0_3D(float* dst, float* src, int m_align, int n_align, int m, int n, int c, int pad_h, int pad_w)
{
    if (n >= n_align && m >= m_align)
    {
        memcpy(dst, src, (unsigned long)c * m * n * sizeof(float));
        return;
    }
    for (int ch = 0; ch < c; ++ch)
    {
        delete_0_2D(dst + ch * m * n, src + ch * m_align * n_align, m_align, n_align, m, n, pad_h, pad_w);
    }
}
void conv3x3s1_winograd43_sse(float* bottom_blob, float* top_blob, float* kernel_tm_test, float* dot_block,
float* transform_input, float* output_bordered, float* _bias, int w, int h, int inch,
int outw, int outh, int outch, int num_thread)
{
size_t elemsize = sizeof(float);
const float* bias = _bias;
// pad to 4n+2, winograd F(4,3)
float* bottom_blob_bordered = bottom_blob;
int outw_align = (outw + 3) / 4 * 4;
int outh_align = (outh + 3) / 4 * 4;
w = outw_align + 2;
h = outh_align + 2;
// BEGIN transform input
float* bottom_blob_tm = NULL;
{
int w_tm = outw_align / 4 * 6;
int h_tm = outh_align / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
const int tiles_n = 4 * inch * tiles;
bottom_blob_tm = transform_input;
// BT
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
#if __AVX__
__m256 _1_n = _mm256_set1_ps(-1);
__m256 _2_p = _mm256_set1_ps(2);
__m256 _2_n = _mm256_set1_ps(-2);
__m256 _4_p = _mm256_set1_ps(4);
__m256 _4_n = _mm256_set1_ps(-4);
__m256 _5_n = _mm256_set1_ps(-5);
#endif
#pragma omp parallel for num_threads(num_thread)
for (int q = 0; q < inch; q++)
{
const float* img = bottom_blob_bordered + q * w * h;
for (int j = 0; j < nColBlocks; j++)
{
const float* r0 = img + w * j * 4;
const float* r1 = r0 + w;
const float* r2 = r1 + w;
const float* r3 = r2 + w;
const float* r4 = r3 + w;
const float* r5 = r4 + w;
for (int i = 0; i < nRowBlocks; i++)
{
float* out_tm0 = bottom_blob_tm + 4 * inch * (j * nRowBlocks + i) + 4 * q;
float* out_tm1 = out_tm0 + tiles_n;
float* out_tm2 = out_tm0 + 2 * tiles_n;
float* out_tm3 = out_tm0 + 3 * tiles_n;
float* out_tm4 = out_tm0 + 4 * tiles_n;
float* out_tm5 = out_tm0 + 5 * tiles_n;
float* out_tm6 = out_tm0 + 6 * tiles_n;
float* out_tm7 = out_tm0 + 7 * tiles_n;
float* out_tm8 = out_tm0 + 8 * tiles_n;
#if __AVX__
__m256 _d0, _d1, _d2, _d3, _d4, _d5;
__m256 _w0, _w1, _w2, _w3, _w4, _w5;
__m256 _t0, _t1, _t2, _t3, _t4, _t5;
__m256 _n0, _n1, _n2, _n3, _n4, _n5;
// load
_d0 = _mm256_loadu_ps(r0);
_d1 = _mm256_loadu_ps(r1);
_d2 = _mm256_loadu_ps(r2);
_d3 = _mm256_loadu_ps(r3);
_d4 = _mm256_loadu_ps(r4);
_d5 = _mm256_loadu_ps(r5);
// w = B_t * d
_w0 = _mm256_mul_ps(_d0, _4_p);
_w0 = _mm256_fmadd_ps(_d2, _5_n, _w0);
_w0 = _mm256_add_ps(_w0, _d4);
_w1 = _mm256_mul_ps(_d1, _4_n);
_w1 = _mm256_fmadd_ps(_d2, _4_n, _w1);
_w1 = _mm256_add_ps(_w1, _d3);
_w1 = _mm256_add_ps(_w1, _d4);
_w2 = _mm256_mul_ps(_d1, _4_p);
_w2 = _mm256_fmadd_ps(_d2, _4_n, _w2);
_w2 = _mm256_fmadd_ps(_d3, _1_n, _w2);
_w2 = _mm256_add_ps(_w2, _d4);
_w3 = _mm256_mul_ps(_d1, _2_n);
_w3 = _mm256_fmadd_ps(_d2, _1_n, _w3);
_w3 = _mm256_fmadd_ps(_d3, _2_p, _w3);
_w3 = _mm256_add_ps(_w3, _d4);
_w4 = _mm256_mul_ps(_d1, _2_p);
_w4 = _mm256_fmadd_ps(_d2, _1_n, _w4);
_w4 = _mm256_fmadd_ps(_d3, _2_n, _w4);
_w4 = _mm256_add_ps(_w4, _d4);
_w5 = _mm256_mul_ps(_d1, _4_p);
_w5 = _mm256_fmadd_ps(_d3, _5_n, _w5);
_w5 = _mm256_add_ps(_w5, _d5);
// transpose d to d_t
#ifdef _WIN32
{
_t0.m256_f32[0] = _w0.m256_f32[0];
_t1.m256_f32[0] = _w0.m256_f32[1];
_t2.m256_f32[0] = _w0.m256_f32[2];
_t3.m256_f32[0] = _w0.m256_f32[3];
_t4.m256_f32[0] = _w0.m256_f32[4];
_t5.m256_f32[0] = _w0.m256_f32[5];
_t0.m256_f32[1] = _w1.m256_f32[0];
_t1.m256_f32[1] = _w1.m256_f32[1];
_t2.m256_f32[1] = _w1.m256_f32[2];
_t3.m256_f32[1] = _w1.m256_f32[3];
_t4.m256_f32[1] = _w1.m256_f32[4];
_t5.m256_f32[1] = _w1.m256_f32[5];
_t0.m256_f32[2] = _w2.m256_f32[0];
_t1.m256_f32[2] = _w2.m256_f32[1];
_t2.m256_f32[2] = _w2.m256_f32[2];
_t3.m256_f32[2] = _w2.m256_f32[3];
_t4.m256_f32[2] = _w2.m256_f32[4];
_t5.m256_f32[2] = _w2.m256_f32[5];
_t0.m256_f32[3] = _w3.m256_f32[0];
_t1.m256_f32[3] = _w3.m256_f32[1];
_t2.m256_f32[3] = _w3.m256_f32[2];
_t3.m256_f32[3] = _w3.m256_f32[3];
_t4.m256_f32[3] = _w3.m256_f32[4];
_t5.m256_f32[3] = _w3.m256_f32[5];
_t0.m256_f32[4] = _w4.m256_f32[0];
_t1.m256_f32[4] = _w4.m256_f32[1];
_t2.m256_f32[4] = _w4.m256_f32[2];
_t3.m256_f32[4] = _w4.m256_f32[3];
_t4.m256_f32[4] = _w4.m256_f32[4];
_t5.m256_f32[4] = _w4.m256_f32[5];
_t0.m256_f32[5] = _w5.m256_f32[0];
_t1.m256_f32[5] = _w5.m256_f32[1];
_t2.m256_f32[5] = _w5.m256_f32[2];
_t3.m256_f32[5] = _w5.m256_f32[3];
_t4.m256_f32[5] = _w5.m256_f32[4];
_t5.m256_f32[5] = _w5.m256_f32[5];
}
#else
{
_t0[0] = _w0[0];
_t1[0] = _w0[1];
_t2[0] = _w0[2];
_t3[0] = _w0[3];
_t4[0] = _w0[4];
_t5[0] = _w0[5];
_t0[1] = _w1[0];
_t1[1] = _w1[1];
_t2[1] = _w1[2];
_t3[1] = _w1[3];
_t4[1] = _w1[4];
_t5[1] = _w1[5];
_t0[2] = _w2[0];
_t1[2] = _w2[1];
_t2[2] = _w2[2];
_t3[2] = _w2[3];
_t4[2] = _w2[4];
_t5[2] = _w2[5];
_t0[3] = _w3[0];
_t1[3] = _w3[1];
_t2[3] = _w3[2];
_t3[3] = _w3[3];
_t4[3] = _w3[4];
_t5[3] = _w3[5];
_t0[4] = _w4[0];
_t1[4] = _w4[1];
_t2[4] = _w4[2];
_t3[4] = _w4[3];
_t4[4] = _w4[4];
_t5[4] = _w4[5];
_t0[5] = _w5[0];
_t1[5] = _w5[1];
_t2[5] = _w5[2];
_t3[5] = _w5[3];
_t4[5] = _w5[4];
_t5[5] = _w5[5];
}
#endif
// d = B_t * d_t
_n0 = _mm256_mul_ps(_t0, _4_p);
_n0 = _mm256_fmadd_ps(_t2, _5_n, _n0);
_n0 = _mm256_add_ps(_n0, _t4);
_n1 = _mm256_mul_ps(_t1, _4_n);
_n1 = _mm256_fmadd_ps(_t2, _4_n, _n1);
_n1 = _mm256_add_ps(_n1, _t3);
_n1 = _mm256_add_ps(_n1, _t4);
_n2 = _mm256_mul_ps(_t1, _4_p);
_n2 = _mm256_fmadd_ps(_t2, _4_n, _n2);
_n2 = _mm256_fmadd_ps(_t3, _1_n, _n2);
_n2 = _mm256_add_ps(_n2, _t4);
_n3 = _mm256_mul_ps(_t1, _2_n);
_n3 = _mm256_fmadd_ps(_t2, _1_n, _n3);
_n3 = _mm256_fmadd_ps(_t3, _2_p, _n3);
_n3 = _mm256_add_ps(_n3, _t4);
_n4 = _mm256_mul_ps(_t1, _2_p);
_n4 = _mm256_fmadd_ps(_t2, _1_n, _n4);
_n4 = _mm256_fmadd_ps(_t3, _2_n, _n4);
_n4 = _mm256_add_ps(_n4, _t4);
_n5 = _mm256_mul_ps(_t1, _4_p);
_n5 = _mm256_fmadd_ps(_t3, _5_n, _n5);
_n5 = _mm256_add_ps(_n5, _t5);
// save to out_tm
float output_n0[8] = {0.f};
_mm256_storeu_ps(output_n0, _n0);
float output_n1[8] = {0.f};
_mm256_storeu_ps(output_n1, _n1);
float output_n2[8] = {0.f};
_mm256_storeu_ps(output_n2, _n2);
float output_n3[8] = {0.f};
_mm256_storeu_ps(output_n3, _n3);
float output_n4[8] = {0.f};
_mm256_storeu_ps(output_n4, _n4);
float output_n5[8] = {0.f};
_mm256_storeu_ps(output_n5, _n5);
out_tm0[0] = output_n0[0];
out_tm0[1] = output_n0[1];
out_tm0[2] = output_n0[2];
out_tm0[3] = output_n0[3];
out_tm1[0] = output_n0[4];
out_tm1[1] = output_n0[5];
out_tm1[2] = output_n1[0];
out_tm1[3] = output_n1[1];
out_tm2[0] = output_n1[2];
out_tm2[1] = output_n1[3];
out_tm2[2] = output_n1[4];
out_tm2[3] = output_n1[5];
out_tm3[0] = output_n2[0];
out_tm3[1] = output_n2[1];
out_tm3[2] = output_n2[2];
out_tm3[3] = output_n2[3];
out_tm4[0] = output_n2[4];
out_tm4[1] = output_n2[5];
out_tm4[2] = output_n3[0];
out_tm4[3] = output_n3[1];
out_tm5[0] = output_n3[2];
out_tm5[1] = output_n3[3];
out_tm5[2] = output_n3[4];
out_tm5[3] = output_n3[5];
out_tm6[0] = output_n4[0];
out_tm6[1] = output_n4[1];
out_tm6[2] = output_n4[2];
out_tm6[3] = output_n4[3];
out_tm7[0] = output_n4[4];
out_tm7[1] = output_n4[5];
out_tm7[2] = output_n5[0];
out_tm7[3] = output_n5[1];
out_tm8[0] = output_n5[2];
out_tm8[1] = output_n5[3];
out_tm8[2] = output_n5[4];
out_tm8[3] = output_n5[5];
#else
float d0[6], d1[6], d2[6], d3[6], d4[6], d5[6];
float w0[6], w1[6], w2[6], w3[6], w4[6], w5[6];
float t0[6], t1[6], t2[6], t3[6], t4[6], t5[6];
// load
for (int n = 0; n < 6; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
d4[n] = r4[n];
d5[n] = r5[n];
}
// w = B_t * d
for (int n = 0; n < 6; n++)
{
w0[n] = 4 * d0[n] - 5 * d2[n] + d4[n];
w1[n] = -4 * d1[n] - 4 * d2[n] + d3[n] + d4[n];
w2[n] = 4 * d1[n] - 4 * d2[n] - d3[n] + d4[n];
w3[n] = -2 * d1[n] - d2[n] + 2 * d3[n] + d4[n];
w4[n] = 2 * d1[n] - d2[n] - 2 * d3[n] + d4[n];
w5[n] = 4 * d1[n] - 5 * d3[n] + d5[n];
}
// transpose d to d_t
{
t0[0] = w0[0];
t1[0] = w0[1];
t2[0] = w0[2];
t3[0] = w0[3];
t4[0] = w0[4];
t5[0] = w0[5];
t0[1] = w1[0];
t1[1] = w1[1];
t2[1] = w1[2];
t3[1] = w1[3];
t4[1] = w1[4];
t5[1] = w1[5];
t0[2] = w2[0];
t1[2] = w2[1];
t2[2] = w2[2];
t3[2] = w2[3];
t4[2] = w2[4];
t5[2] = w2[5];
t0[3] = w3[0];
t1[3] = w3[1];
t2[3] = w3[2];
t3[3] = w3[3];
t4[3] = w3[4];
t5[3] = w3[5];
t0[4] = w4[0];
t1[4] = w4[1];
t2[4] = w4[2];
t3[4] = w4[3];
t4[4] = w4[4];
t5[4] = w4[5];
t0[5] = w5[0];
t1[5] = w5[1];
t2[5] = w5[2];
t3[5] = w5[3];
t4[5] = w5[4];
t5[5] = w5[5];
}
// d = B_t * d_t
for (int n = 0; n < 6; n++)
{
d0[n] = 4 * t0[n] - 5 * t2[n] + t4[n];
d1[n] = -4 * t1[n] - 4 * t2[n] + t3[n] + t4[n];
d2[n] = 4 * t1[n] - 4 * t2[n] - t3[n] + t4[n];
d3[n] = -2 * t1[n] - t2[n] + 2 * t3[n] + t4[n];
d4[n] = 2 * t1[n] - t2[n] - 2 * t3[n] + t4[n];
d5[n] = 4 * t1[n] - 5 * t3[n] + t5[n];
}
// save to out_tm
{
out_tm0[0] = d0[0];
out_tm0[1] = d0[1];
out_tm0[2] = d0[2];
out_tm0[3] = d0[3];
out_tm1[0] = d0[4];
out_tm1[1] = d0[5];
out_tm1[2] = d1[0];
out_tm1[3] = d1[1];
out_tm2[0] = d1[2];
out_tm2[1] = d1[3];
out_tm2[2] = d1[4];
out_tm2[3] = d1[5];
out_tm3[0] = d2[0];
out_tm3[1] = d2[1];
out_tm3[2] = d2[2];
out_tm3[3] = d2[3];
out_tm4[0] = d2[4];
out_tm4[1] = d2[5];
out_tm4[2] = d3[0];
out_tm4[3] = d3[1];
out_tm5[0] = d3[2];
out_tm5[1] = d3[3];
out_tm5[2] = d3[4];
out_tm5[3] = d3[5];
out_tm6[0] = d4[0];
out_tm6[1] = d4[1];
out_tm6[2] = d4[2];
out_tm6[3] = d4[3];
out_tm7[0] = d4[4];
out_tm7[1] = d4[5];
out_tm7[2] = d5[0];
out_tm7[3] = d5[1];
out_tm8[0] = d5[2];
out_tm8[1] = d5[3];
out_tm8[2] = d5[4];
out_tm8[3] = d5[5];
}
#endif // __AVX__
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
}
}
}
}
// BEGIN dot
float* top_blob_tm = NULL;
{
int w_tm = outw_align / 4 * 6;
int h_tm = outh_align / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
const int tiles_n = 36 * tiles;
top_blob_tm = dot_block;
#pragma omp parallel for num_threads(num_thread)
for (int r = 0; r < 9; r++)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp << 3;
float* output0_tm = top_blob_tm + tiles_n * p;
float* output1_tm = top_blob_tm + tiles_n * (p + 1);
float* output2_tm = top_blob_tm + tiles_n * (p + 2);
float* output3_tm = top_blob_tm + tiles_n * (p + 3);
float* output4_tm = top_blob_tm + tiles_n * (p + 4);
float* output5_tm = top_blob_tm + tiles_n * (p + 5);
float* output6_tm = top_blob_tm + tiles_n * (p + 6);
float* output7_tm = top_blob_tm + tiles_n * (p + 7);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
output4_tm = output4_tm + r * 4;
output5_tm = output5_tm + r * 4;
output6_tm = output6_tm + r * 4;
output7_tm = output7_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const float* kptr = kernel_tm_test + 4 * r * inch * outch + p / 8 * inch * 32;
const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
__m128 _sum1 = _mm_broadcast_ss(&zero_val);
__m128 _sum2 = _mm_broadcast_ss(&zero_val);
__m128 _sum3 = _mm_broadcast_ss(&zero_val);
__m128 _sum4 = _mm_broadcast_ss(&zero_val);
__m128 _sum5 = _mm_broadcast_ss(&zero_val);
__m128 _sum6 = _mm_broadcast_ss(&zero_val);
__m128 _sum7 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
__m128 _sum1 = _mm_set1_ps(0.f);
__m128 _sum2 = _mm_set1_ps(0.f);
__m128 _sum3 = _mm_set1_ps(0.f);
__m128 _sum4 = _mm_set1_ps(0.f);
__m128 _sum5 = _mm_set1_ps(0.f);
__m128 _sum6 = _mm_set1_ps(0.f);
__m128 _sum7 = _mm_set1_ps(0.f);
#endif
int q = 0;
for (; q + 3 < inch; q = q + 4)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _r1 = _mm_loadu_ps(r0 + 4);
__m128 _r2 = _mm_loadu_ps(r0 + 8);
__m128 _r3 = _mm_loadu_ps(r0 + 12);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr + 4);
__m128 _k2 = _mm_loadu_ps(kptr + 8);
__m128 _k3 = _mm_loadu_ps(kptr + 12);
__m128 _k4 = _mm_loadu_ps(kptr + 16);
__m128 _k5 = _mm_loadu_ps(kptr + 20);
__m128 _k6 = _mm_loadu_ps(kptr + 24);
__m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr + 4);
_k2 = _mm_loadu_ps(kptr + 8);
_k3 = _mm_loadu_ps(kptr + 12);
_k4 = _mm_loadu_ps(kptr + 16);
_k5 = _mm_loadu_ps(kptr + 20);
_k6 = _mm_loadu_ps(kptr + 24);
_k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r1, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r1, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r1, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r1, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r1, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r1, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r1, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r1, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r1, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r1, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r1, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r1, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r1, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r1, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r1, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r1, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr + 4);
_k2 = _mm_loadu_ps(kptr + 8);
_k3 = _mm_loadu_ps(kptr + 12);
_k4 = _mm_loadu_ps(kptr + 16);
_k5 = _mm_loadu_ps(kptr + 20);
_k6 = _mm_loadu_ps(kptr + 24);
_k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r2, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r2, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r2, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r2, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r2, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r2, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r2, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r2, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r2, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r2, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r2, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r2, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r2, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r2, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r2, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r2, _k7));
#endif
kptr += 32;
_k0 = _mm_loadu_ps(kptr);
_k1 = _mm_loadu_ps(kptr + 4);
_k2 = _mm_loadu_ps(kptr + 8);
_k3 = _mm_loadu_ps(kptr + 12);
_k4 = _mm_loadu_ps(kptr + 16);
_k5 = _mm_loadu_ps(kptr + 20);
_k6 = _mm_loadu_ps(kptr + 24);
_k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r3, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r3, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r3, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r3, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r3, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r3, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r3, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r3, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r3, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r3, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r3, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r3, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r3, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r3, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r3, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r3, _k7));
#endif
kptr += 32;
r0 += 16;
}
for (; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr + 4);
__m128 _k2 = _mm_loadu_ps(kptr + 8);
__m128 _k3 = _mm_loadu_ps(kptr + 12);
__m128 _k4 = _mm_loadu_ps(kptr + 16);
__m128 _k5 = _mm_loadu_ps(kptr + 20);
__m128 _k6 = _mm_loadu_ps(kptr + 24);
__m128 _k7 = _mm_loadu_ps(kptr + 28);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
_sum4 = _mm_fmadd_ps(_r0, _k4, _sum4);
_sum5 = _mm_fmadd_ps(_r0, _k5, _sum5);
_sum6 = _mm_fmadd_ps(_r0, _k6, _sum6);
_sum7 = _mm_fmadd_ps(_r0, _k7, _sum7);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
_sum4 = _mm_add_ps(_sum4, _mm_mul_ps(_r0, _k4));
_sum5 = _mm_add_ps(_sum5, _mm_mul_ps(_r0, _k5));
_sum6 = _mm_add_ps(_sum6, _mm_mul_ps(_r0, _k6));
_sum7 = _mm_add_ps(_sum7, _mm_mul_ps(_r0, _k7));
#endif
kptr += 32;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
_mm_storeu_ps(output4_tm, _sum4);
_mm_storeu_ps(output5_tm, _sum5);
_mm_storeu_ps(output6_tm, _sum6);
_mm_storeu_ps(output7_tm, _sum7);
#else
float sum0[4] = {0};
float sum1[4] = {0};
float sum2[4] = {0};
float sum3[4] = {0};
float sum4[4] = {0};
float sum5[4] = {0};
float sum6[4] = {0};
float sum7[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += r0[n] * kptr[n];
sum1[n] += r0[n] * kptr[n + 4];
sum2[n] += r0[n] * kptr[n + 8];
sum3[n] += r0[n] * kptr[n + 12];
sum4[n] += r0[n] * kptr[n + 16];
sum5[n] += r0[n] * kptr[n + 20];
sum6[n] += r0[n] * kptr[n + 24];
sum7[n] += r0[n] * kptr[n + 28];
}
kptr += 32;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
output4_tm[n] = sum4[n];
output5_tm[n] = sum5[n];
output6_tm[n] = sum6[n];
output7_tm[n] = sum7[n];
}
#endif // __AVX__
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
output4_tm += 36;
output5_tm += 36;
output6_tm += 36;
output7_tm += 36;
}
}
nn_outch = (outch - remain_outch_start) >> 2;
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* output0_tm = top_blob_tm + tiles_n * p;
float* output1_tm = top_blob_tm + tiles_n * (p + 1);
float* output2_tm = top_blob_tm + tiles_n * (p + 2);
float* output3_tm = top_blob_tm + tiles_n * (p + 3);
output0_tm = output0_tm + r * 4;
output1_tm = output1_tm + r * 4;
output2_tm = output2_tm + r * 4;
output3_tm = output3_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const float* kptr = kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4) * inch * 16;
const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
__m128 _sum1 = _mm_broadcast_ss(&zero_val);
__m128 _sum2 = _mm_broadcast_ss(&zero_val);
__m128 _sum3 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
__m128 _sum1 = _mm_set1_ps(0.f);
__m128 _sum2 = _mm_set1_ps(0.f);
__m128 _sum3 = _mm_set1_ps(0.f);
#endif
for (int q = 0; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
__m128 _k1 = _mm_loadu_ps(kptr + 4);
__m128 _k2 = _mm_loadu_ps(kptr + 8);
__m128 _k3 = _mm_loadu_ps(kptr + 12);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
_sum1 = _mm_fmadd_ps(_r0, _k1, _sum1);
_sum2 = _mm_fmadd_ps(_r0, _k2, _sum2);
_sum3 = _mm_fmadd_ps(_r0, _k3, _sum3);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_r0, _k1));
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_r0, _k2));
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_r0, _k3));
#endif
kptr += 16;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
_mm_storeu_ps(output1_tm, _sum1);
_mm_storeu_ps(output2_tm, _sum2);
_mm_storeu_ps(output3_tm, _sum3);
#else
float sum0[4] = {0};
float sum1[4] = {0};
float sum2[4] = {0};
float sum3[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += r0[n] * kptr[n];
sum1[n] += r0[n] * kptr[n + 4];
sum2[n] += r0[n] * kptr[n + 8];
sum3[n] += r0[n] * kptr[n + 12];
}
kptr += 16;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
#endif // __AVX__
output0_tm += 36;
output1_tm += 36;
output2_tm += 36;
output3_tm += 36;
}
}
remain_outch_start += nn_outch << 2;
for (int p = remain_outch_start; p < outch; p++)
{
float* output0_tm = top_blob_tm + 36 * tiles * p;
output0_tm = output0_tm + r * 4;
for (int i = 0; i < tiles; i++)
{
const float* kptr =
kernel_tm_test + 4 * r * inch * outch + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4;
const float* r0 = bottom_blob_tm + 4 * inch * (tiles * r + i);
#if __AVX__ || __SSE__
#if __AVX__
float zero_val = 0.f;
__m128 _sum0 = _mm_broadcast_ss(&zero_val);
#else
__m128 _sum0 = _mm_set1_ps(0.f);
#endif
for (int q = 0; q < inch; q++)
{
__m128 _r0 = _mm_loadu_ps(r0);
__m128 _k0 = _mm_loadu_ps(kptr);
#if __AVX__
_sum0 = _mm_fmadd_ps(_r0, _k0, _sum0);
#else
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_r0, _k0));
#endif
kptr += 4;
r0 += 4;
}
_mm_storeu_ps(output0_tm, _sum0);
#else
float sum0[4] = {0};
for (int q = 0; q < inch; q++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += r0[n] * kptr[n];
}
kptr += 4;
r0 += 4;
}
for (int n = 0; n < 4; n++)
{
output0_tm[n] = sum0[n];
}
#endif // __AVX__ || __SSE__
output0_tm += 36;
}
}
}
}
// END dot
// BEGIN transform output
float* top_blob_bordered = NULL;
if (outw_align == outw && outh_align == outh)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered = output_bordered;
}
{
// AT
// const float itm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + r01 + r02 + r03 + r04
// 1 = r01 - r02 + 2 * (r03 - r04)
// 2 = r01 + r02 + 4 * (r03 + r04)
// 3 = r01 - r02 + 8 * (r03 - r04) + r05
int w_tm = outw_align / 4 * 6;
int h_tm = outh_align / 4 * 6;
int nColBlocks = h_tm / 6; // may be the block num in Feathercnn
int nRowBlocks = w_tm / 6;
const int tiles = nColBlocks * nRowBlocks;
#pragma omp parallel for num_threads(num_thread)
for (int p = 0; p < outch; p++)
{
float* out_tile = top_blob_tm + 36 * tiles * p;
float* outRow0 = top_blob_bordered + outw_align * outh_align * p;
float* outRow1 = outRow0 + outw_align;
float* outRow2 = outRow0 + outw_align * 2;
float* outRow3 = outRow0 + outw_align * 3;
const float bias0 = bias ? bias[p] : 0.f;
for (int j = 0; j < nColBlocks; j++)
{
for (int i = 0; i < nRowBlocks; i++)
{
// TODO AVX2
float s0[6], s1[6], s2[6], s3[6], s4[6], s5[6];
float w0[6], w1[6], w2[6], w3[6];
float d0[4], d1[4], d2[4], d3[4], d4[4], d5[4];
float o0[4], o1[4], o2[4], o3[4];
// load
for (int n = 0; n < 6; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n + 6];
s2[n] = out_tile[n + 12];
s3[n] = out_tile[n + 18];
s4[n] = out_tile[n + 24];
s5[n] = out_tile[n + 30];
}
// w = A_T * W
for (int n = 0; n < 6; n++)
{
w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
w1[n] = s1[n] - s2[n] + 2 * s3[n] - 2 * s4[n];
w2[n] = s1[n] + s2[n] + 4 * s3[n] + 4 * s4[n];
w3[n] = s1[n] - s2[n] + 8 * s3[n] - 8 * s4[n] + s5[n];
}
// transpose w to w_t
{
d0[0] = w0[0];
d0[1] = w1[0];
d0[2] = w2[0];
d0[3] = w3[0];
d1[0] = w0[1];
d1[1] = w1[1];
d1[2] = w2[1];
d1[3] = w3[1];
d2[0] = w0[2];
d2[1] = w1[2];
d2[2] = w2[2];
d2[3] = w3[2];
d3[0] = w0[3];
d3[1] = w1[3];
d3[2] = w2[3];
d3[3] = w3[3];
d4[0] = w0[4];
d4[1] = w1[4];
d4[2] = w2[4];
d4[3] = w3[4];
d5[0] = w0[5];
d5[1] = w1[5];
d5[2] = w2[5];
d5[3] = w3[5];
}
// Y = A_T * w_t
for (int n = 0; n < 4; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
o1[n] = d1[n] - d2[n] + 2 * d3[n] - 2 * d4[n];
o2[n] = d1[n] + d2[n] + 4 * d3[n] + 4 * d4[n];
o3[n] = d1[n] - d2[n] + 8 * d3[n] - 8 * d4[n] + d5[n];
}
// save to top blob tm
for (int n = 0; n < 4; n++)
{
outRow0[n] = o0[n] + bias0;
outRow1[n] = o1[n] + bias0;
outRow2[n] = o2[n] + bias0;
outRow3[n] = o3[n] + bias0;
}
out_tile += 36;
outRow0 += 4;
outRow1 += 4;
outRow2 += 4;
outRow3 += 4;
}
outRow0 += outw_align * 3;
outRow1 += outw_align * 3;
outRow2 += outw_align * 3;
outRow3 += outw_align * 3;
}
}
}
// END transform output
if (outw_align != outw || outh_align != outw)
{
delete_0_3D(top_blob, top_blob_bordered, outh_align, outw_align, outh, outw, outch, 0, 0);
}
}
/* Transform 3x3 convolution weights into the Winograd F(4x4, 3x3) domain
 * (6x6 tiles, U = G * g * G^T) and interleave them into the layout consumed
 * by the SSE dot kernels: per transform pass r (9 passes of 4 floats each),
 * weights are grouped by 8 output channels, then 4, then 1.
 *
 * kernel      : input weights, laid out outch x inch x 3 x 3
 * kernel_wino : destination buffer, 9 * 4 * inch * outch floats
 * inch, outch : input / output channel counts
 *
 * Fix: the temporary buffer comes from sys_malloc(), so it must be released
 * with sys_free() (as done everywhere else in this file), not plain free().
 */
void conv3x3s1_winograd43_transform_kernel_sse(const float* kernel, float* kernel_wino, int inch, int outch)
{
    float* kernel_tm = ( float* )sys_malloc((unsigned long)6 * 6 * inch * outch * sizeof(float));

    /* G: the 6x3 Winograd kernel-transform matrix */
    const float ktm[6][3] = {
        {1.0f / 4, 0.0f, 0.0f}, {-1.0f / 6, -1.0f / 6, -1.0f / 6}, {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6}, {1.0f / 24, -1.0f / 12, 1.0f / 6}, {0.0f, 0.0f, 1.0f}};

#pragma omp parallel for
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* kernel0 = kernel + p * inch * 9 + q * 9;
            float* kernel_tm0 = kernel_tm + p * inch * 36 + q * 36;

            /* rows of the 3x3 kernel */
            const float* k0 = kernel0;
            const float* k1 = kernel0 + 3;
            const float* k2 = kernel0 + 6;

            /* h = G * g */
            float tmp[6][3] = {0};
            for (int i = 0; i < 6; i++)
            {
                tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
                tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
                tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
            }

            /* U = h * G^T */
            for (int j = 0; j < 6; j++)
            {
                float* tmpp = &tmp[j][0];
                for (int i = 0; i < 6; i++)
                {
                    kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
                }
            }
        }
    }

    /* Interleave the transformed weights pass by pass:
     * for each of the 9 passes, 8-channel groups first, then 4-channel
     * groups, then single channels — matching the dot-kernel read order. */
    float* kernel_tm_test = kernel_wino;
    for (int r = 0; r < 9; r++)
    {
        int p = 0;
        for (; p + 7 < outch; p += 8)
        {
            const float* kernel0 = ( const float* )kernel_tm + p * inch * 36;
            const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36;
            const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36;
            const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36;
            const float* kernel4 = ( const float* )kernel_tm + (p + 4) * inch * 36;
            const float* kernel5 = ( const float* )kernel_tm + (p + 5) * inch * 36;
            const float* kernel6 = ( const float* )kernel_tm + (p + 6) * inch * 36;
            const float* kernel7 = ( const float* )kernel_tm + (p + 7) * inch * 36;
            float* ktmp = kernel_tm_test + p / 8 * inch * 32;
            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];
                ktmp[4] = kernel1[r * 4 + 0];
                ktmp[5] = kernel1[r * 4 + 1];
                ktmp[6] = kernel1[r * 4 + 2];
                ktmp[7] = kernel1[r * 4 + 3];
                ktmp[8] = kernel2[r * 4 + 0];
                ktmp[9] = kernel2[r * 4 + 1];
                ktmp[10] = kernel2[r * 4 + 2];
                ktmp[11] = kernel2[r * 4 + 3];
                ktmp[12] = kernel3[r * 4 + 0];
                ktmp[13] = kernel3[r * 4 + 1];
                ktmp[14] = kernel3[r * 4 + 2];
                ktmp[15] = kernel3[r * 4 + 3];
                ktmp[16] = kernel4[r * 4 + 0];
                ktmp[17] = kernel4[r * 4 + 1];
                ktmp[18] = kernel4[r * 4 + 2];
                ktmp[19] = kernel4[r * 4 + 3];
                ktmp[20] = kernel5[r * 4 + 0];
                ktmp[21] = kernel5[r * 4 + 1];
                ktmp[22] = kernel5[r * 4 + 2];
                ktmp[23] = kernel5[r * 4 + 3];
                ktmp[24] = kernel6[r * 4 + 0];
                ktmp[25] = kernel6[r * 4 + 1];
                ktmp[26] = kernel6[r * 4 + 2];
                ktmp[27] = kernel6[r * 4 + 3];
                ktmp[28] = kernel7[r * 4 + 0];
                ktmp[29] = kernel7[r * 4 + 1];
                ktmp[30] = kernel7[r * 4 + 2];
                ktmp[31] = kernel7[r * 4 + 3];
                ktmp += 32;
                kernel0 += 36;
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
                kernel4 += 36;
                kernel5 += 36;
                kernel6 += 36;
                kernel7 += 36;
            }
        }
        for (; p + 3 < outch; p += 4)
        {
            const float* kernel0 = ( const float* )kernel_tm + p * inch * 36;
            const float* kernel1 = ( const float* )kernel_tm + (p + 1) * inch * 36;
            const float* kernel2 = ( const float* )kernel_tm + (p + 2) * inch * 36;
            const float* kernel3 = ( const float* )kernel_tm + (p + 3) * inch * 36;
            float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4) * inch * 16;
            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];
                ktmp[4] = kernel1[r * 4 + 0];
                ktmp[5] = kernel1[r * 4 + 1];
                ktmp[6] = kernel1[r * 4 + 2];
                ktmp[7] = kernel1[r * 4 + 3];
                ktmp[8] = kernel2[r * 4 + 0];
                ktmp[9] = kernel2[r * 4 + 1];
                ktmp[10] = kernel2[r * 4 + 2];
                ktmp[11] = kernel2[r * 4 + 3];
                ktmp[12] = kernel3[r * 4 + 0];
                ktmp[13] = kernel3[r * 4 + 1];
                ktmp[14] = kernel3[r * 4 + 2];
                ktmp[15] = kernel3[r * 4 + 3];
                ktmp += 16;
                kernel0 += 36;
                kernel1 += 36;
                kernel2 += 36;
                kernel3 += 36;
            }
        }
        for (; p < outch; p++)
        {
            const float* kernel0 = ( const float* )kernel_tm + p * inch * 36;
            float* ktmp = kernel_tm_test + (p / 8 + (p % 8) / 4 + p % 4) * inch * 4;
            for (int q = 0; q < inch; q++)
            {
                ktmp[0] = kernel0[r * 4 + 0];
                ktmp[1] = kernel0[r * 4 + 1];
                ktmp[2] = kernel0[r * 4 + 2];
                ktmp[3] = kernel0[r * 4 + 3];
                ktmp += 4;
                kernel0 += 36;
            }
        }
        kernel_tm_test += 4 * inch * outch;
    }

    /* pair with sys_malloc above (was plain free()) */
    sys_free(kernel_tm);
}
/* Allocate the scratch buffers used by the Winograd F(4,3) convolution run
 * and pre-transform the filter weights into the interleave buffer.
 *
 * Cleanup: removed the dead locals input_h/input_w/pad_h/pad_w, which were
 * read from the tensors but never used.
 *
 * Returns 0 on success.
 */
int wino_conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor,
                         struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param)
{
    int batch = input_tensor->dims[0];
    int input_c = input_tensor->dims[1];
    int output_c = output_tensor->dims[1];
    int output_h = output_tensor->dims[2];
    int output_w = output_tensor->dims[3];

    float* kernel = ( float* )filter_tensor->data;

    /* The interleave buffer holds the Winograd-transformed, re-ordered
     * weights; only allocate it when the caller did not supply one. */
    if (!priv_info->external_interleave_mem)
    {
        int mem_size = get_private_mem_size(filter_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }

    /* Tile geometry: the output is processed in TILE x TILE blocks and the
     * input is padded by 2 on each spatial axis for the 6x6 input tiles. */
    int block_h = (output_h + TILE - 1) / TILE;
    int block_w = (output_w + TILE - 1) / TILE;
    int block = block_h * block_w;
    int padded_inh = TILE * block_h + 2;
    int padded_inw = TILE * block_w + 2;
    int pad_inhw = padded_inh * padded_inw;
    int outw = block_w * TILE;
    int outh = block_h * TILE;

    priv_info->input_pad = ( float* )sys_malloc((unsigned long)batch * input_c * pad_inhw * sizeof(float));
    memset(priv_info->input_pad, 0, (unsigned long)batch * input_c * pad_inhw * sizeof(float));
    priv_info->dot_block = ( float* )sys_malloc(ELEM_SIZE * (unsigned long)block * output_c * sizeof(float));
    priv_info->transform_input = ( float* )sys_malloc(ELEM_SIZE * (unsigned long)block * input_c * sizeof(float));
    priv_info->output_bordered = NULL;
    if (outw != output_w || outh != output_h)
    {
        /* tile-aligned size differs from the real output: stage results in a
         * bordered buffer that gets cropped after the output transform */
        priv_info->output_bordered = ( float* )sys_malloc((unsigned long)outw * outh * output_c * sizeof(float));
    }

    conv3x3s1_winograd43_transform_kernel_sse(kernel, ( float* )priv_info->interleave_buffer, input_c, output_c);
    return 0;
}
/* Release every buffer allocated by wino_conv_hcl_prerun and reset the
 * corresponding pointers so a later postrun call is harmless.
 * Returns 0 unconditionally.
 */
int wino_conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    /* The interleave buffer is only owned by us when it was not supplied
     * externally by the caller. */
    if (priv_info->interleave_buffer != NULL && !priv_info->external_interleave_mem)
    {
        sys_free(priv_info->interleave_buffer);
        priv_info->interleave_buffer = NULL;
    }

    if (priv_info->input_pad != NULL)
    {
        sys_free(priv_info->input_pad);
        priv_info->input_pad = NULL;
    }

    if (priv_info->dot_block != NULL)
    {
        sys_free(priv_info->dot_block);
        priv_info->dot_block = NULL;
    }

    if (priv_info->transform_input != NULL)
    {
        sys_free(priv_info->transform_input);
        priv_info->transform_input = NULL;
    }

    if (priv_info->output_bordered != NULL)
    {
        sys_free(priv_info->output_bordered);
        priv_info->output_bordered = NULL;
    }

    return 0;
}
/* Execute the Winograd F(4,3) 3x3/stride-1 convolution: pad the input,
 * run the tiled transform/dot/output pipeline per batch image, then apply
 * the optional activation.
 *
 * Cleanup: removed the dead locals kernel_h/w, stride_h/w, dilation_h/w,
 * input_size, kernel_size, out_hw, out_c_align, block_hw and padded_in_hw,
 * none of which were referenced after initialisation.
 *
 * Returns 0 on success.
 */
int wino_conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
                      struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                      int num_thread, int cpu_affinity)
{
    /* convolution parameters actually consumed here */
    int pad_h0 = param->pad_h0;
    int pad_w0 = param->pad_w0;
    int act_type = param->activation;
    int group = param->group;

    /* input geometry */
    int batch = input_tensor->dims[0];
    int in_c = input_tensor->dims[1];
    int in_c_g = input_tensor->dims[1] / group;
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];
    int input_size_g = in_c_g * in_h * in_w;

    /* output geometry */
    int out_c = output_tensor->dims[1];
    int out_h = output_tensor->dims[2];
    int out_w = output_tensor->dims[3];
    int output_size = out_c * out_h * out_w;

    /* winograd tile geometry (input padded by 2 per axis) */
    int block_h = (out_h + TILE - 1) / TILE;
    int block_w = (out_w + TILE - 1) / TILE;
    int padded_in_h = block_h * TILE + 2;
    int padded_in_w = block_w * TILE + 2;

    /* buffer addresses */
    float* input = ( float* )input_tensor->data;
    float* output = ( float* )output_tensor->data;
    float* biases = NULL;
    if (bias_tensor != NULL)
        biases = ( float* )bias_tensor->data;

    for (int i = 0; i < batch; i++)
    {
        for (int g = 0; g < group; g++)
        {
            /* NOTE(review): the pad call below ignores g (it re-pads the whole
             * image every group iteration) and the group offset into the padded
             * buffer uses the UNPADDED per-group size input_size_g — this looks
             * suspicious for group > 1; confirm against callers. Behavior kept
             * exactly as before. */
            pad_0_align_3D((float*)priv_info->input_pad + i * in_c * padded_in_h * padded_in_w, input + i * in_c * in_h * in_w,
                           in_h, in_w, padded_in_h, padded_in_w, in_c, pad_h0, pad_w0);
            conv3x3s1_winograd43_sse((float*)priv_info->input_pad + i * in_c * padded_in_h * padded_in_w + g * input_size_g,
                                     output + i * out_c * out_h * out_w, priv_info->interleave_buffer,
                                     priv_info->dot_block, priv_info->transform_input, priv_info->output_bordered,
                                     biases, padded_in_w, padded_in_h, in_c, out_w, out_h, out_c, num_thread);
        }
    }

    /* optional fused activation */
    if (act_type >= 0)
    {
        relu(output, batch * output_size, act_type);
    }
    return 0;
}
convolution_3x3_pack16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Transform 3x3 kernels into the Winograd F(6x6, 3x3) domain
// (U = G * g * G^T, one 8x8 tile per (outch, inch) pair) and interleave
// the result into the pack-16 layout read by the AVX-512 dot kernels.
// src layout: 64-inch-outch, dst layout: 16b-16a-inch/16a-64-outch/16b.
// Assumes inch and outch are multiples of 16 (pack16 path).
static void conv3x3s1_winograd64_transform_kernel_pack16_avx512(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt)
{
    Mat kernel_tm;
    kernel_tm.create(8 * 8, inch, outch);

    // G: the 8x3 Winograd F(6,3) kernel-transform matrix
    const float ktm[8][3] = {
        {1.0f, 0.0f, 0.0f},
        {-2.0f / 9, -2.0f / 9, -2.0f / 9},
        {-2.0f / 9, 2.0f / 9, -2.0f / 9},
        {1.0f / 90, 1.0f / 45, 2.0f / 45},
        {1.0f / 90, -1.0f / 45, 2.0f / 45},
        {1.0f / 45, 1.0f / 90, 1.0f / 180},
        {1.0f / 45, -1.0f / 90, 1.0f / 180},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* k_src = (const float*)kernel + (p * inch + q) * 9;
            float* k_dst = kernel_tm.channel(p).row(q);

            // h = G * g : transform each of the three kernel rows
            float h[8][3];
            for (int i = 0; i < 8; i++)
            {
                for (int c = 0; c < 3; c++)
                {
                    const float* row = k_src + c * 3;
                    h[i][c] = row[0] * ktm[i][0] + row[1] * ktm[i][1] + row[2] * ktm[i][2];
                }
            }

            // U = h * G^T : 8x8 transformed tile, transposed layout
            for (int j = 0; j < 8; j++)
            {
                for (int i = 0; i < 8; i++)
                {
                    k_dst[j * 8 + i] = h[j][0] * ktm[i][0] + h[j][1] * ktm[i][1] + h[j][2] * ktm[i][2];
                }
            }
        }
    }

    // Interleave into the pack-16 weight layout.
    kernel_tm_pack8.create(inch / 16, 64, outch / 16, (size_t)4u * 16 * 16, 16 * 16);

    for (int q = 0; q + 15 < outch; q += 16)
    {
        Mat g0 = kernel_tm_pack8.channel(q / 16);
        for (int k = 0; k < 64; k++)
        {
            float* g00 = g0.row(k);
            for (int p = 0; p + 15 < inch; p += 16)
            {
                // 16 input channels x 16 output channels, one float each
                for (int i = 0; i < 16; i++)
                {
                    for (int j = 0; j < 16; j++)
                    {
                        *g00++ = kernel_tm.channel(q + j).row(p + i)[k];
                    }
                }
            }
        }
    }
}
static void conv3x3s1_winograd64_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 6;
int h_tiles = outh / 6;
const int tiles = w_tiles * h_tiles;
bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
conv3x3s1_winograd64_transform_input_pack16_avx512(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 11 < tiles; i += 12)
{
float* tmpptr = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x12
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(r0 + 16 * 3);
__m512 _r4 = _mm512_load_ps(r0 + 16 * 4);
__m512 _r5 = _mm512_load_ps(r0 + 16 * 5);
__m512 _r6 = _mm512_load_ps(r0 + 16 * 6);
__m512 _r7 = _mm512_load_ps(r0 + 16 * 7);
__m512 _r8 = _mm512_load_ps(r0 + 16 * 8);
__m512 _r9 = _mm512_load_ps(r0 + 16 * 9);
__m512 _ra = _mm512_load_ps(r0 + 16 * 10);
__m512 _rb = _mm512_load_ps(r0 + 16 * 11);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
__m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
__m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
__m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);
__m512 _tmp8 = _mm512_unpacklo_ps(_r8, _r9);
__m512 _tmp9 = _mm512_unpackhi_ps(_r8, _r9);
__m512 _tmpa = _mm512_unpacklo_ps(_ra, _rb);
__m512 _tmpb = _mm512_unpackhi_ps(_ra, _rb);
__m512 _tmpc = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpd = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpe = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpf = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpg = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmph = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpi = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpj = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpk = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpl = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpm = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpn = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(2, 0, 2, 0));
_tmp3 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(2, 0, 2, 0));
_tmp4 = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
_tmp5 = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(2, 0, 2, 0));
_tmp6 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(3, 1, 3, 1));
_tmp7 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
_tmp8 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(3, 1, 3, 1));
_tmp9 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(3, 1, 3, 1));
_tmpa = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));
_tmpb = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_r4 = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(2, 0, 2, 0));
_r5 = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(2, 0, 2, 0));
_r6 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r7 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_r8 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_r9 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_ra = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(3, 1, 3, 1));
_rb = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
_mm512_store_ps(tmpptr + 16 * 2, _r2);
_mm512_store_ps(tmpptr + 16 * 3, _r3);
_mm512_store_ps(tmpptr + 16 * 4, _r4);
_mm512_store_ps(tmpptr + 16 * 5, _r5);
_mm512_store_ps(tmpptr + 16 * 6, _r6);
_mm512_store_ps(tmpptr + 16 * 7, _r7);
_mm512_store_ps(tmpptr + 16 * 8, _r8);
_mm512_store_ps(tmpptr + 16 * 9, _r9);
_mm512_store_ps(tmpptr + 16 * 10, _ra);
_mm512_store_ps(tmpptr + 16 * 11, _rb);
tmpptr += 192;
r0 += bottom_blob_tm.cstep * 16;
}
}
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x8
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(r0 + 16 * 3);
__m512 _r4 = _mm512_load_ps(r0 + 16 * 4);
__m512 _r5 = _mm512_load_ps(r0 + 16 * 5);
__m512 _r6 = _mm512_load_ps(r0 + 16 * 6);
__m512 _r7 = _mm512_load_ps(r0 + 16 * 7);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
__m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
__m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
__m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);
__m512 _tmp8 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp9 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpa = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpb = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpc = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpd = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpe = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpf = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(2, 0, 2, 0));
_tmp3 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
_tmp4 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(3, 1, 3, 1));
_tmp5 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
_tmp6 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(3, 1, 3, 1));
_tmp7 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_r4 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r5 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_r6 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_r7 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
_mm512_store_ps(tmpptr + 16 * 2, _r2);
_mm512_store_ps(tmpptr + 16 * 3, _r3);
_mm512_store_ps(tmpptr + 16 * 4, _r4);
_mm512_store_ps(tmpptr + 16 * 5, _r5);
_mm512_store_ps(tmpptr + 16 * 6, _r6);
_mm512_store_ps(tmpptr + 16 * 7, _r7);
tmpptr += 128;
r0 += bottom_blob_tm.cstep * 16;
}
}
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x4
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(r0 + 16 * 3);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp5 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmp6 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp7 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_tmp3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r3 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
_mm512_store_ps(tmpptr + 16 * 2, _r2);
_mm512_store_ps(tmpptr + 16 * 3, _r3);
tmpptr += 64;
r0 += bottom_blob_tm.cstep * 16;
}
}
for (; i + 1 < tiles; i += 2)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x2
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
__m512 _tmp3 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
tmpptr += 32;
r0 += bottom_blob_tm.cstep * 16;
}
}
for (; i < tiles; i++)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
__m512 _val = _mm512_load_ps(r0);
_mm512_store_ps(tmpptr, _val);
tmpptr += 16;
r0 += bottom_blob_tm.cstep * 16;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
__m512 _sum2 = _mm512_setzero_ps();
__m512 _sum3 = _mm512_setzero_ps();
__m512 _sum4 = _mm512_setzero_ps();
__m512 _sum5 = _mm512_setzero_ps();
__m512 _sum6 = _mm512_setzero_ps();
__m512 _sum7 = _mm512_setzero_ps();
__m512 _sum8 = _mm512_setzero_ps();
__m512 _sum9 = _mm512_setzero_ps();
__m512 _suma = _mm512_setzero_ps();
__m512 _sumb = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(r0[2]);
__m512 _val3 = _mm512_set1_ps(r0[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
__m512 _val4 = _mm512_set1_ps(r0[4]);
__m512 _val5 = _mm512_set1_ps(r0[5]);
_sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
__m512 _val6 = _mm512_set1_ps(r0[6]);
__m512 _val7 = _mm512_set1_ps(r0[7]);
_sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);
__m512 _val8 = _mm512_set1_ps(r0[8]);
__m512 _val9 = _mm512_set1_ps(r0[9]);
_sum8 = _mm512_fmadd_ps(_val8, _w0, _sum8);
_sum9 = _mm512_fmadd_ps(_val9, _w0, _sum9);
__m512 _vala = _mm512_set1_ps(r0[10]);
__m512 _valb = _mm512_set1_ps(r0[11]);
_suma = _mm512_fmadd_ps(_vala, _w0, _suma);
_sumb = _mm512_fmadd_ps(_valb, _w0, _sumb);
r0 += 12;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
_mm512_store_ps(output0_tm + 16 * 2, _sum2);
_mm512_store_ps(output0_tm + 16 * 3, _sum3);
_mm512_store_ps(output0_tm + 16 * 4, _sum4);
_mm512_store_ps(output0_tm + 16 * 5, _sum5);
_mm512_store_ps(output0_tm + 16 * 6, _sum6);
_mm512_store_ps(output0_tm + 16 * 7, _sum7);
_mm512_store_ps(output0_tm + 16 * 8, _sum8);
_mm512_store_ps(output0_tm + 16 * 9, _sum9);
_mm512_store_ps(output0_tm + 16 * 10, _suma);
_mm512_store_ps(output0_tm + 16 * 11, _sumb);
output0_tm += 16 * 12;
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
__m512 _sum2 = _mm512_setzero_ps();
__m512 _sum3 = _mm512_setzero_ps();
__m512 _sum4 = _mm512_setzero_ps();
__m512 _sum5 = _mm512_setzero_ps();
__m512 _sum6 = _mm512_setzero_ps();
__m512 _sum7 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(r0[2]);
__m512 _val3 = _mm512_set1_ps(r0[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
__m512 _val4 = _mm512_set1_ps(r0[4]);
__m512 _val5 = _mm512_set1_ps(r0[5]);
_sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
__m512 _val6 = _mm512_set1_ps(r0[6]);
__m512 _val7 = _mm512_set1_ps(r0[7]);
_sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);
r0 += 8;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
_mm512_store_ps(output0_tm + 16 * 2, _sum2);
_mm512_store_ps(output0_tm + 16 * 3, _sum3);
_mm512_store_ps(output0_tm + 16 * 4, _sum4);
_mm512_store_ps(output0_tm + 16 * 5, _sum5);
_mm512_store_ps(output0_tm + 16 * 6, _sum6);
_mm512_store_ps(output0_tm + 16 * 7, _sum7);
output0_tm += 16 * 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
__m512 _sum2 = _mm512_setzero_ps();
__m512 _sum3 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(r0[2]);
__m512 _val3 = _mm512_set1_ps(r0[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
r0 += 4;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
_mm512_store_ps(output0_tm + 16 * 2, _sum2);
_mm512_store_ps(output0_tm + 16 * 3, _sum3);
output0_tm += 16 * 4;
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
r0 += 2;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
output0_tm += 16 * 2;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
r0 += 1;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
output0_tm += 16;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
conv3x3s1_winograd64_transform_output_pack16_avx512(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Pre-transform 3x3 convolution kernels into the Winograd F(4,3) domain and
// repack them for the pack16 GEMM.  Computes U = G * g * G^T per (outch, inch)
// kernel, then interleaves so that each GEMM step reads one contiguous
// 16-wide output-channel column.
static void conv3x3s1_winograd42_transform_kernel_pack16_avx512(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
    // winograd42 transform kernel
    Mat kernel_tm(6 * 6, inch, outch);

    // G: the 6x3 Winograd F(4,3) kernel transform matrix.
    const float G[6][3] = {
        {1.0f / 4, 0.0f, 0.0f},
        {-1.0f / 6, -1.0f / 6, -1.0f / 6},
        {-1.0f / 6, 1.0f / 6, -1.0f / 6},
        {1.0f / 24, 1.0f / 12, 1.0f / 6},
        {1.0f / 24, -1.0f / 12, 1.0f / 6},
        {0.0f, 0.0f, 1.0f}
    };

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        for (int q = 0; q < inch; q++)
        {
            const float* k0 = (const float*)kernel + (p * inch + q) * 9;
            float* ktm0 = kernel_tm.channel(p).row(q);

            // G * g : 6x3 intermediate; column c comes from kernel row c.
            float Gg[6][3];
            for (int i = 0; i < 6; i++)
            {
                for (int c = 0; c < 3; c++)
                {
                    const float* kr = k0 + c * 3;
                    Gg[i][c] = kr[0] * G[i][0] + kr[1] * G[i][1] + kr[2] * G[i][2];
                }
            }

            // (G * g) * G^T : the final 6x6 transformed kernel U.
            for (int j = 0; j < 6; j++)
            {
                for (int i = 0; i < 6; i++)
                {
                    ktm0[j * 6 + i] = Gg[j][0] * G[i][0] + Gg[j][1] * G[i][1] + Gg[j][2] * G[i][2];
                }
            }
        }
    }

    // interleave
    // src = 36-inch-outch
    // dst = 16b-16a-inch/16a-36-outch/16b
    kernel_tm_pack4.create(inch / 16, 36, outch / 16, (size_t)4u * 16 * 16, 16 * 16);

    for (int q = 0; q + 15 < outch; q += 16)
    {
        Mat g0 = kernel_tm_pack4.channel(q / 16);

        for (int k = 0; k < 36; k++)
        {
            float* g00 = g0.row<float>(k);

            for (int p = 0; p + 15 < inch; p += 16)
            {
                for (int i = 0; i < 16; i++)
                {
                    // 16 consecutive output channels for input channel p+i.
                    for (int j = 0; j < 16; j++)
                    {
                        *g00++ = kernel_tm.channel(q + j).row(p + i)[k];
                    }
                }
            }
        }
    }
}
// Winograd F(4x4, 3x3) convolution for fp32 pack16 (AVX-512) layout.
// Pipeline: pad input -> transform 6x6 input tiles into the 36-element
// winograd domain -> batched dot against the pre-transformed kernels
// (one small GEMM per winograd coefficient r in [0,36)) -> inverse
// transform to 4x4 output tiles -> crop the alignment padding.
// kernel_tm must have been produced by
// conv3x3s1_winograd42_transform_kernel_pack16_avx512.
static void conv3x3s1_winograd42_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    int inch = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    // pad to 4n+2: each 4x4 output tile reads a 6x6 input window, so the
    // padded input is (4n+2) x (4m+2)
    Mat bottom_blob_bordered = bottom_blob;
    outw = (outw + 3) / 4 * 4;
    outh = (outh + 3) / 4 * 4;
    w = outw + 2;
    h = outh + 2;
    copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);

    // BEGIN transform input
    Mat bottom_blob_tm;
    {
        int w_tiles = outw / 4;
        int h_tiles = outh / 4;
        const int tiles = w_tiles * h_tiles;

        bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        conv3x3s1_winograd42_transform_input_pack16_avx512(bottom_blob_bordered, bottom_blob_tm, opt);
    }
    bottom_blob_bordered = Mat();
    // END transform input

    // BEGIN dot
    Mat top_blob_tm;
    {
        int w_tm = outw / 4 * 6;
        int h_tm = outh / 4 * 6;

        const int tiles = h_tm / 6 * w_tm / 6;

        // permute
        // bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
        // Tiles are regrouped into greedy batches of 12/8/4/2/1 so the GEMM
        // below can run its widest micro-kernel on most of them; the row
        // count expression mirrors the row-index formulas used when writing.
        Mat bottom_blob_tm2;
        if (tiles >= 12)
            bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 8)
            bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 4)
            bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
        else if (tiles >= 2)
            bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
        else // if (tiles >= 1)
            bottom_blob_tm2.create(1 * inch, tiles, 36, 4u * elempack, elempack, opt.workspace_allocator);

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int r = 0; r < 36; r++)
        {
            Mat tm2 = bottom_blob_tm2.channel(r);

            // tile
            int i = 0;
            for (; i + 11 < tiles; i += 12)
            {
                float* tmpptr = tm2.row(i / 12);

                const float* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 16;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 16x12: turn 12 pack16 tile vectors into
                    // 12-wide rows so the GEMM can read scalars contiguously
                    __m512 _r0 = _mm512_load_ps(r0);
                    __m512 _r1 = _mm512_load_ps(r0 + 16);
                    __m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
                    __m512 _r3 = _mm512_load_ps(r0 + 16 * 3);
                    __m512 _r4 = _mm512_load_ps(r0 + 16 * 4);
                    __m512 _r5 = _mm512_load_ps(r0 + 16 * 5);
                    __m512 _r6 = _mm512_load_ps(r0 + 16 * 6);
                    __m512 _r7 = _mm512_load_ps(r0 + 16 * 7);
                    __m512 _r8 = _mm512_load_ps(r0 + 16 * 8);
                    __m512 _r9 = _mm512_load_ps(r0 + 16 * 9);
                    __m512 _ra = _mm512_load_ps(r0 + 16 * 10);
                    __m512 _rb = _mm512_load_ps(r0 + 16 * 11);

                    // stage 1: interleave lane pairs within 128-bit sublanes
                    __m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
                    __m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
                    __m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
                    __m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
                    __m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
                    __m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
                    __m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
                    __m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);
                    __m512 _tmp8 = _mm512_unpacklo_ps(_r8, _r9);
                    __m512 _tmp9 = _mm512_unpackhi_ps(_r8, _r9);
                    __m512 _tmpa = _mm512_unpacklo_ps(_ra, _rb);
                    __m512 _tmpb = _mm512_unpackhi_ps(_ra, _rb);

                    // stage 2: gather 4-element groups across register pairs
                    __m512 _tmpc = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
                    __m512 _tmpd = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
                    __m512 _tmpe = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
                    __m512 _tmpf = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
                    __m512 _tmpg = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
                    __m512 _tmph = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
                    __m512 _tmpi = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
                    __m512 _tmpj = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
                    __m512 _tmpk = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(1, 0, 1, 0));
                    __m512 _tmpl = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(3, 2, 3, 2));
                    __m512 _tmpm = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(1, 0, 1, 0));
                    __m512 _tmpn = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(3, 2, 3, 2));

                    // stage 3 + 4: permute 128-bit sublanes into final rows
                    _tmp0 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(2, 0, 2, 0));
                    _tmp1 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
                    _tmp2 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(2, 0, 2, 0));
                    _tmp3 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(2, 0, 2, 0));
                    _tmp4 = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
                    _tmp5 = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(2, 0, 2, 0));
                    _tmp6 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(3, 1, 3, 1));
                    _tmp7 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
                    _tmp8 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(3, 1, 3, 1));
                    _tmp9 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(3, 1, 3, 1));
                    _tmpa = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));
                    _tmpb = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(3, 1, 3, 1));

                    _r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
                    _r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
                    _r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
                    _r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
                    _r4 = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(2, 0, 2, 0));
                    _r5 = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(2, 0, 2, 0));
                    _r6 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
                    _r7 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
                    _r8 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
                    _r9 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
                    _ra = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(3, 1, 3, 1));
                    _rb = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(3, 1, 3, 1));

                    _mm512_store_ps(tmpptr, _r0);
                    _mm512_store_ps(tmpptr + 16, _r1);
                    _mm512_store_ps(tmpptr + 16 * 2, _r2);
                    _mm512_store_ps(tmpptr + 16 * 3, _r3);
                    _mm512_store_ps(tmpptr + 16 * 4, _r4);
                    _mm512_store_ps(tmpptr + 16 * 5, _r5);
                    _mm512_store_ps(tmpptr + 16 * 6, _r6);
                    _mm512_store_ps(tmpptr + 16 * 7, _r7);
                    _mm512_store_ps(tmpptr + 16 * 8, _r8);
                    _mm512_store_ps(tmpptr + 16 * 9, _r9);
                    _mm512_store_ps(tmpptr + 16 * 10, _ra);
                    _mm512_store_ps(tmpptr + 16 * 11, _rb);

                    r0 += bottom_blob_tm.cstep * 16; // next input channel
                    tmpptr += 192;                   // 12 tiles * 16 floats
                }
            }
            for (; i + 7 < tiles; i += 8)
            {
                float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);

                const float* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 16;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 16x8
                    __m512 _r0 = _mm512_load_ps(r0);
                    __m512 _r1 = _mm512_load_ps(r0 + 16);
                    __m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
                    __m512 _r3 = _mm512_load_ps(r0 + 16 * 3);
                    __m512 _r4 = _mm512_load_ps(r0 + 16 * 4);
                    __m512 _r5 = _mm512_load_ps(r0 + 16 * 5);
                    __m512 _r6 = _mm512_load_ps(r0 + 16 * 6);
                    __m512 _r7 = _mm512_load_ps(r0 + 16 * 7);

                    __m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
                    __m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
                    __m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
                    __m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
                    __m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
                    __m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
                    __m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
                    __m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);
                    __m512 _tmp8 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
                    __m512 _tmp9 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
                    __m512 _tmpa = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
                    __m512 _tmpb = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
                    __m512 _tmpc = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
                    __m512 _tmpd = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
                    __m512 _tmpe = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
                    __m512 _tmpf = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
                    _tmp0 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(2, 0, 2, 0));
                    _tmp1 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
                    _tmp2 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(2, 0, 2, 0));
                    _tmp3 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
                    _tmp4 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(3, 1, 3, 1));
                    _tmp5 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
                    _tmp6 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(3, 1, 3, 1));
                    _tmp7 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));
                    _r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
                    _r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
                    _r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
                    _r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
                    _r4 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
                    _r5 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
                    _r6 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
                    _r7 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));

                    _mm512_store_ps(tmpptr, _r0);
                    _mm512_store_ps(tmpptr + 16, _r1);
                    _mm512_store_ps(tmpptr + 16 * 2, _r2);
                    _mm512_store_ps(tmpptr + 16 * 3, _r3);
                    _mm512_store_ps(tmpptr + 16 * 4, _r4);
                    _mm512_store_ps(tmpptr + 16 * 5, _r5);
                    _mm512_store_ps(tmpptr + 16 * 6, _r6);
                    _mm512_store_ps(tmpptr + 16 * 7, _r7);

                    r0 += bottom_blob_tm.cstep * 16;
                    tmpptr += 128; // 8 tiles * 16 floats
                }
            }
            for (; i + 3 < tiles; i += 4)
            {
                float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);

                const float* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 16;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 16x4
                    __m512 _r0 = _mm512_load_ps(r0);
                    __m512 _r1 = _mm512_load_ps(r0 + 16);
                    __m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
                    __m512 _r3 = _mm512_load_ps(r0 + 16 * 3);

                    __m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
                    __m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
                    __m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
                    __m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
                    __m512 _tmp4 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
                    __m512 _tmp5 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
                    __m512 _tmp6 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
                    __m512 _tmp7 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
                    _tmp0 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
                    _tmp1 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
                    _tmp2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
                    _tmp3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
                    _r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
                    _r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
                    _r2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
                    _r3 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));

                    _mm512_store_ps(tmpptr, _r0);
                    _mm512_store_ps(tmpptr + 16, _r1);
                    _mm512_store_ps(tmpptr + 16 * 2, _r2);
                    _mm512_store_ps(tmpptr + 16 * 3, _r3);

                    r0 += bottom_blob_tm.cstep * 16;
                    tmpptr += 64; // 4 tiles * 16 floats
                }
            }
            for (; i + 1 < tiles; i += 2)
            {
                float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);

                const float* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 16;

                for (int q = 0; q < inch; q++)
                {
                    // transpose 16x2
                    __m512 _r0 = _mm512_load_ps(r0);
                    __m512 _r1 = _mm512_load_ps(r0 + 16);

                    __m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
                    __m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
                    __m512 _tmp2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
                    __m512 _tmp3 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
                    _r0 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
                    _r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));

                    _mm512_store_ps(tmpptr, _r0);
                    _mm512_store_ps(tmpptr + 16, _r1);

                    r0 += bottom_blob_tm.cstep * 16;
                    tmpptr += 32; // 2 tiles * 16 floats
                }
            }
            for (; i < tiles; i++)
            {
                float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);

                const float* r0 = bottom_blob_tm;
                r0 += (r * tiles + i) * 16;

                for (int q = 0; q < inch; q++)
                {
                    // single tile: no transpose needed, copy the vector as-is
                    __m512 _val = _mm512_load_ps(r0);
                    _mm512_store_ps(tmpptr, _val);

                    r0 += bottom_blob_tm.cstep * 16;
                    tmpptr += 16;
                }
            }
        }

        bottom_blob_tm = Mat();
        // permute end

        top_blob_tm.create(tiles, 36, outch, 4u * elempack, elempack, opt.workspace_allocator);

        // GEMM: for each output channel p and winograd coefficient r,
        // accumulate over inch*16 input scalars; the micro-kernels below
        // broadcast 12/8/4/2/1 tile scalars against one 16-wide kernel column.
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outch; p++)
        {
            float* output0_tm = top_blob_tm.channel(p);

            const Mat kernel0_tm = kernel_tm.channel(p);

            for (int r = 0; r < 36; r++)
            {
                const Mat bb2 = bottom_blob_tm2.channel(r);

                int i = 0;
                for (; i + 11 < tiles; i += 12)
                {
                    const float* r0 = bb2.row(i / 12);
                    const float* k0 = kernel0_tm.row(r);

                    int nn = inch * 16; // inch always > 0

                    __m512 _sum0 = _mm512_setzero_ps();
                    __m512 _sum1 = _mm512_setzero_ps();
                    __m512 _sum2 = _mm512_setzero_ps();
                    __m512 _sum3 = _mm512_setzero_ps();
                    __m512 _sum4 = _mm512_setzero_ps();
                    __m512 _sum5 = _mm512_setzero_ps();
                    __m512 _sum6 = _mm512_setzero_ps();
                    __m512 _sum7 = _mm512_setzero_ps();
                    __m512 _sum8 = _mm512_setzero_ps();
                    __m512 _sum9 = _mm512_setzero_ps();
                    __m512 _suma = _mm512_setzero_ps();
                    __m512 _sumb = _mm512_setzero_ps();

                    for (int j = 0; j < nn; j++)
                    {
                        __m512 _w0 = _mm512_load_ps(k0);

                        __m512 _val0 = _mm512_set1_ps(r0[0]);
                        __m512 _val1 = _mm512_set1_ps(r0[1]);
                        _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
                        _sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
                        __m512 _val2 = _mm512_set1_ps(r0[2]);
                        __m512 _val3 = _mm512_set1_ps(r0[3]);
                        _sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
                        _sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
                        __m512 _val4 = _mm512_set1_ps(r0[4]);
                        __m512 _val5 = _mm512_set1_ps(r0[5]);
                        _sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
                        _sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
                        __m512 _val6 = _mm512_set1_ps(r0[6]);
                        __m512 _val7 = _mm512_set1_ps(r0[7]);
                        _sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
                        _sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);
                        __m512 _val8 = _mm512_set1_ps(r0[8]);
                        __m512 _val9 = _mm512_set1_ps(r0[9]);
                        _sum8 = _mm512_fmadd_ps(_val8, _w0, _sum8);
                        _sum9 = _mm512_fmadd_ps(_val9, _w0, _sum9);
                        __m512 _vala = _mm512_set1_ps(r0[10]);
                        __m512 _valb = _mm512_set1_ps(r0[11]);
                        _suma = _mm512_fmadd_ps(_vala, _w0, _suma);
                        _sumb = _mm512_fmadd_ps(_valb, _w0, _sumb);

                        r0 += 12;
                        k0 += 16;
                    }

                    _mm512_store_ps(output0_tm, _sum0);
                    _mm512_store_ps(output0_tm + 16, _sum1);
                    _mm512_store_ps(output0_tm + 16 * 2, _sum2);
                    _mm512_store_ps(output0_tm + 16 * 3, _sum3);
                    _mm512_store_ps(output0_tm + 16 * 4, _sum4);
                    _mm512_store_ps(output0_tm + 16 * 5, _sum5);
                    _mm512_store_ps(output0_tm + 16 * 6, _sum6);
                    _mm512_store_ps(output0_tm + 16 * 7, _sum7);
                    _mm512_store_ps(output0_tm + 16 * 8, _sum8);
                    _mm512_store_ps(output0_tm + 16 * 9, _sum9);
                    _mm512_store_ps(output0_tm + 16 * 10, _suma);
                    _mm512_store_ps(output0_tm + 16 * 11, _sumb);
                    output0_tm += 16 * 12;
                }
                for (; i + 7 < tiles; i += 8)
                {
                    const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
                    const float* k0 = kernel0_tm.row(r);

                    int nn = inch * 16; // inch always > 0

                    __m512 _sum0 = _mm512_setzero_ps();
                    __m512 _sum1 = _mm512_setzero_ps();
                    __m512 _sum2 = _mm512_setzero_ps();
                    __m512 _sum3 = _mm512_setzero_ps();
                    __m512 _sum4 = _mm512_setzero_ps();
                    __m512 _sum5 = _mm512_setzero_ps();
                    __m512 _sum6 = _mm512_setzero_ps();
                    __m512 _sum7 = _mm512_setzero_ps();

                    for (int j = 0; j < nn; j++)
                    {
                        __m512 _w0 = _mm512_load_ps(k0);

                        __m512 _val0 = _mm512_set1_ps(r0[0]);
                        __m512 _val1 = _mm512_set1_ps(r0[1]);
                        _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
                        _sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
                        __m512 _val2 = _mm512_set1_ps(r0[2]);
                        __m512 _val3 = _mm512_set1_ps(r0[3]);
                        _sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
                        _sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
                        __m512 _val4 = _mm512_set1_ps(r0[4]);
                        __m512 _val5 = _mm512_set1_ps(r0[5]);
                        _sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
                        _sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
                        __m512 _val6 = _mm512_set1_ps(r0[6]);
                        __m512 _val7 = _mm512_set1_ps(r0[7]);
                        _sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
                        _sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);

                        r0 += 8;
                        k0 += 16;
                    }

                    _mm512_store_ps(output0_tm, _sum0);
                    _mm512_store_ps(output0_tm + 16, _sum1);
                    _mm512_store_ps(output0_tm + 16 * 2, _sum2);
                    _mm512_store_ps(output0_tm + 16 * 3, _sum3);
                    _mm512_store_ps(output0_tm + 16 * 4, _sum4);
                    _mm512_store_ps(output0_tm + 16 * 5, _sum5);
                    _mm512_store_ps(output0_tm + 16 * 6, _sum6);
                    _mm512_store_ps(output0_tm + 16 * 7, _sum7);
                    output0_tm += 16 * 8;
                }
                for (; i + 3 < tiles; i += 4)
                {
                    const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
                    const float* k0 = kernel0_tm.row(r);

                    int nn = inch * 16; // inch always > 0

                    __m512 _sum0 = _mm512_setzero_ps();
                    __m512 _sum1 = _mm512_setzero_ps();
                    __m512 _sum2 = _mm512_setzero_ps();
                    __m512 _sum3 = _mm512_setzero_ps();

                    for (int j = 0; j < nn; j++)
                    {
                        __m512 _w0 = _mm512_load_ps(k0);

                        __m512 _val0 = _mm512_set1_ps(r0[0]);
                        __m512 _val1 = _mm512_set1_ps(r0[1]);
                        _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
                        _sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
                        __m512 _val2 = _mm512_set1_ps(r0[2]);
                        __m512 _val3 = _mm512_set1_ps(r0[3]);
                        _sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
                        _sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);

                        r0 += 4;
                        k0 += 16;
                    }

                    _mm512_store_ps(output0_tm, _sum0);
                    _mm512_store_ps(output0_tm + 16, _sum1);
                    _mm512_store_ps(output0_tm + 16 * 2, _sum2);
                    _mm512_store_ps(output0_tm + 16 * 3, _sum3);
                    output0_tm += 16 * 4;
                }
                for (; i + 1 < tiles; i += 2)
                {
                    const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
                    const float* k0 = kernel0_tm.row(r);

                    int nn = inch * 16; // inch always > 0

                    __m512 _sum0 = _mm512_setzero_ps();
                    __m512 _sum1 = _mm512_setzero_ps();

                    for (int j = 0; j < nn; j++)
                    {
                        __m512 _w0 = _mm512_load_ps(k0);

                        __m512 _val0 = _mm512_set1_ps(r0[0]);
                        __m512 _val1 = _mm512_set1_ps(r0[1]);
                        _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
                        _sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);

                        r0 += 2;
                        k0 += 16;
                    }

                    _mm512_store_ps(output0_tm, _sum0);
                    _mm512_store_ps(output0_tm + 16, _sum1);
                    output0_tm += 16 * 2;
                }
                for (; i < tiles; i++)
                {
                    const float* r0 = bb2.row<const float>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
                    const float* k0 = kernel0_tm.row<const float>(r);

                    int nn = inch * 16; // inch always > 0

                    __m512 _sum0 = _mm512_setzero_ps();

                    for (int j = 0; j < nn; j++)
                    {
                        __m512 _w0 = _mm512_load_ps(k0);
                        __m512 _val0 = _mm512_set1_ps(r0[0]);
                        _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);

                        r0 += 1;
                        k0 += 16;
                    }

                    _mm512_store_ps(output0_tm, _sum0);
                    output0_tm += 16;
                }
            }
        }
    }
    bottom_blob_tm = Mat(); // NOTE(review): already released above; harmless no-op
    // END dot

    // BEGIN transform output
    Mat top_blob_bordered;
    if (outw == top_blob.w && outh == top_blob.h)
    {
        // output already 4-aligned; write the inverse transform in place
        top_blob_bordered = top_blob;
    }
    else
    {
        top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
    }
    {
        conv3x3s1_winograd42_transform_output_pack16_avx512(top_blob_tm, top_blob_bordered, bias, opt);
    }
    // END transform output

    // cut result pad
    copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2015 by Contributors
* \file utils.h
 * \brief Basic utility functions.
*/
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_
#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <nnvm/node.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/storage.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>
#include <memory>
#include <vector>
#include <type_traits>
#include <utility>
#include <random>
#include <string>
#include <thread>
#include <algorithm>
#include <functional>
#include <limits>
#include "../operator/mxnet_op.h"
#if MXNET_USE_MKLDNN == 1
#include "../operator/nn/mkldnn/mkldnn_base-inl.h"
#endif
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
#include <windows.h>
#else
#include <unistd.h>
#endif
namespace mxnet {
namespace common {
#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
// Returns the OS process id of the calling process (Windows variant).
inline size_t current_process_id() { return ::GetCurrentProcessId(); }
#else
// Returns the OS process id of the calling process (POSIX variant).
inline size_t current_process_id() { return getpid(); }
#endif
/*!
* \brief IndPtr should be non-negative, in non-decreasing order, start with 0
* and end with value equal with size of indices.
*/
struct csr_indptr_check {
  /*!
   * \brief Elementwise kernel: set *out = kCSRIndPtrErr if indptr entry i+1
   *        violates the CSR contract (non-negative, non-decreasing, first
   *        entry 0, last entry equal to the number of stored indices).
   * \param i        element index in [0, end)
   * \param out      single-element error flag, written only on failure
   * \param indptr   row-pointer array of length end + 1
   * \param end      number of rows (indptr length minus one)
   * \param idx_size expected value of indptr[end] (size of the indices array)
   */
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr,
                                  const nnvm::dim_t end, const nnvm::dim_t idx_size) {
    if (indptr[i+1] < 0 || indptr[i+1] < indptr[i] ||  // negative or decreasing
        (i == 0 && indptr[i] != 0) ||                  // must start at zero
        (i == end - 1 && indptr[end] != idx_size))     // must end at idx_size
      *out = kCSRIndPtrErr;
  }
};
/*!
* \brief Indices should be non-negative, less than the number of columns
* and in ascending order per row.
*/
struct csr_idx_check {
  /*!
   * \brief Per-row kernel: set *out = kCSRIdxErr if any column index in row i
   *        is negative, >= ncols, or not strictly ascending within the row.
   * \param i      row index
   * \param out    single-element error flag, written only on failure
   * \param idx    column-index array
   * \param indptr row-pointer array delimiting row i as [indptr[i], indptr[i+1])
   * \param ncols  number of columns in the matrix
   */
  template<typename DType, typename IType, typename RType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const RType* indptr, const nnvm::dim_t ncols) {
    for (RType j = indptr[i]; j < indptr[i+1]; j++) {
      if (idx[j] >= ncols || idx[j] < 0 ||
          (j < indptr[i+1] - 1 && idx[j] >= idx[j+1])) {  // out of range or not ascending
        *out = kCSRIdxErr;
        break;  // one bad index is enough for this row
      }
    }
  }
};
/*!
* \brief Indices of RSPNDArray should be non-negative,
* less than the size of first dimension and in ascending order
*/
struct rsp_idx_check {
  /*!
   * \brief Elementwise kernel: set *out = kRSPIdxErr if row index i is
   *        negative, >= nrows, or not strictly less than its successor.
   * \param i     element index in the row-index array
   * \param out   single-element error flag, written only on failure
   * \param idx   row-index array
   * \param end   last valid index (array length - 1); the successor check
   *              idx[i+1] is only performed while i < end
   * \param nrows size of the dense first dimension
   */
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const nnvm::dim_t end, const nnvm::dim_t nrows) {
    if ((i < end && idx[i+1] <= idx[i])  // must be strictly ascending
        || idx[i] < 0 || idx[i] >= nrows)
      *out = kRSPIdxErr;
  }
};
template<typename xpu>
void CheckFormatWrapper(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check);
/*!
* \brief Check the validity of CSRNDArray.
* \param rctx Execution context.
* \param input Input NDArray of CSRStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template<typename xpu>
void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check) {
  using namespace op::mxnet_op;
  CHECK_EQ(input.storage_type(), kCSRStorage)
    << "CheckFormatCSRImpl is for CSRNDArray";
  const mxnet::TShape shape = input.shape();
  const mxnet::TShape idx_shape = input.aux_shape(csr::kIdx);
  const mxnet::TShape indptr_shape = input.aux_shape(csr::kIndPtr);
  const mxnet::TShape storage_shape = input.storage_shape();
  // O(1) structural checks: a CSR array must be 2-D, its aux/data arrays 1-D,
  // indptr of length rows + 1, and one column index per stored value.
  if ((shape.ndim() != 2) ||
      (idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) ||
      (indptr_shape[0] != shape[0] + 1) ||
      (idx_shape[0] != storage_shape[0])) {
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      DType* err = err_cpu.dptr<DType>();
      *err = kCSRShapeErr;
    });
    return;
  }
  if (full_check) {
    // O(N) content checks, run on the array's own device (xpu), with the
    // resulting error code copied back into err_cpu at the end.
    MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
      MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, {
        MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, {
          mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
          // Device-side scratch flag, initialized to kNormalErr (no error).
          NDArray ret_xpu = NDArray(mshadow::Shape1(1),
                                    rctx.get_ctx(), false, err_cpu.type_flag_);
          TBlob val_xpu = ret_xpu.data();
          Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
          // Validate indptr: non-negative, non-decreasing, 0 ... idx_size.
          Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
            input.aux_data(csr::kIndPtr).dptr<RType>(),
            indptr_shape[0] - 1, idx_shape[0]);
          // no need to check indices if indices are empty
          if (idx_shape[0] != 0) {
            // Validate per-row column indices: in range and ascending.
            Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(),
              input.aux_data(csr::kIdx).dptr<IType>(),
              input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]);
          }
          mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
                        val_xpu.get<xpu, 1, DType>(s), s);
        });
      });
    });
  }
}
/*!
* \brief Check the validity of RowSparseNDArray.
* \param rctx Execution context.
* \param input Input NDArray of RowSparseStorage.
* \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations,
* otherwise basic check, O(1) operations.
*/
template<typename xpu>
void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input,
const TBlob &err_cpu, const bool full_check) {
using namespace op::mxnet_op;
CHECK_EQ(input.storage_type(), kRowSparseStorage)
<< "CheckFormatRSPImpl is for RSPNDArray";
const mxnet::TShape idx_shape = input.aux_shape(rowsparse::kIdx);
// O(1) check: one stored row per row index.
// NOTE(review): idx_shape ndim is not validated before indexing [0];
// presumably guaranteed by the RSP storage invariants — confirm.
if (idx_shape[0] != input.storage_shape()[0]) {
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
DType* err = err_cpu.dptr<DType>();
*err = kRSPShapeErr;
});
return;
}
// An empty index array is trivially valid.
if (idx_shape[0] == 0) {
return;
}
if (full_check) {
// O(N) check on device: indices strictly increasing and within
// [0, num_rows); error flag copied back to err_cpu at the end.
MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, {
mshadow::Stream<xpu> *s = rctx.get_stream<xpu>();
NDArray ret_xpu = NDArray(mshadow::Shape1(1),
rctx.get_ctx(), false, err_cpu.type_flag_);
TBlob val_xpu = ret_xpu.data();
Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>());
// `end` = idx_shape[0] - 1 so the kernel never reads idx[i+1] past the end.
Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0],
val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(),
idx_shape[0] - 1, input.shape()[0]);
mshadow::Copy(err_cpu.get<cpu, 1, DType>(),
val_xpu.get<xpu, 1, DType>(s), s);
});
});
}
}
/*!
 * \brief Dispatch a storage-format validity check based on the input's
 *        storage type. Dense arrays need no check; unknown types are fatal.
 */
template<typename xpu>
void CheckFormatImpl(const RunContext &rctx, const NDArray &input,
                     const TBlob &err_cpu, const bool full_check) {
  const int stype = input.storage_type();
  switch (stype) {
    case kCSRStorage:
      CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kRowSparseStorage:
      CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check);
      break;
    case kDefaultStorage:
      // dense storage has no auxiliary structure to validate
      break;
    default:
      LOG(FATAL) << "Unknown storage type " << stype;
      break;
  }
}
/*! \brief Pick rows specified by user input index array from a row sparse ndarray
* and save them in the output sparse ndarray.
*/
template<typename xpu>
void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s,
const NDArray& input_nd,
const TBlob& idx_data,
const OpReqType req,
NDArray* output_nd);
/* \brief Casts tensor storage type to the new type.
*/
template<typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);
/*! \brief returns true if every storage type in `vstorage` equals `stype`;
 *  an empty vector yields false.
 */
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
                                const NDArrayStorageType stype) {
  if (vstorage.empty()) {
    return false;
  }
  for (const auto& entry : vstorage) {
    if (entry != stype) {
      return false;
    }
  }
  return true;
}
/*! \brief returns true if every storage type in `vstorage` is either `stype1`
 *  or `stype2`; *has_both reports whether both appeared. An empty vector
 *  yields false (with *has_both cleared).
 */
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
                                const NDArrayStorageType stype1,
                                const NDArrayStorageType stype2,
                                bool *has_both) {
  if (has_both) {
    *has_both = false;
  }
  if (vstorage.empty()) {
    return false;
  }
  bool seen_first = false;
  bool seen_second = false;
  for (const auto entry : vstorage) {
    if (entry == stype1) {
      seen_first = true;
    } else if (entry == stype2) {
      seen_second = true;
    } else {
      return false;  // a third storage type disqualifies the whole vector
    }
  }
  if (has_both) {
    *has_both = seen_first && seen_second;
  }
  return true;
}
/*! \brief returns true if every array in `ndarrays` has storage type `stype`;
 *  an empty vector yields false.
 */
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype) {
  if (ndarrays.empty()) {
    return false;
  }
  for (const auto& arr : ndarrays) {
    if (arr.storage_type() != stype) {
      return false;
    }
  }
  return true;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as targets `stype1` or `stype2`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool *has_both) {
if (has_both) {
*has_both = false;
}
if (!ndarrays.empty()) {
uint8_t has = 0;
for (const auto& nd : ndarrays) {
const NDArrayStorageType stype = nd.storage_type();
if (stype == stype1) {
has |= 1;
} else if (stype == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief returns true if at least one array in `ndarrays` has storage type
 *  `stype`; an empty vector yields false.
 */
inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays,
                                const NDArrayStorageType stype) {
  for (const auto& arr : ndarrays) {
    if (arr.storage_type() == stype) {
      return true;
    }
  }
  return false;  // also covers the empty-input case
}
/*! \brief returns true if at least one storage-type code in `ndstypes`
 *  equals `stype`; an empty vector yields false.
 */
inline bool ContainsStorageType(const std::vector<int>& ndstypes,
                                const NDArrayStorageType stype) {
  for (const int code : ndstypes) {
    if (code == stype) {
      return true;
    }
  }
  return false;  // also covers the empty-input case
}
/*! \brief get string representation of dispatch_mode */
inline std::string dispatch_mode_string(const DispatchMode x) {
switch (x) {
case DispatchMode::kFCompute:
return "fcompute";
case DispatchMode::kFComputeEx:
return "fcompute_ex";
case DispatchMode::kFComputeFallback:
return "fcompute_fallback";
case DispatchMode::kVariable:
return "variable";
case DispatchMode::kUndefined:
return "undefined";
}
return "unknown";
}
/*! \brief get string representation of storage_type */
inline std::string stype_string(const int x) {
  if (x == kDefaultStorage)   return "default";
  if (x == kCSRStorage)       return "csr";
  if (x == kRowSparseStorage) return "row_sparse";
  return "unknown";
}
/*! \brief get string representation of device type */
inline std::string dev_type_string(const int dev_type) {
  if (dev_type == Context::kCPU)       return "cpu";
  if (dev_type == Context::kGPU)       return "gpu";
  if (dev_type == Context::kCPUPinned) return "cpu_pinned";
  if (dev_type == Context::kCPUShared) return "cpu_shared";
  return "unknown";
}
/*! \brief look up `attr_name` in the node's attribute dictionary, returning
 *  `default_val` when the key is absent.
 *
 *  Fix: the original performed two hash lookups (find() followed by at());
 *  a single find() suffices.
 */
inline std::string attr_value_string(const nnvm::NodeAttrs& attrs,
                                     const std::string& attr_name,
                                     std::string default_val = "") {
  const auto it = attrs.dict.find(attr_name);
  if (it == attrs.dict.end()) {
    return default_val;
  }
  return it->second;
}
/*! \brief get string representation of the operator: name, input/output
 *  storage types, parameter dict and device mask — used in fallback logs.
 *
 *  Fix: iterate attrs.dict by const reference; `for (auto kv : ...)` copied
 *  a pair<string, string> (two allocations) per parameter.
 */
inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs,
                                         const int dev_mask,
                                         const std::vector<int>& in_attrs,
                                         const std::vector<int>& out_attrs) {
  std::ostringstream os;
  os << "operator = " << attrs.op->name
     << "\ninput storage types = [";
  for (const int attr : in_attrs) {
    os << stype_string(attr) << ", ";
  }
  os << "]\n"
     << "output storage types = [";
  for (const int attr : out_attrs) {
    os << stype_string(attr) << ", ";
  }
  os << "]\n"
     << "params = {";
  for (const auto& kv : attrs.dict) {
    os << "\"" << kv.first << "\" : " << kv.second << ", ";
  }
  os << "}\n"
     << "context.dev_mask = " << dev_type_string(dev_mask);
  return os.str();
}
/*! \brief get string representation of the operator invocation (delegates to
 *  operator_stype_string after extracting the storage types).
 *
 *  Fix: the transform lambda took `const NDArray` by VALUE, copying every
 *  input/output array (engine handle churn) just to read its storage type;
 *  take it by const reference instead. Also drop the redundant `result`
 *  accumulator string.
 */
inline std::string operator_string(const nnvm::NodeAttrs& attrs,
                                  const OpContext& ctx,
                                  const std::vector<NDArray>& inputs,
                                  const std::vector<OpReqType>& req,
                                  const std::vector<NDArray>& outputs) {
  std::vector<int> in_stypes;
  std::vector<int> out_stypes;
  in_stypes.reserve(inputs.size());
  out_stypes.reserve(outputs.size());
  auto xform = [](const NDArray& arr) -> int { return arr.storage_type(); };
  std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform);
  std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform);
  return operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
}
/*! \brief log a message at most once per thread. Intended for storage
 *  fallback warnings; the seen-set lives in thread-local storage.
 */
inline void LogOnce(const std::string& message) {
  using LogStore = dmlc::ThreadLocalStore<std::unordered_set<std::string>>;
  std::unordered_set<std::string>* seen = LogStore::Get();
  // insert().second is true only the first time this message is seen
  if (seen->insert(message).second) {
    LOG(INFO) << message;
  }
}
/*! \brief log storage fallback event: emitted (once per distinct message)
 *  when an operator cannot handle the given sparse storage types and a
 *  dense fallback is dispatched instead. Silenced by setting
 *  MXNET_STORAGE_FALLBACK_LOG_VERBOSE=0.
 */
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
const int dev_mask,
const std::vector<int>* in_attrs,
const std::vector<int>* out_attrs) {
static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
if (!log) return;
const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
std::ostringstream os;
const char* warning = "\nThe operator with default storage type will be dispatched "
"for execution. You're seeing this warning message because the operator above is unable "
"to process the given ndarrays with specified storage types, context and parameter. "
"Temporary dense ndarrays are generated in order to execute the operator. "
"This does not affect the correctness of the programme. "
"You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to "
"0 to suppress this warning.";
os << "\nStorage type fallback detected:\n" << op_str << warning;
// LogOnce de-duplicates per thread, so identical fallbacks do not spam the log.
LogOnce(os.str());
#if MXNET_USE_MKLDNN == 1
// Additional one-shot hints about MKLDNN configuration that commonly
// explains unexpected fallbacks.
if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. "
"You can re-enable by setting MXNET_MKLDNN_ENABLED=1");
if (GetMKLDNNCacheSize() != -1) common::LogOnce("MXNET_MKLDNN_CACHE_NUM is set."
"Should only be set if "
"your model has variable input shapes, "
"as cache size may grow unbounded");
#endif
}
// heuristic to determine the number of CPU worker threads per GPU;
// overridable via MXNET_GPU_WORKER_NTHREADS (default 2, the
// resource-efficient choice)
inline int GetNumThreadsPerGPU() {
  return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}
// heuristic for the number of matching colors, i.e. how much parallelism
// the executor can use within each GPU; capped by the per-GPU worker count
inline int GetExecNumMatchColor() {
  const int requested = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
  const int cap = GetNumThreadsPerGPU();
  return requested < cap ? requested : cap;
}
/*!
 * \brief Sum the n elements of array `a` into accumulator type V, starting
 *        from `start`. Uses an OpenMP reduction when OpenMP is enabled.
 */
template<typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
  V acc = start;
#pragma omp parallel for reduction(+:acc)
  for (int idx = 0; idx < n; ++idx) {
    acc += a[idx];
  }
  return acc;
}
/*!
 * \brief
 * Recursive worker for ParallelSort — do not call directly.
 * Ranges shorter than `grainsize` are sorted sequentially; longer ranges are
 * split in half, the left half sorted on a spawned thread, the right half on
 * the current thread, and the two sorted halves merged in place.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len,
                        size_t grainsize, const Compare& comp) {
  if (len < grainsize) {
    std::sort(first, first + len, comp);
    return;
  }
  const size_t half = len / 2;
  std::thread worker(ParallelSortHelper<RandomIt, Compare>,
                     first, half, grainsize, comp);
  ParallelSortHelper(first + half, len - half, grainsize, comp);
  worker.join();
  std::inplace_merge(first, first + half, first + len, comp);
}
/*!
 * \brief
 * Sort the elements in the range [first, last) into the ascending order
 * defined by the comparator comp. Long ranges are recursively split in two,
 * each half sorted on its own thread.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 *
 * Fix: the original divided by num_threads without a zero guard, so
 * ParallelSort(first, last, 0, comp) was a division by zero; clamp the
 * thread count to at least 1.
 */
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
  const size_t num = static_cast<size_t>(std::distance(first, last));
  const size_t threads = num_threads == 0 ? 1 : num_threads;
  // at least 16K elements per task so thread overhead cannot dominate
  const size_t grainsize = std::max(num / threads + 5, static_cast<size_t>(1024 * 16));
  ParallelSortHelper(first, num, grainsize, comp);
}
/*!
 * \brief
 * Sort the elements in the range [first, last) into ascending order using
 * the default operator<. Forwards to the comparator overload, which splits
 * long ranges across threads.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
  using value_type = typename std::iterator_traits<RandomIt>::value_type;
  ParallelSort(first, last, num_threads, std::less<value_type>());
}
/*!
* \brief Random Engine
*/
typedef std::mt19937 RANDOM_ENGINE;
/*!
 * \brief Helper functions.
 */
namespace helper {
/*!
 * \brief Helper for non-array type `T`: exposes only `SingleObject`, so the
 *        single-object MakeUnique overload participates in overload
 *        resolution exclusively for non-array types.
 */
template <class T>
struct UniqueIf {
/*!
 * \brief Type of `T`.
 */
using SingleObject = std::unique_ptr<T>;
};
/*!
 * \brief Helper for an array of unknown bound `T[]`: exposes only
 *        `UnknownBound`, selecting the array-of-n MakeUnique overload.
 */
template <class T>
struct UniqueIf<T[]> {
/*!
 * \brief Type of `T`.
 */
using UnknownBound = std::unique_ptr<T[]>;
};
/*!
 * \brief Helper for an array of known bound `T[kSize]`: exposes only `void`,
 *        which makes the deleted MakeUnique overload the unique match and
 *        rejects known-bound arrays at compile time.
 */
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
/*!
 * \brief Type of `T`.
 */
using KnownBound = void;
};
}  // namespace helper
/*!
 * \brief Construct a non-array `T` from `args` and wrap it in a
 *        `std::unique_ptr<T>`.
 * \param args Arguments forwarded to the constructor of `T`.
 * \return `std::unique_ptr` owning the new instance.
 *
 * Excluded from overload resolution when `T` is an array type
 * (see helper::UniqueIf).
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
  T* object = new T(std::forward<Args>(args)...);
  return std::unique_ptr<T>(object);
}
/*!
 * \brief Construct a value-initialized array of `n` elements and wrap it in
 *        a `std::unique_ptr<T>` where `T` is an array of unknown bound.
 * \param n Number of elements to allocate.
 * \return `std::unique_ptr` owning the new array.
 *
 * Participates in overload resolution only when `T` is an array of unknown
 * bound (see helper::UniqueIf).
 */
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
  using Elem = typename std::remove_extent<T>::type;
  return std::unique_ptr<T>(new Elem[n]{});
}
/*!
 * \brief Deleted overload: constructing an array of known bound
 *        (e.g. MakeUnique<int[5]>()) is disallowed — use the unknown-bound
 *        form MakeUnique<int[]>(5) instead.
 * \param args List of arguments with which an instance of `T` would be
 *        constructed (never used; the overload only exists to be deleted).
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;
/*!
 * \brief Look up the registered compute function `name` for operator `op`
 *        on the device of `ctx`. Returns nullptr when the operator has no
 *        registration; aborts on an unknown device mask.
 *
 * The registries are fetched once per (FCompType, name) instantiation and
 * cached in function-local statics.
 */
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
                      const Context& ctx) {
  static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
  static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
  const int mask = ctx.dev_mask();
  if (mask == cpu::kDevMask) {
    return fcompute_cpu.get(op, nullptr);
  }
  if (mask == gpu::kDevMask) {
    return fcompute_gpu.get(op, nullptr);
  }
  LOG(FATAL) << "Unknown device mask " << mask;
  return nullptr;
}
/*!
 * \brief Return the max integer value representable in the type `T` without loss of precision.
 *        For integral T this is numeric_limits<T>::max(); for floating-point
 *        T it is 2^digits, the largest integer such that all smaller
 *        non-negative integers are exactly representable.
 */
template <typename T>
constexpr size_t MaxIntegerValue() {
return std::is_integral<T>::value ?
std::numeric_limits<T>::max():
size_t(2) << (std::numeric_limits<T>::digits - 1);
}
// half_t has an 11-bit significand, so 2^11 = 2048.
template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
return size_t(2) << 10;
}
// NOTE(review): 2 << 14 = 32768, but bfloat16 has an 8-bit significand, for
// which 2^8 = 256 would be the loss-free bound — confirm this constant's
// intent before relying on it.
template <>
constexpr size_t MaxIntegerValue<mshadow::bfloat::bf16_t>() {
return size_t(2) << 14;
}
// Bit width of `a` as an int: floor(log2(a)) + 1 for a > 0.
// Note: returns 1 for a == 0 as well (the loop body never runs).
MSHADOW_XINLINE int ilog2ul(size_t a) {
  int bits = 1;
  while ((a >>= 1) != 0) {
    ++bits;
  }
  return bits;
}
// Bit width of `a` as an int: floor(log2(a)) + 1 for a > 0.
// Note: returns 1 for a == 0 as well (the loop body never runs).
MSHADOW_XINLINE int ilog2ui(unsigned int a) {
  int bits = 1;
  while ((a >>= 1) != 0) {
    ++bits;
  }
  return bits;
}
/*!
 * \brief Return an NDArray of all zeros with the requested storage type,
 *        shape, context and dtype. Sparse results defer storage allocation.
 */
inline NDArray InitZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
                         const Context &ctx, const int dtype) {
  if (stype != kDefaultStorage) {
    // Non-default storage: allocation is always delayed until first write.
    return NDArray(stype, shape, ctx, true, dtype);
  }
  // Dense storage: allocate eagerly and fill with zeros.
  NDArray dense(shape, ctx, false, dtype);
  dense = 0;
  return dense;
}
/*!
 * \brief Append an all-zero NDArray of the given storage type, shape,
 *        context and dtype to *vec.
 */
inline void EmplaceBackZeros(const NDArrayStorageType stype,
                             const mxnet::TShape &shape,
                             const Context &ctx,
                             const int dtype,
                             std::vector<NDArray> *vec) {
  if (stype != kDefaultStorage) {
    // Non-default storage: allocation is always delayed until first write.
    vec->emplace_back(stype, shape, ctx, true, dtype);
    return;
  }
  // Dense storage: construct in place, then zero-fill.
  vec->emplace_back(shape, ctx, false, dtype);
  vec->back() = 0;
}
/*!
 * \brief parallelize copy by OpenMP.
 *        Copies `size` elements from src to dst; falls back to a plain
 *        memcpy below the MXNET_CPU_PARALLEL_SIZE threshold (default 200000
 *        elements), where thread startup would outweigh the copy itself.
 *        NOTE(review): the memcpy branch assumes DType is trivially
 *        copyable — confirm for all instantiations.
 */
template<typename DType>
inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
if (size >= copy_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t i = 0; i < size; ++i) {
dst[i] = src[i];
}
} else {
// GCC >= 8 warns (-Wclass-memaccess) about memcpy over class types;
// suppressed deliberately for this raw element copy.
#pragma GCC diagnostic push
#if __GNUC__ >= 8
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
std::memcpy(dst, src, sizeof(DType) * size);
#pragma GCC diagnostic pop
}
}
/*!
 * \brief parallelize add by OpenMP.
 *        Element-wise dst[i] += src[i]; parallelized only at or above the
 *        MXNET_CPU_PARALLEL_SIZE threshold (default 200000 elements, shared
 *        with ParallelCopy).
 */
template<typename DType>
inline void ParallelAdd(DType* dst, const DType* src, index_t size) {
static index_t add_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_SIZE", 200000);
if (size >= add_block_size) {
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (index_t i = 0; i < size; ++i) {
dst[i] += src[i];
}
} else {
// small inputs: sequential loop avoids OpenMP startup overhead
for (index_t i = 0; i < size; ++i) {
dst[i] += src[i];
}
}
}
/*!
 * \brief Convert a legacy-convention shape to the numpy convention in place.
 *
 * Legacy definition (numpy compatibility off, the default):
 *   1. ndim == 0 means the shape is completely unknown.
 *   2. a dim size of 0 means that dim size is unknown.
 * Numpy definition expected by operators' infer-shape functions:
 *   1. ndim == 0 means a scalar tensor.
 *   2. ndim == -1 means the shape is unknown.
 *   3. a dim size of 0 means no elements in that dimension.
 *   4. a dim size of -1 means the dimension's size is unknown.
 * \param shape shape to be converted.
 * Note: the input may already be numpy compatible — e.g. when a subgraph
 * operator's infer-shape function is called from the whole-graph pass, its
 * input/output shapes have been converted already.
 */
inline void ConvertToNumpyShape(mxnet::TShape* shape) {
  if (shape->ndim() == 0) {
    // legacy "completely unknown" -> numpy unknown shape (ndim == -1)
    *shape = mxnet::TShape();
    return;
  }
  for (int d = 0; d < shape->ndim(); ++d) {
    if ((*shape)[d] == 0) {
      (*shape)[d] = -1;  // legacy unknown dim size -> numpy -1
    }
  }
}
/*! \brief Convert every shape in the vector to the numpy convention in place. */
inline void ConvertToNumpyShape(mxnet::ShapeVector* shapes) {
  for (mxnet::TShape& s : *shapes) {
    ConvertToNumpyShape(&s);
  }
}
/*!
 * \brief Convert a shape produced by the infer-shape functions/pass back to
 *        the legacy shape definition (unknown ndim -> 0, unknown dim -> 0).
 */
inline void ConvertToLegacyShape(mxnet::TShape* shape) {
  if (!mxnet::ndim_is_known(*shape)) {
    // numpy unknown shape -> legacy ndim == 0
    *shape = mxnet::TShape(0, -1);
    return;
  }
  for (int d = 0; d < shape->ndim(); ++d) {
    if (!mxnet::dim_size_is_known(*shape, d)) {
      (*shape)[d] = 0;  // numpy unknown dim -> legacy 0
    }
  }
}
/*! \brief Convert every shape in the vector back to the legacy convention in place. */
inline void ConvertToLegacyShape(mxnet::ShapeVector* shapes) {
  for (mxnet::TShape& s : *shapes) {
    ConvertToLegacyShape(&s);
  }
}
void ExecuteMonInputCallback(
const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays,
size_t nid, const std::function<void(const char *, const char *, void *)>
&monitor_callback);
void ExecuteMonOutputCallback(
const nnvm::IndexedGraph &idx, const std::vector<NDArray *> &state_arrays,
size_t nid, const std::function<void(const char *, const char *, void *)>
&monitor_callback);
/*!
 * \brief Return the output name of a single NodeEntry by wrapping it in a
 *        temporary one-output Symbol and listing that symbol's output names.
 */
static inline std::string GetOutputName(const nnvm::NodeEntry& e) {
  nnvm::Symbol wrapper;
  wrapper.outputs.push_back(e);
  const auto names = wrapper.ListOutputNames();
  return names.front();
}
/*!
 * \brief Map negative axis values into [0, ndim) and validate that every
 *        axis lies in range; aborts via CHECK on an out-of-range axis.
 */
inline mxnet::TShape CanonicalizeAxes(const mxnet::TShape& src) {
  const int ndim = src.ndim();
  mxnet::TShape canonical = src;
  for (int k = 0; k < ndim; ++k) {
    if (canonical[k] < 0) {
      canonical[k] += ndim;  // e.g. -1 becomes ndim - 1
    }
    CHECK(canonical[k] >= 0 && canonical[k] < ndim) << "axes[" << k << "]="
                                                    << canonical[k] << " exceeds the range ["
                                                    << 0 << ", " << ndim << ")";
  }
  return canonical;
}
/*! \brief true if dtype is one of the floating-point type codes */
inline bool is_float(const int dtype) {
  switch (dtype) {
    case mshadow::kFloat32:
    case mshadow::kFloat64:
    case mshadow::kFloat16:
      return true;
    default:
      return false;
  }
}
/*! \brief true if dtype is one of the integer type codes */
inline bool is_int(const int dtype) {
  switch (dtype) {
    case mshadow::kUint8:
    case mshadow::kInt8:
    case mshadow::kInt32:
    case mshadow::kInt64:
      return true;
    default:
      return false;
  }
}
/*!
 * \brief Return the more precise of two dtype codes.
 *        Promotion order: float64 > float32 > float16 > any int;
 *        among ints: int64 > int32 > uint8/int8. A uint8/int8 pair has no
 *        common precise type here and is rejected via CHECK (callers such as
 *        np_binary_out_infer_type must resolve that pair to int32 first).
 */
inline int get_more_precise_type(const int type1, const int type2) {
if (type1 == type2) return type1;
if (is_float(type1) && is_float(type2)) {
// both floating: widest float wins
if (type1 == mshadow::kFloat64 || type2 == mshadow::kFloat64) {
return mshadow::kFloat64;
}
if (type1 == mshadow::kFloat32 || type2 == mshadow::kFloat32) {
return mshadow::kFloat32;
}
return mshadow::kFloat16;
} else if (is_float(type1) || is_float(type2)) {
// float beats any integer type
return is_float(type1) ? type1 : type2;
}
// both integral: widest integer wins
if (type1 == mshadow::kInt64 || type2 == mshadow::kInt64) {
return mshadow::kInt64;
}
if (type1 == mshadow::kInt32 || type2 == mshadow::kInt32) {
return mshadow::kInt32;
}
CHECK(!((type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
(type1 == mshadow::kInt8 && type2 == mshadow::kUint8)))
<< "1 is UInt8 and 1 is Int8 should not get here";
if (type1 == mshadow::kUint8 || type2 == mshadow::kUint8) {
return mshadow::kUint8;
}
return mshadow::kInt8;
}
/*!
 * \brief Infer the output dtype of a numpy-style binary op: a uint8/int8
 *        pair promotes to int32; every other pair follows
 *        get_more_precise_type.
 */
inline int np_binary_out_infer_type(const int type1, const int type2) {
  const bool mixed_sign_8bit =
      (type1 == mshadow::kUint8 && type2 == mshadow::kInt8) ||
      (type1 == mshadow::kInt8 && type2 == mshadow::kUint8);
  if (mixed_sign_8bit) {
    return mshadow::kInt32;
  }
  return get_more_precise_type(type1, type2);
}
/*!
 * \brief Return the profiler scope previously recorded on the node
 *        (attribute "__profiler_scope__"); falls back to the storage
 *        default scope when none was assigned.
 */
inline const std::string
NodeAttrsGetProfilerScope(const nnvm::NodeAttrs& attrs) {
  const auto& dict = attrs.dict;
  const auto it = dict.find("__profiler_scope__");
  if (it != dict.end()) {
    return it->second;
  }
  return MXNET_STORAGE_DEFAULT_PROFILER_SCOPE_CSTR;
}
} // namespace common
} // namespace mxnet
#endif // MXNET_COMMON_UTILS_H_
|
GB_unop__round_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__round_fc32_fc32
// op(A') function: GB_unop_tran__round_fc32_fc32
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = GB_croundf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_croundf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = GB_croundf (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ROUND || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Auto-generated kernel (see the file header): fixes go in the generator
// template, not here. Applies cij = GB_croundf(aij) element-wise, handling
// both the full case (Ab == NULL) and the bitmap case (Ab marks live entries).
GrB_Info GB_unop_apply__round_fc32_fc32
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// full case: every one of the anz entries is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity op with matching types: a plain parallel memcpy suffices
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = GB_croundf (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = GB_croundf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Auto-generated: the whole algorithm lives in the shared template
// GB_unop_transpose.c, which this wrapper instantiates via the GB_* macros
// defined above.
GrB_Info GB_unop_tran__round_fc32_fc32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pr58472.c | /* PR tree-optimization/58472 */
/* { dg-do compile } */
/* { dg-options "-O2 -Wall -fopenmp" } */
float a[1024], b[1024];
/* NOTE(review): this is a GCC testsuite regression case (PR 58472, per the
 * file header) compiled with -O2 -Wall -fopenmp. The dot-product SIMD
 * reduction below presumably reproduces the original bug; the body must
 * stay byte-for-byte as-is or the test no longer exercises it. */
float
foo ()
{
float s = 0.f;
unsigned int i;
#pragma omp simd reduction(+:s)
for (i = 0; i < 1024; ++i)
s += a[i] * b[i];
return s;
}
|
nukedclan_fmt_plug.c | /* Nuked-Klan CMS DB cracker patch for JtR. Hacked together during
* July of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* Input Format => user:$nk$*HASHKEY*hash
*
* Where,
*
* HASHKEY => hex(HASHKEY value found in conf.inc.php)
*
* Modified by JimF, Jul 2012. About 6x speed improvements.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_nk;
#elif FMT_REGISTERS_H
john_register_one(&fmt_nk);
#else
#include <string.h>
#include "arch.h"
#include "md5.h"
#include "sha.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "common.h"
#ifdef _OPENMP
#include <omp.h>
// Tuned on core i7 quad HT
// 1 5059K
// 16 8507k
// 64 8907k ** this was chosen.
// 128 8914k
// 256 8810k
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "nk"
#define FORMAT_NAME "Nuked-Klan CMS"
#define FORMAT_TAG "$nk$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "SHA1 MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1 /* change to 0 once there's any speedup for "many salts" */
#define PLAINTEXT_LENGTH 32
#define CIPHERTEXT_LENGTH (4+32+40+3+1)
#define BINARY_SIZE 16
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 64
static struct fmt_tests nk_tests[] = {
{"$nk$*379637b4fcde21b2c5fbc9a00af505e997443267*#17737d3661312121d5ae7d5c6156c0298", "openwall"},
{"$nk$*379637b4fcde21b2c5fbc9a00af505e997443267*#5c20384512ee36590f5f0ab38a46c6ced", "password"},
// from pass_gen.pl
{"$nk$*503476424c5362476f36463630796a6e6c656165*#2f27c20e65b88b76c913115cdec3d9a18", "test1"},
{"$nk$*7a317a71794339586c434d50506b6e4356626a67*#b62a615f605c2fd520edde76577d30f90", "thatsworking"},
{"$nk$*796b7375666d7545695032413769443977644132*#4aec90bd9a930faaa42a0d7d40056132e", "test3"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static struct custom_salt {
unsigned char HASHKEY[41];
int decal;
} *cur_salt;
/* Expand each of the `len` input bytes into two lowercase hex digits,
 * writing 2*len characters to `out` (no NUL terminator appended). */
static inline void hex_encode(unsigned char *str, int len, unsigned char *out)
{
	int pos;

	for (pos = 0; pos < len; pos++) {
		*out++ = itoa16[str[pos] >> 4];
		*out++ = itoa16[str[pos] & 0xF];
	}
}
/* Format init: scale the keys-per-crypt window for OpenMP and allocate the
 * key/result arrays sized to the (possibly scaled) max. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
static int omp_t = 1;
omp_t = omp_get_max_threads();
/* min scales by thread count only; max additionally by OMP_SCALE so each
 * thread gets a batch of candidates per crypt_all call. */
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
crypt_out = mem_calloc(sizeof(*crypt_out), self->params.max_keys_per_crypt);
}
/* Release the buffers allocated in init() (reverse allocation order). */
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
/* Canonicalize a ciphertext: copy the fixed-length hash string into a static
 * buffer and lowercase it (hex digits are case-insensitive). Assumes valid()
 * has already established the length. */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char canonical[CIPHERTEXT_LENGTH + 1];

	memcpy(canonical, ciphertext, CIPHERTEXT_LENGTH);
	canonical[CIPHERTEXT_LENGTH] = 0;
	strlwr(canonical);
	return canonical;
}
/* Validate the ciphertext layout "$nk$*<40 hex HASHKEY>*#<decal><32 hex hash>":
 * correct tag, a 40-hex-digit first field, and a second field whose first two
 * characters (marker + decal digit) are followed by exactly 32 hex digits.
 * Returns 1 when valid, 0 otherwise. */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ptr, *ctcopy, *keeptr;
int extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
return 0;
/* work on a copy: strtokm mutates its input */
if (!(ctcopy = strdup(ciphertext)))
return 0;
keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN;	/* skip leading "$nk$*" */
if (!(ptr = strtokm(ctcopy, "*")))
goto error;
/* HASHKEY is of fixed length 40 */
if(hexlenl(ptr, &extra) != 40 || extra)
goto error;
if (!(ptr = strtokm(NULL, "*")))
goto error;
/* skip two characters, for "nk_tests[]" this is '#'
 * followed by decal value */
if (strlen(ptr) <= 2)
goto error;
ptr += 2;
/* hash is of fixed length 32 */
if(hexlenl(ptr, &extra) != 32 || extra)
goto error;
MEM_FREE(keeptr);
return 1;
error:
MEM_FREE(keeptr);
return 0;
}
/* Parse the salt: decode the 40 hex digits of HASHKEY into 20 raw bytes
 * (crypt_all only reads indices 0..19) and the single hex decal digit that
 * follows the '#'. Returns a pointer to a static struct, per the usual JtR
 * get_salt contract (caller copies SALT_SIZE bytes). */
static void *get_salt(char *ciphertext)
{
static struct custom_salt cs;
char _ctcopy[256], *ctcopy=_ctcopy;
char *p;
int i;
strnzcpy(ctcopy, ciphertext, 255);
ctcopy += FORMAT_TAG_LEN;	/* skip over "$nk$*" */
p = strtokm(ctcopy, "*");
for (i = 0; i < 20; i++)
cs.HASHKEY[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
/* p[0] is the '#' marker; p[1] is the hex decal (rotation offset) */
cs.decal = atoi16[ARCH_INDEX(p[1])];
return (void *)&cs;
}
/* Decode the trailing 32 hex digits (after '*', '#' and the decal digit)
 * into the BINARY_SIZE-byte MD5 value. Returns a static, ARCH_WORD-aligned
 * buffer, as the binary-hash functions require. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE+1];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p = strrchr(ciphertext, '*') + 3; /* skip "*", "#" and decal digit */
	int i;

	for (i = 0; i < BINARY_SIZE; i++, p += 2)
		out[i] = (atoi16[ARCH_INDEX(p[0])] << 4) | atoi16[ARCH_INDEX(p[1])];
	return out;
}
/* Partial-hash accessors over the first 32 bits of the computed MD5,
 * masked to the standard JtR hash-table sizes. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Install the salt for subsequent crypt_all calls. */
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
/* Compute the Nuked-Klan hash for every queued candidate:
 *   1. pass = lowercase-hex(SHA1(password))           (40 chars)
 *   2. interleave each pass char with one HASHKEY byte, starting at offset
 *      `decal` and wrapping after index 19            (80 bytes)
 *   3. crypt_out = MD5(interleaved buffer)
 * Parallelized over candidates with OpenMP. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++) {
unsigned char pass[40+1];
unsigned char out[80];
int i, k;
int idx = 0;
MD5_CTX c;
SHA_CTX ctx;
SHA1_Init(&ctx);
SHA1_Update(&ctx, saved_key[index], strlen(saved_key[index]));
SHA1_Final(out, &ctx);
hex_encode(out, 20, pass);
/* note: the wrap check precedes the HASHKEY read, so k stays in 0..19 */
for (i = 0, k=cur_salt->decal; i < 40; ++i, ++k) {
out[idx++] = pass[i];
if(k>19) k = 0;
out[idx++] = cur_salt->HASHKEY[k];
}
MD5_Init(&c);
MD5_Update(&c, out, 80);
MD5_Final((unsigned char*)crypt_out[index], &c);
}
return count;
}
/* Quick scan: does any computed hash share its first 32-bit word with
   the target binary? */
static int cmp_all(void *binary, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (((ARCH_WORD_32 *)binary)[0] == crypt_out[i][0])
			return 1;
	}
	return 0;
}
/* Compare the first 32-bit word of one computed hash with the target. */
static int cmp_one(void *binary, int index)
{
	return crypt_out[index][0] == ((ARCH_WORD_32 *)binary)[0];
}
/* Full-width comparison against the re-decoded ciphertext. */
static int cmp_exact(char *source, int index)
{
	return memcmp(get_binary(source), crypt_out[index], BINARY_SIZE) == 0;
}
/* Store one plaintext candidate.
   NOTE(review): unbounded strcpy — this relies on the core truncating
   candidates to PLAINTEXT_LENGTH before calling set_key; confirm, or
   switch to strnzcpy(saved_key[index], key, sizeof(saved_key[index])). */
static void nk_set_key(char *key, int index)
{
	strcpy(saved_key[index], key);
}
/* Return the stored plaintext candidate for reporting. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Format descriptor: wires this format's parameters and callbacks into
   the John the Ripper core. Field order is fixed by struct fmt_main. */
struct fmt_main fmt_nk = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,	/* presumably plaintext_min_length — confirm against struct fmt_params */
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },	/* no tunable-cost names */
		{ FORMAT_TAG },
		nk_tests	/* self-test vectors */
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },	/* no tunable-cost value functions */
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,	/* salt_compare not needed */
		set_salt,
		nk_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
timestep.c |
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <omp.h>
#include <math.h>
#include <ktime.h>
#include <geometry.h>
#ifdef __USE_HW_COUNTER
#include <perf.h>
#include <kperf.h>
#endif
#include <phy.h>
/*
Calculate a time step for each cell
Note that this routine assumes conservative variables
Local time stepping, loop over faces and calculate time step as:
cdt = V / sum(|u.n| + c * area)
This is the time step for CFL=1.
Later it will be multiplied by the CFL number.
*/
void
compute_deltat2(struct delta *restrict delta)
{
#ifdef __USE_HW_COUNTER
	/* snapshot hardware counters and TSC before the kernel */
	const struct fd fd = delta->perf_counters->fd;
	struct counters start;
	perf_read(fd, &start);
	const uint64_t icycle = __rdtsc();
#endif
	struct ktime ktime;
	setktime(&ktime);
	/* hoist all struct fields into restrict-qualified locals so the
	   compiler can assume no aliasing inside the loops */
	const size_t nnodes = delta->nnodes;
	const size_t nsnodes = delta->nsnodes;
	const size_t nfnodes = delta->nfnodes;
	const size_t bsz = delta->bsz;
	const uint32_t *restrict nsptr = delta->nsptr;
	const uint32_t *restrict nfptr = delta->nfptr;
	const double *restrict s_xyz0 = delta->s_xyz0;
	const double *restrict s_xyz1 = delta->s_xyz1;
	const double *restrict s_xyz2 = delta->s_xyz2;
	const double *restrict f_xyz0 = delta->f_xyz0;
	const double *restrict f_xyz1 = delta->f_xyz1;
	const double *restrict f_xyz2 = delta->f_xyz2;
	const uint32_t *restrict ie = delta->ie;
	const uint32_t *restrict part = delta->part;
	const uint32_t *restrict n0 = delta->n0;
	const uint32_t *restrict n1 = delta->n1;
	const double *restrict area = delta->area;
	const double *restrict q = delta->q;
	const double *restrict x0 = delta->x0;
	const double *restrict x1 = delta->x1;
	const double *restrict x2 = delta->x2;
	const double *restrict x3 = delta->x3;
	double *restrict cdt = delta->cdt;
	/* accumulators start at zero; every loop below adds into cdt[] */
	memset(cdt, 0, nnodes * sizeof(double));
#pragma omp parallel
	{
		/* ie[] holds per-thread edge ranges: thread t owns edges
		   [ie[t], ie[t+1]) — assumes ie was sized for this team size;
		   TODO confirm against the partitioning code */
		const uint32_t t = omp_get_thread_num();
		const uint32_t ie0 = ie[t];
		const uint32_t ie1 = ie[t+1];
		uint32_t i;
		for(i = ie0; i < ie1; i++)
		{
			/* face normal (x0,x1,x2) with magnitude x3 */
			const double xn = x0[i];
			const double yn = x1[i];
			const double zn = x2[i];
			const double ln = x3[i];
			const double xnorm = xn * ln;
			const double ynorm = yn * ln;
			const double znorm = zn * ln;
			const uint32_t node0 = n0[i];
			const uint32_t node1 = n1[i];
			const uint32_t idx0 = bsz * node0;
			const uint32_t idx1 = bsz * node1;
			/* Get average values on face */
			const double u = 0.5f * (q[idx0 + 1] + q[idx1 + 1]); // u
			const double v = 0.5f * (q[idx0 + 2] + q[idx1 + 2]); // v
			const double w = 0.5f * (q[idx0 + 3] + q[idx1 + 3]); // w
			const double ubar = xn * u + yn * v + zn * w;
			/* BETA is presumably the artificial-compressibility
			   parameter used in the pseudo sound speed — confirm */
			const double c = sqrt(ubar * ubar + BETA);
			double term = u * xnorm;
			term += v * ynorm;
			term += w * znorm;
			term = fabs(term) + c * ln;
			/* only the thread that owns a node (part[node] == t) writes
			   it; other threads recompute term but leave cdt untouched.
			   This keeps the accumulation race-free without atomics. */
			cdt[node0] = (part[node0] == t) ? cdt[node0] + term : cdt[node0];
			cdt[node1] = (part[node1] == t) ? cdt[node1] + term : cdt[node1];
		}
	}
	/*
	Now loop over boundaries and close the contours
	*/
	uint32_t i;
#pragma omp parallel for
	for(i = 0; i < nsnodes; i++)
	{
		/* solid-boundary nodes: add the boundary-face contribution */
		const uint32_t n = nsptr[i];
		const double xn = s_xyz0[i];
		const double yn = s_xyz1[i];
		const double zn = s_xyz2[i];
		const double ln = sqrt(xn * xn + yn * yn + zn * zn);
		const double u = q[bsz * n + 1];
		const double v = q[bsz * n + 2];
		const double w = q[bsz * n + 3];
		const double ubar = u * xn + v * yn + w * zn;
		const double ubar_ = ubar / ln;
		const double c = sqrt(ubar_ * ubar_ + BETA);
		const double Vn = fabs(ubar) + c * ln;
		cdt[n] += Vn;
	}
#pragma omp parallel for
	for(i = 0; i < nfnodes; i++)
	{
		/* far-field boundary nodes: same closure as above */
		const uint32_t n = nfptr[i];
		const double xn = f_xyz0[i];
		const double yn = f_xyz1[i];
		const double zn = f_xyz2[i];
		const double ln = sqrt(xn * xn + yn * yn + zn * zn);
		const double u = q[bsz * n + 1];
		const double v = q[bsz * n + 2];
		const double w = q[bsz * n + 3];
		const double ubar = u * xn + v * yn + w * zn;
		const double ubar_ = ubar / ln;
		const double c = sqrt(ubar_ * ubar_ + BETA);
		const double Vn = fabs(ubar) + c * ln;
		cdt[n] += Vn;
	}
	/* cdt = V / sum(...); assumes every node received a nonzero
	   contribution above (an isolated node would divide by zero) */
#pragma omp parallel for
	for(i = 0; i < nnodes; i++) cdt[i] = area[i] / cdt[i];
	compute_time(&ktime, delta->t);
#ifdef __USE_HW_COUNTER
	/* accumulate cycle and memory-controller counter deltas */
	const uint64_t cycle = __rdtsc() - icycle;
	struct counters end;
	perf_read(fd, &end);
	struct tot tot;
	perf_calc(start, end, &tot);
	delta->perf_counters->ctrs->timestep.cycles += cycle;
	delta->perf_counters->ctrs->timestep.tot.imcR += tot.imcR;
	delta->perf_counters->ctrs->timestep.tot.imcW += tot.imcW;
	delta->perf_counters->ctrs->timestep.tot.edcR += tot.edcR;
	delta->perf_counters->ctrs->timestep.tot.edcW += tot.edcW;
#endif
}
|
branch.c | /*
Copyright (c) 2013, Intel Corporation
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/*******************************************************************
NAME: Branch
PURPOSE: This program tests the effect of inner-loop branches on
application performance. We investigate four cases. The first
three all concern light-weight loops, i.e. loops that have
very few instructions associated with them.
1) branches inside vectorizable loops where the branch does
not necessarily inhibit vectorization: vector_go
2) branches inside vectorizable loops where the branch does
inhibit vectorization: vector_stop
3) branches inside non-vectorizable loops: no_vector
4) branches inside non-vectorizable loops in which each branch
corresponds to a sizeable and different set of instructions:
ins-heavy
CONSTRAINTS:
- the code should be continuously scalable, i.e. the user should
be able to specify the amount of work to be done.
- the code should be verifiable.
- the code should be executable with and without branches, with
otherwise identical amounts of work, to assess the impact of the
branches.
- the performance of the code should be dominated by the work in
the loops, not by memory bandwidth required to fetch data. This
means that arrays should fit in cache, and any loop over arrays
should be executed many times to amortize the initial memory load
costs and to remove noise from the timings.
- any arrays used should be initialized only once, to avoid confusing
performance impact of initialization with that of the branches.
Because the base loop over the array is short, it completes very
quickly, leading to very noisy results if it were timed separately.
Hence, we must time the ensemble of all iterations over the base
loop, which would include reinitializations if present.
- the branches should be "unpredictable," meaning that if the compiler
guesses them to be always taken or to be always not taken, it will
be wrong often. Otherwise the cost of a mispredicted branch may
not show up in the performance results.
- the amount of work in the codes containing the three different
types of light-weight loops should be the same to allow fair
comparisons.
- the code should not produce overflow or underflow.
- the actual cost of computing the branch condition should be small,
so that we can assess the cost of the occurrence of the branch (as
it disrupts vectorization and the hardware pipelines). If the
condition were expensive to compute and we run the code with and
without the branch, the performance difference would be exaggerated.
- Note: Casts from integer to float or double are not always vectorizable.
APPROACH:
- to avoid casts and keep conditionals inexpensive and exact, we use
only integer operations.
- we make sure that the numerical results of the codes for the
different branch structures and for the different paths following
the branch are identical.
- conditionals are simple comparisons to zero of quantities that
are computed anyway.
- initialization produces a saw-tooth pattern with frequent sign
crossings to defeat speculative branch execution.
- successive iterations over a relatively short array result simply
in a change of sign of all array elements, so that the results are
bounded, and verification values are easily computable.
USAGE: The program takes as input the number of threads, the length of the
vector loop, the number of repetitions of the loop, and the type of
branching
<progname> <# threads> <# iterations> <vector length> <branch_type>
The output consists of diagnostics to make sure the
algorithm worked, and of timing statistics.
FUNCTIONS CALLED:
Other than OpenMP or standard C functions, the following
functions are used in this program:
wtime()
bail_out()
fill_vec()
func*()
HISTORY: Written by Rob Van der Wijngaart, May 2006.
**********************************************************************************/
#include <par-res-kern_general.h>
#include <par-res-kern_omp.h>
/* the following values are only used as labels */
#define VECTOR_STOP 66
#define VECTOR_GO 77
#define NO_VECTOR 88
#define INS_HEAVY 99
#define WITH_BRANCHES 1
#define WITHOUT_BRANCHES 0
extern int fill_vec(int *vector, int vector_length, int iterations, int branch,
int *nfunc, int *rank);
/*
 * Driver: parse <# threads> <# iterations> <vector length> <branch type>,
 * time the chosen branchy loop and its branch-free equivalent, then verify
 * the array sum against a closed-form reference.
 *
 * Fixes vs. original: usage message had no space between "<vector length>"
 * and "<branching type>"; verification failure now exits EXIT_FAILURE
 * instead of EXIT_SUCCESS; unused local `factor` removed; per-thread
 * scratch buffer is freed.
 */
int main(int argc, char ** argv)
{
  int my_ID;            /* Thread ID */
  int vector_length;    /* length of vector loop containing the branch */
  int nfunc;            /* number of functions used in INS_HEAVY option */
  int rank;             /* matrix rank used in INS_HEAVY option */
  double branch_time,   /* timing parameters */
         no_branch_time;
  double ops;           /* double precision representation of integer ops */
  int iterations;       /* number of times the branch loop is carried out */
  int i, iter, aux;     /* dummies */
  char *branch_type;    /* string defining branching type */
  int btype;            /* integer encoding branching type */
  int total=0,
      total_ref;        /* computed and stored verification values */
  int nthread_input;    /* thread parameters */
  int nthread;
  int num_error=0;      /* flag that signals that requested and obtained
                           numbers of threads are the same */

/**********************************************************************************
** process and test input parameters
**********************************************************************************/

  if (argc != 5){
    printf("Usage: %s <# threads> <# iterations> <vector length>", *argv);
    printf(" <branching type>\n");
    printf("branching type: vector_go, vector_stop, no_vector, ins_heavy\n");
    exit(EXIT_FAILURE);
  }

  nthread_input = atoi(*++argv);
  if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) {
    printf("ERROR: Invalid number of threads: %d\n", nthread_input);
    exit(EXIT_FAILURE);
  }
  omp_set_num_threads(nthread_input);

  iterations = atoi(*++argv);
  /* iterations must be even: each pass flips the sign of the array twice */
  if (iterations < 1 || iterations%2==1){
    printf("ERROR: Iterations must be positive and even : %d \n", iterations);
    exit(EXIT_FAILURE);
  }

  vector_length = atoi(*++argv);
  if (vector_length < 1){
    printf("ERROR: loop length must be >= 1 : %d \n",vector_length);
    exit(EXIT_FAILURE);
  }

  branch_type = *++argv;
  if      (!strcmp(branch_type,"vector_stop")) btype = VECTOR_STOP;
  else if (!strcmp(branch_type,"vector_go" ))  btype = VECTOR_GO;
  else if (!strcmp(branch_type,"no_vector" ))  btype = NO_VECTOR;
  else if (!strcmp(branch_type,"ins_heavy" ))  btype = INS_HEAVY;
  else {
    printf("Wrong branch type: %s; choose vector_stop, vector_go, ", branch_type);
    printf("no_vector, or ins_heavy\n");
    exit(EXIT_FAILURE);
  }

  #pragma omp parallel private(i, my_ID, iter, aux, nfunc, rank) reduction(+:total)
  {
  int * RESTRICT vector; int * RESTRICT index;

  #pragma omp master
  {
  nthread = omp_get_num_threads();
  printf("OpenMP Branching Bonanza\n");
  if (nthread != nthread_input) {
    num_error = 1;
    printf("ERROR: number of requested threads %d does not equal ",
           nthread_input);
    printf("number of spawned threads %d\n", nthread);
  }
  else {
    printf("Number of threads = %d\n", nthread_input);
    printf("Vector length = %d\n", vector_length);
    printf("Number of iterations = %d\n", iterations);
    printf("Branching type = %s\n", branch_type);
  }
  }
  bail_out(num_error);

  my_ID = omp_get_thread_num();

  /* one allocation holds both the data vector and the index array */
  vector = malloc(vector_length*2*sizeof(int));
  if (!vector) {
    printf("ERROR: Thread %d failed to allocate space for vector\n", my_ID);
    num_error = 1;
  }
  bail_out(num_error);

  /* grab the second half of vector to store index array */
  index = vector + vector_length;

  /* initialize the array with entries with varying signs; array "index" is only
     used to obfuscate the compiler (i.e. it won't vectorize a loop containing
     indirect referencing). It functions as the identity operator. */
  for (i=0; i<vector_length; i++) {
    vector[i] = 3 - (i&7);
    index[i] = i;
  }

  #pragma omp barrier
  #pragma omp master
  {
  branch_time = wtime();
  }

  /* do actual branching */
  switch (btype) {

  case VECTOR_STOP:
    /* condition vector[index[i]]>0 inhibits vectorization */
    for (iter=0; iter<iterations; iter+=2) {
      #pragma vector always
      for (i=0; i<vector_length; i++) {
        aux = -(3 - (i&7));
        if (vector[index[i]]>0) vector[i] -= 2*vector[i];
        else vector[i] -= 2*aux;
      }
      #pragma vector always
      for (i=0; i<vector_length; i++) {
        aux = (3 - (i&7));
        if (vector[index[i]]>0) vector[i] -= 2*vector[i];
        else vector[i] -= 2*aux;
      }
    }
    break;

  case VECTOR_GO:
    /* condition aux>0 allows vectorization */
    for (iter=0; iter<iterations; iter+=2) {
      #pragma vector always
      for (i=0; i<vector_length; i++) {
        aux = -(3 - (i&7));
        if (aux>0) vector[i] -= 2*vector[i];
        else vector[i] -= 2*aux;
      }
      #pragma vector always
      for (i=0; i<vector_length; i++) {
        aux = (3 - (i&7));
        if (aux>0) vector[i] -= 2*vector[i];
        else vector[i] -= 2*aux;
      }
    }
    break;

  case NO_VECTOR:
    /* condition aux>0 allows vectorization, but indirect indexing inhibits it */
    for (iter=0; iter<iterations; iter+=2) {
      #pragma vector always
      for (i=0; i<vector_length; i++) {
        aux = -(3 - (i&7));
        if (aux>0) vector[i] -= 2*vector[index[i]];
        else vector[i] -= 2*aux;
      }
      #pragma vector always
      for (i=0; i<vector_length; i++) {
        aux = (3 - (i&7));
        if (aux>0) vector[i] -= 2*vector[index[i]];
        else vector[i] -= 2*aux;
      }
    }
    break;

  case INS_HEAVY:
    fill_vec(vector, vector_length, iterations, WITH_BRANCHES, &nfunc, &rank);
  }

  #pragma omp master
  {
  branch_time = wtime() - branch_time;
  if (btype == INS_HEAVY) {
    printf("Number of matrix functions = %d\n", nfunc);
    printf("Matrix order = %d\n", rank);
  }
  }

  /* do the whole thing once more, but now without branches */
  #pragma omp barrier
  #pragma omp master
  {
  no_branch_time = wtime();
  }

  /* do the same amount of work, but without branches */
  switch (btype) {

  case VECTOR_STOP:
  case VECTOR_GO:
    for (iter=0; iter<iterations; iter+=2) {
      #pragma vector always
      for (i=0; i<vector_length; i++) {
        aux = -(3-(i&7));
        vector[i] -= (vector[i] + aux);
      }
      for (i=0; i<vector_length; i++) {
        aux = (3-(i&7));
        vector[i] -= (vector[i] + aux);
      }
    }
    break;

  case NO_VECTOR:
    for (iter=0; iter<iterations; iter+=2) {
      #pragma vector always
      for (i=0; i<vector_length; i++) {
        aux = -(3-(i&7));
        vector[i] -= (vector[index[i]]+aux);
      }
      #pragma vector always
      for (i=0; i<vector_length; i++) {
        aux = (3-(i&7));
        vector[i] -= (vector[index[i]]+aux);
      }
    }
    break;

  case INS_HEAVY:
    fill_vec(vector, vector_length, iterations, WITHOUT_BRANCHES, &nfunc, &rank);
  }

  #pragma omp master
  {
  no_branch_time = wtime() - no_branch_time;
  ops = (double)vector_length * (double)iterations * (double)nthread;
  if (btype == INS_HEAVY) ops *= rank*(rank*19 + 6);
  else ops *= 4;
  }

  /* per-thread partial sum; the reduction clause combines them */
  for (total = 0, i=0; i<vector_length; i++) total += vector[i];
  free(vector);
  } /* end of OPENMP parallel region */

  /* compute verification values */
  total_ref = ((vector_length%8)*(vector_length%8-8) + vector_length)/2*nthread;

  if (total == total_ref) {
    printf("Solution validates\n");
    printf("Rate (Mops/s) with branches: %lf time (s): %lf\n",
           ops/(branch_time*1.e6), branch_time);
    printf("Rate (Mops/s) without branches: %lf time (s): %lf\n",
           ops/(no_branch_time*1.e6), no_branch_time);
#ifdef VERBOSE
    printf("Array sum = %d, reference value = %d\n", total, total_ref);
#endif
  }
  else {
    printf("ERROR: array sum = %d, reference value = %d\n", total, total_ref);
    exit(EXIT_FAILURE);
  }

  exit(EXIT_SUCCESS);
}
|
GB_unop__identity_fc64_int64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_int64)
// op(A') function: GB (_unop_tran__identity_fc64_int64)
// C type: GxB_FC64_t
// A type: int64_t
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity unop with typecast: Cx [p] = (GxB_FC64_t) Ax [p],
// casting int64_t to double complex with a zero imaginary part.
GrB_Info GB (_unop_apply__identity_fc64_int64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: every slot 0..anz-1 is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip slots not present in the bitmap
            int64_t aij = Ax [p] ;
            GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (cast (A')): the transpose loop itself lives in the shared
// template, specialized through the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_fc64_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,          // per-task workspaces
    const int64_t *restrict A_slice,        // how A is split across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__ge_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ge_int8)
// A.*B function (eWiseMult): GB (_AemultB_01__ge_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__ge_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__ge_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ge_int8)
// A*D function (colscale): GB (_AxD__ge_int8)
// D*A function (rowscale): GB (_DxB__ge_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__ge_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__ge_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ge_int8)
// C=scalar+B GB (_bind1st__ge_int8)
// C=scalar+B' GB (_bind1st_tran__ge_int8)
// C=A+scalar GB (_bind2nd__ge_int8)
// C=A'+scalar GB (_bind2nd_tran__ge_int8)
// C type: bool
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_INT8 || GxB_NO_GE_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop comes from the
// shared template, specialized via the GB_BINOP macro (cij = aij >= bij).
GrB_Info GB (_Cdense_ewise3_noaccum__ge_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (sparse accumulated into dense). The template is compiled out
// (#if 0) for this operator, so the function is a successful no-op.
GrB_Info GB (_Cdense_accumB__ge_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar accumulated into dense). Also compiled out (#if 0) for
// this operator; returns success without touching C.
GrB_Info GB (_Cdense_accumb__ge_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // pointer to the scalar b
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__ge_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;     // C values (C type is bool)
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__ge_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;     // C values (C type is bool)
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd (set union of patterns): C = A+B or C<M> = A+B with op GE.
GrB_Info GB (_AaddB__ge_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,             // optional mask
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M, // pattern-alignment maps
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // scratch slicings allocated (if needed) by the template, freed below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (set intersection of patterns): C = A.*B or C<M> = A.*B.
GrB_Info GB (_AemultB_01__ge_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,             // optional mask
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult when A is sparse/hypersparse and B is bitmap/full. The flipxy
// handling depends on GB_BINOP_FLIP (0 for GE: z=ge(y,x) was rewritten as
// z=le(x,y) upstream, so no flipped variant is needed here).
GrB_Info GB (_AemultB_02__ge_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hypersparse and both A and B
// are bitmap/full.
GrB_Info GB (_AemultB_03__ge_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
GrB_Info GB (_AemultB_bitmap__ge_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Bind the scalar as the first operand: Cx [p] = (x >= Bx [p]) for every
// entry present in B.
GrB_Info GB (_bind1st__ge_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,     // untyped pointer to the int8_t scalar x
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // B->b if B is bitmap
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;    // skip slots absent from the bitmap
        int8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x >= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Computes Cx [k] = (Ax [k] >= y) for every entry present in A's bitmap.
GrB_Info GB (_bind2nd__ge_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs and output
bool *Cx = (bool *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// only entries present in the bitmap are computed
if (GBB (Ab, k))
{
int8_t ak = GBX (Ax, k, false) ;
Cx [k] = (ak >= y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB (_bind1st_tran__ge_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// bind the scalar x; the transpose template applies GB_CAST_OP per entry
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the code that follows in this generated file
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB (_bind2nd_tran__ge_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// bind the scalar y; the transpose template applies GB_CAST_OP per entry
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
csf.c |
/******************************************************************************
* INCLUDES
*****************************************************************************/
#include "csf.h"
#include "sort.h"
#include "tile.h"
#include "util.h"
#include "thread_partition.h"
#include "io.h"
#include <assert.h>
/******************************************************************************
* API FUNCTIONS
*****************************************************************************/
/*
 * Read a coordinate tensor from 'fname', strip empty slices, and build the
 * CSF representation(s) selected by 'options'. On success *tensors owns the
 * allocation and *nmodes is set; returns SPLATT_ERROR_BADINPUT on read error.
 */
int splatt_csf_load(
    char const * const fname,
    splatt_idx_t * nmodes,
    splatt_csf ** tensors,
    double const * const options)
{
  sptensor_t * coords = tt_read(fname);
  if(coords == NULL) {
    return SPLATT_ERROR_BADINPUT;
  }

  /* drop empty slices before CSF construction */
  tt_remove_empty(coords);

  *tensors = csf_alloc(coords, options);
  *nmodes = coords->nmodes;

  tt_free(coords);
  return SPLATT_SUCCESS;
}
/*
 * Wrap caller-supplied coordinate data in a temporary sptensor_t, strip empty
 * slices, and build the CSF representation(s) selected by 'options'.
 */
int splatt_csf_convert(
    splatt_idx_t const nmodes,
    splatt_idx_t const nnz,
    splatt_idx_t ** const inds,
    splatt_val_t * const vals,
    splatt_csf ** tensors,
    double const * const options)
{
  /* stack-allocated view over the caller's arrays */
  sptensor_t coords;
  tt_fill(&coords, nnz, nmodes, inds, vals);
  tt_remove_empty(&coords);

  *tensors = csf_alloc(&coords, options);
  return SPLATT_SUCCESS;
}
/*
 * Public wrapper: release every CSF tensor built by splatt_csf_load/convert.
 * 'options' tells csf_free how many representations were allocated.
 */
void splatt_free_csf(
    splatt_csf * tensors,
    double const * const options)
{
  csf_free(tensors, options);
}
/******************************************************************************
* PRIVATE FUNCTIONS
*****************************************************************************/
/**
* @brief Count the nonzeros below a given node in a CSF tensor.
*
* @param fptr The adjacency pointer of the CSF tensor.
* @param nmodes The number of modes in the tensor.
* @param depth The depth of the node.
* @param fiber The id of the node.
*
* @return The number of nonzeros below fptr[depth][fiber].
*/
idx_t p_csf_count_nnz(
    idx_t * * fptr,
    idx_t const nmodes,
    idx_t depth,
    idx_t const fiber)
{
  /* a leaf node is exactly one nonzero */
  if(depth == nmodes-1) {
    return 1;
  }

  /* follow the [lo, hi) range down to the leaf level */
  idx_t lo = fptr[depth][fiber];
  idx_t hi = fptr[depth][fiber+1];
  while(++depth < nmodes-1) {
    lo = fptr[depth][lo];
    hi = fptr[depth][hi];
  }
  return hi - lo;
}
/**
* @brief Find a permutation of modes that results in non-increasing mode size.
*
* @param dims The tensor dimensions.
* @param nmodes The number of modes.
* @param perm_dims The resulting permutation.
*/
static void p_order_dims_small(
    idx_t const * const dims,
    idx_t const nmodes,
    idx_t * const perm_dims)
{
  idx_t sorted[MAX_NMODES];
  idx_t matched[MAX_NMODES];
  for(idx_t m=0; m < nmodes; ++m) {
    sorted[m] = dims[m];
    matched[m] = 0;
  }
  /* sort dimensions small -> large */
  quicksort(sorted, nmodes);

  /* map each sorted dimension back to an unmatched mode id
   * (quadratic scan; matched[] handles duplicate dimensions).
   * TODO: make a key/val sort... */
  for(idx_t pos=0; pos < nmodes; ++pos) {
    for(idx_t m=0; m < nmodes; ++m) {
      if(matched[m] || sorted[pos] != dims[m]) {
        continue;
      }
      perm_dims[pos] = m;
      matched[m] = 1;
      break;
    }
  }
}
/**
* @brief Find a permutation of modes such that the first mode is 'custom_mode'
* and the remaining are naturally ordered (0, 1, ...).
*
* @param dims The tensor dimensions.
* @param nmodes The number of modes.
* @param custom_mode The mode to place first.
* @param perm_dims The resulting permutation.
*/
static void p_order_dims_inorder(
    idx_t const * const dims,
    idx_t const nmodes,
    idx_t const custom_mode,
    idx_t * const perm_dims)
{
  /* natural ordering: perm[m] = m */
  for(idx_t level=0; level < nmodes; ++level) {
    perm_dims[level] = level;
  }

  /* rotate custom_mode to the front, shifting earlier entries right */
  for(idx_t level=0; level < nmodes; ++level) {
    if(perm_dims[level] == custom_mode) {
      memmove(perm_dims + 1, perm_dims, level * sizeof(*perm_dims));
      perm_dims[0] = custom_mode;
      break;
    }
  }
}
/**
* @brief Find a permutation of modes such that the first mode is 'custom_mode'
* and the remaining are sorted in non-increasing order.
*
* @param dims The tensor dimensions.
* @param nmodes The number of modes.
* @param custom_mode The mode to place first.
* @param perm_dims The resulting permutation.
*/
static void p_order_dims_minusone(
    idx_t const * const dims,
    idx_t const nmodes,
    idx_t const custom_mode,
    idx_t * const perm_dims)
{
  /* start from the non-increasing ordering */
  p_order_dims_small(dims, nmodes, perm_dims);

  /* rotate custom_mode to the front, shifting earlier entries right */
  for(idx_t level=0; level < nmodes; ++level) {
    if(perm_dims[level] == custom_mode) {
      memmove(perm_dims + 1, perm_dims, level * sizeof(*perm_dims));
      perm_dims[0] = custom_mode;
      break;
    }
  }
}
/**
* @brief Find a permutation of modes that results in non-decreasing mode size.
*
* @param dims The tensor dimensions.
* @param nmodes The number of modes.
* @param perm_dims The resulting permutation.
*/
static void p_order_dims_large(
    idx_t const * const dims,
    idx_t const nmodes,
    idx_t * const perm_dims)
{
  idx_t sorted[MAX_NMODES];
  idx_t matched[MAX_NMODES];
  for(idx_t m=0; m < nmodes; ++m) {
    sorted[m] = dims[m];
    matched[m] = 0;
  }
  /* sort small -> large, then reverse in place for large -> small */
  quicksort(sorted, nmodes);
  for(idx_t m=0; m < nmodes/2; ++m) {
    idx_t const other = nmodes - m - 1;
    idx_t const held = sorted[other];
    sorted[other] = sorted[m];
    sorted[m] = held;
  }

  /* map each sorted dimension back to an unmatched mode id
   * (quadratic scan; matched[] handles duplicate dimensions).
   * TODO: make a key/val sort... */
  for(idx_t pos=0; pos < nmodes; ++pos) {
    for(idx_t m=0; m < nmodes; ++m) {
      if(matched[m] || sorted[pos] != dims[m]) {
        continue;
      }
      perm_dims[pos] = m;
      matched[m] = 1;
      break;
    }
  }
}
/**
* @brief Construct the sparsity structure of the outer-mode of a CSF tensor.
*
* @param ct The CSF tensor to construct.
* @param tt The coordinate tensor to construct from. Assumed to be already
* sorted.
* @param tile_id The ID of the tile to construct.
* @param nnztile_ptr A pointer into 'tt' that marks the start of each tile.
*/
static void p_mk_outerptr(
splatt_csf * const ct,
sptensor_t const * const tt,
idx_t const tile_id,
idx_t const * const nnztile_ptr)
{
idx_t const nnzstart = nnztile_ptr[tile_id];
idx_t const nnzend = nnztile_ptr[tile_id+1];
assert(nnzstart < nnzend);
idx_t const nnz = nnzend - nnzstart;
/* grab sparsity pattern */
csf_sparsity * const pt = ct->pt + tile_id;
/* grab top-level indices (offset into this tile's nonzeros) */
idx_t const * const restrict ttind =
nnzstart + tt->ind[csf_depth_to_mode(ct, 0)];
/* partition among threads */
int const nthreads = splatt_omp_get_max_threads();
idx_t * thread_parts = partition_simple(nnz, nthreads);
idx_t * thread_nfibs = splatt_malloc((nthreads+1) * sizeof(*thread_nfibs));
/* Fibers are counted by differing indices -- count at least one fiber */
thread_nfibs[0] = 1;
#pragma omp parallel
{
int const tid = splatt_omp_get_thread_num();
/* start at >= 1 because nonzero 0 was pre-counted in thread_nfibs[0] */
idx_t const nnz_start = SS_MAX(thread_parts[tid], 1); /* skip first nz */
idx_t const nnz_end = thread_parts[tid+1];
/* count fibers in each thread's partition */
idx_t local_nfibs = 0;
for(idx_t x=nnz_start; x < nnz_end; ++x) {
assert(ttind[x-1] <= ttind[x]);
if(ttind[x] != ttind[x-1]) {
++local_nfibs;
}
}
thread_nfibs[tid+1] = local_nfibs; /* +1 for prefix sum */
#pragma omp barrier
#pragma omp single
{
/* prefix sum on # fibers -- thread_nfibs[t] becomes thread t's write offset */
for(int t=0; t < nthreads; ++t) {
thread_nfibs[t+1] += thread_nfibs[t];
}
idx_t const nfibs = thread_nfibs[nthreads];
ct->pt[tile_id].nfibs[0] = nfibs;
assert(nfibs <= ct->dims[csf_depth_to_mode(ct, 0)]);
pt->fptr[0] = splatt_malloc((nfibs+1) * sizeof(**(pt->fptr)));
/* only store top-level fids if we are tiling or there are gaps */
if((ct->ntiles > 1) || (tt->dims[csf_depth_to_mode(ct, 0)] != nfibs)) {
pt->fids[0] = splatt_malloc(nfibs * sizeof(**(pt->fids)));
pt->fids[0][0] = ttind[0];
} else {
pt->fids[0] = NULL;
}
pt->fptr[0][0] = 0;
pt->fptr[0][nfibs] = nnz;
} /* implied barrier */
idx_t * const restrict fp = pt->fptr[0];
idx_t * const restrict fi = pt->fids[0];
/* go back over non-zeros and mark fptr and fids; each thread writes
* starting at its prefix-summed fiber offset */
idx_t nfound = thread_nfibs[tid];
if(fi == NULL) {
for(idx_t n=nnz_start; n < nnz_end; ++n) {
/* check for end of outer index */
if(ttind[n] != ttind[n-1]) {
fp[nfound++] = n;
}
}
} else {
for(idx_t n=nnz_start; n < nnz_end; ++n) {
/* check for end of outer index */
if(ttind[n] != ttind[n-1]) {
fi[nfound] = ttind[n];
fp[nfound++] = n;
}
}
}
} /* end omp parallel */
splatt_free(thread_parts);
splatt_free(thread_nfibs);
}
/**
* @brief Construct the sparsity structure of any mode but the last. The first
* (root) mode is handled by p_mk_outerptr and the last is simply a copy
* of the nonzeros.
*
* @param ct The CSF tensor to construct.
* @param tt The coordinate tensor to construct from. Assumed to be already
* sorted.
* @param tile_id The ID of the tile to construct.
* @param nnztile_ptr A pointer into 'tt' that marks the start of each tile.
* @param mode Which mode we are constructing.
*/
static void p_mk_fptr(
splatt_csf * const ct,
sptensor_t const * const tt,
idx_t const tile_id,
idx_t const * const nnztile_ptr,
idx_t const mode)
{
assert(mode < ct->nmodes);
idx_t const nnzstart = nnztile_ptr[tile_id];
idx_t const nnzend = nnztile_ptr[tile_id+1];
idx_t const nnz = nnzend - nnzstart;
/* outer mode is easy; just look at outer indices */
if(mode == 0) {
p_mk_outerptr(ct, tt, tile_id, nnztile_ptr);
return;
}
/* the mode after accounting for dim_perm */
idx_t const * const restrict ttind =
nnzstart + tt->ind[csf_depth_to_mode(ct, mode)];
/* grab sparsity pattern */
csf_sparsity * const pt = ct->pt + tile_id;
/* we will edit this to point to the new fiber idxs instead of nnz */
idx_t * const restrict fprev = pt->fptr[mode-1];
/* partition among threads */
int const nthreads = splatt_omp_get_max_threads();
idx_t * thread_parts = partition_simple(pt->nfibs[mode-1], nthreads);
idx_t * thread_nfibs = splatt_malloc((nthreads+1) * sizeof(*thread_nfibs));
thread_nfibs[0] = 0;
#pragma omp parallel
{
int const tid = splatt_omp_get_thread_num();
idx_t const slice_start = thread_parts[tid];
idx_t const slice_end = thread_parts[tid+1];
/* first count nfibers */
/* foreach 'slice' in the previous dimension */
idx_t local_nfibs = 0;
for(idx_t s=slice_start; s < slice_end; ++s) {
++local_nfibs; /* one by default per 'slice' */
/* count fibers in current hyperplane*/
for(idx_t f=fprev[s]+1; f < fprev[s+1]; ++f) {
if(ttind[f] != ttind[f-1]) {
++local_nfibs;
}
}
}
thread_nfibs[tid+1] = local_nfibs; /* +1 for prefix sum */
/* save fprev[slice_end] before the fill phase overwrites fprev entries */
idx_t const fprev_end = fprev[slice_end];
#pragma omp barrier
#pragma omp single
{
/* prefix sum on # fibers -- thread_nfibs[t] becomes thread t's write offset */
for(int t=0; t < nthreads; ++t) {
thread_nfibs[t+1] += thread_nfibs[t];
}
idx_t const nfibs = thread_nfibs[nthreads];
pt->nfibs[mode] = nfibs;
pt->fptr[mode] = splatt_malloc((nfibs+1) * sizeof(**(pt->fptr)));
pt->fptr[mode][0] = 0;
pt->fids[mode] = splatt_malloc(nfibs * sizeof(**(pt->fids)));
} /* implied barrier */
idx_t * const restrict fp = pt->fptr[mode];
idx_t * const restrict fi = pt->fids[mode];
/* now fill in fiber info; fprev is rewritten in place so that each entry
* points at its first fiber id instead of its first nonzero */
idx_t nfound = thread_nfibs[tid];
for(idx_t s=slice_start; s < slice_end; ++s) {
idx_t const start = fprev[s]+1;
idx_t const end = (s == slice_end - 1) ? fprev_end : fprev[s+1];
/* mark start of subtree */
fprev[s] = nfound;
fi[nfound] = ttind[start-1];
fp[nfound++] = start-1;
/* mark fibers in current hyperplane */
for(idx_t f=start; f < end; ++f) {
if(ttind[f] != ttind[f-1]) {
fi[nfound] = ttind[f];
fp[nfound++] = f;
}
}
}
/* mark end of last hyperplane */
if(tid == nthreads - 1) {
fprev[pt->nfibs[mode-1]] = thread_nfibs[nthreads];
fp[thread_nfibs[nthreads]] = nnz;
}
} /* end omp parallel */
splatt_free(thread_parts);
splatt_free(thread_nfibs);
}
/**
* @brief Allocate and fill a CSF tensor from a coordinate tensor without
* tiling.
*
* @param ct The CSF tensor to fill out.
* @param tt The sparse tensor to start from (reordered by tt_sort).
*/
static void p_csf_alloc_untiled(
splatt_csf * const ct,
sptensor_t * const tt)
{
idx_t const nmodes = tt->nmodes;
/* sort nonzeros following the chosen mode permutation */
tt_sort(tt, ct->dim_perm[0], ct->dim_perm);
/* a single 'tile' spans the whole tensor */
ct->ntiles = 1;
ct->ntiled_modes = 0;
for(idx_t m=0; m < nmodes; ++m) {
ct->tile_dims[m] = 1;
}
ct->pt = splatt_malloc(sizeof(*(ct->pt)));
csf_sparsity * const pt = ct->pt;
/* last row of fptr is just nonzero inds */
pt->nfibs[nmodes-1] = ct->nnz;
pt->fids[nmodes-1] = splatt_malloc(ct->nnz * sizeof(**(pt->fids)));
pt->vals = splatt_malloc(ct->nnz * sizeof(*(pt->vals)));
par_memcpy(pt->fids[nmodes-1], tt->ind[csf_depth_to_mode(ct, nmodes-1)],
ct->nnz * sizeof(**(pt->fids)));
par_memcpy(pt->vals, tt->vals, ct->nnz * sizeof(*(pt->vals)));
/* setup a basic tile ptr for one tile */
idx_t nnz_ptr[2];
nnz_ptr[0] = 0;
nnz_ptr[1] = tt->nnz;
/* create fptr entries for the rest of the modes, working down from roots.
* Skip the bottom level (nnz) */
for(idx_t m=0; m < tt->nmodes-1; ++m) {
p_mk_fptr(ct, tt, 0, nnz_ptr, m);
}
}
/**
* @brief Reorder the nonzeros in a sparse tensor using dense tiling and fill
* a CSF tensor with the data.
*
* @param ct The CSF tensor to fill.
* @param tt The sparse tensor to start from (reordered by tt_sort/tt_densetile).
* @param splatt_opts Options array for SPLATT - used for tile dimensions.
*/
static void p_csf_alloc_densetile(
splatt_csf * const ct,
sptensor_t * const tt,
double const * const splatt_opts)
{
idx_t const nmodes = tt->nmodes;
/* how many levels we tile (counting from the bottom) */
ct->ntiled_modes = (idx_t)splatt_opts[SPLATT_OPTION_TILELEVEL];
ct->ntiled_modes = SS_MIN(ct->ntiled_modes, ct->nmodes);
/* how many levels from the root do we start tiling? */
idx_t const tile_depth = ct->nmodes - ct->ntiled_modes;
/* total tiles = product of per-mode tile counts */
idx_t ntiles = 1;
for(idx_t m=0; m < nmodes; ++m) {
idx_t const depth = csf_mode_to_depth(ct, m);
if(depth >= tile_depth) {
ct->tile_dims[m] = (idx_t) splatt_opts[SPLATT_OPTION_NTHREADS];
} else {
ct->tile_dims[m] = 1;
}
ntiles *= ct->tile_dims[m];
}
/* perform tensor tiling */
tt_sort(tt, ct->dim_perm[0], ct->dim_perm);
idx_t * nnz_ptr = tt_densetile(tt, ct->tile_dims);
ct->ntiles = ntiles;
ct->pt = splatt_malloc(ntiles * sizeof(*(ct->pt)));
for(idx_t t=0; t < ntiles; ++t) {
idx_t const startnnz = nnz_ptr[t];
idx_t const endnnz = nnz_ptr[t+1];
idx_t const ptnnz = endnnz - startnnz;
csf_sparsity * const pt = ct->pt + t;
/* empty tile */
if(ptnnz == 0) {
for(idx_t m=0; m < ct->nmodes; ++m) {
pt->fptr[m] = NULL;
pt->fids[m] = NULL;
pt->nfibs[m] = 0;
}
/* first fptr may be accessed anyway */
pt->fptr[0] = (idx_t *) splatt_malloc(2 * sizeof(**(pt->fptr)));
pt->fptr[0][0] = 0;
pt->fptr[0][1] = 0;
pt->vals = NULL;
continue;
}
idx_t const leaves = nmodes-1;
/* last row of fptr is just nonzero inds */
pt->nfibs[leaves] = ptnnz;
pt->fids[leaves] = splatt_malloc(ptnnz * sizeof(**(pt->fids)));
par_memcpy(pt->fids[leaves], tt->ind[csf_depth_to_mode(ct, leaves)] + startnnz,
ptnnz * sizeof(**(pt->fids)));
pt->vals = splatt_malloc(ptnnz * sizeof(*(pt->vals)));
par_memcpy(pt->vals, tt->vals + startnnz, ptnnz * sizeof(*(pt->vals)));
/* create fptr entries for the rest of the modes */
for(idx_t m=0; m < leaves; ++m) {
p_mk_fptr(ct, tt, t, nnz_ptr, m);
}
}
splatt_free(nnz_ptr);
}
/**
* @brief Construct dim_iperm, the inverse of dim_perm: dim_iperm[mode] gives
* the CSF depth (level) at which that mode is stored.
*
* @param ct The CSF tensor.
*/
static void p_fill_dim_iperm(
    splatt_csf * const ct)
{
  idx_t const nmodes = ct->nmodes;
  for(idx_t depth=0; depth < nmodes; ++depth) {
    idx_t const mode = ct->dim_perm[depth];
    ct->dim_iperm[mode] = depth;
  }
}
/**
* @brief Allocate and fill a CSF tensor.
*
* @param ct The CSF tensor to fill.
* @param tt The coordinate tensor to work from (reordered during construction).
* @param mode_type The allocation scheme for the CSF tensor.
* @param mode Which mode we are converting for (if applicable).
* @param splatt_opts Used to determine tiling scheme.
*/
static void p_mk_csf(
    splatt_csf * const ct,
    sptensor_t * const tt,
    csf_mode_type mode_type,
    idx_t const mode,
    double const * const splatt_opts)
{
  /* NOTE: removed leftover debug instrumentation (assert(false) at entry and
   * exit(1) in the DENSETILE case) that aborted every construction. */
  ct->nnz = tt->nnz;
  ct->nmodes = tt->nmodes;
  for(idx_t m=0; m < tt->nmodes; ++m) {
    ct->dims[m] = tt->dims[m];
  }

  /* get the indices in order */
  csf_find_mode_order(tt->dims, tt->nmodes, mode_type, mode, ct->dim_perm);
  p_fill_dim_iperm(ct);

  ct->which_tile = splatt_opts[SPLATT_OPTION_TILE];
  switch(ct->which_tile) {
  case SPLATT_NOTILE:
    p_csf_alloc_untiled(ct, tt);
    break;
  case SPLATT_DENSETILE:
    p_csf_alloc_densetile(ct, tt, splatt_opts);
    break;
  default:
    fprintf(stderr, "SPLATT: tiling '%d' unsupported for CSF tensors.\n",
        ct->which_tile);
    break;
  }
}
/******************************************************************************
* PUBLIC FUNCTIONS
*****************************************************************************/
/*
 * Free an array of CSF tensors. The CSF_ALLOC option determines how many
 * representations were allocated and therefore how many must be released.
 */
void csf_free(
    splatt_csf * const csf,
    double const * const opts)
{
  idx_t ntensors = 0;
  switch((splatt_csf_type) opts[SPLATT_OPTION_CSF_ALLOC]) {
  case SPLATT_CSF_ONEMODE:
    ntensors = 1;
    break;
  case SPLATT_CSF_TWOMODE:
    ntensors = 2;
    break;
  case SPLATT_CSF_ALLMODE:
    ntensors = csf[0].nmodes;
    break;
  }

  /* release each tensor's tiles, then the array itself */
  for(idx_t i=0; i < ntensors; ++i) {
    csf_free_mode(csf + i);
  }
  free(csf);
}
/*
 * Free one CSF representation: every tile's values and fiber structure,
 * followed by the tile array itself.
 */
void csf_free_mode(
    splatt_csf * const csf)
{
  idx_t const nmodes = csf->nmodes;
  for(idx_t tile=0; tile < csf->ntiles; ++tile) {
    csf_sparsity * const pt = csf->pt + tile;
    free(pt->vals);
    free(pt->fids[nmodes-1]);
    for(idx_t m=0; m < nmodes-1; ++m) {
      free(pt->fptr[m]);
      free(pt->fids[m]);
    }
  }
  free(csf->pt);
}
/*
 * Dispatch to the ordering routine matching 'which'. CSF_MODE_CUSTOM is a
 * deliberate no-op: the caller must have filled perm_dims already.
 */
void csf_find_mode_order(
    idx_t const * const dims,
    idx_t const nmodes,
    csf_mode_type which,
    idx_t const mode,
    idx_t * const perm_dims)
{
  if(which == CSF_SORTED_SMALLFIRST) {
    p_order_dims_small(dims, nmodes, perm_dims);
  } else if(which == CSF_SORTED_BIGFIRST) {
    p_order_dims_large(dims, nmodes, perm_dims);
  } else if(which == CSF_INORDER_MINUSONE) {
    p_order_dims_inorder(dims, nmodes, mode, perm_dims);
  } else if(which == CSF_SORTED_MINUSONE) {
    p_order_dims_minusone(dims, nmodes, mode, perm_dims);
  } else if(which != CSF_MODE_CUSTOM) {
    fprintf(stderr, "SPLATT: csf_mode_type '%d' not recognized.\n", which);
  }
}
/*
 * Total bytes consumed by all allocated CSF representations: values, leaf
 * fids, tile headers, and per-level fptr/fids arrays.
 */
size_t csf_storage(
    splatt_csf const * const tensors,
    double const * const opts)
{
  /* how many CSF representations were allocated */
  idx_t ntensors = 0;
  switch((splatt_csf_type) opts[SPLATT_OPTION_CSF_ALLOC]) {
  case SPLATT_CSF_ONEMODE:
    ntensors = 1;
    break;
  case SPLATT_CSF_TWOMODE:
    ntensors = 2;
    break;
  case SPLATT_CSF_ALLMODE:
    ntensors = tensors[0].nmodes;
    break;
  }

  size_t bytes = 0;
  for(idx_t which=0; which < ntensors; ++which) {
    splatt_csf const * const ct = tensors + which;
    bytes += ct->nnz * sizeof(*(ct->pt->vals));  /* vals */
    bytes += ct->nnz * sizeof(**(ct->pt->fids)); /* leaf-level fids */
    bytes += ct->ntiles * sizeof(*(ct->pt));     /* tile headers */

    for(idx_t tile=0; tile < ct->ntiles; ++tile) {
      csf_sparsity const * const pt = ct->pt + tile;
      for(idx_t level=0; level < ct->nmodes-1; ++level) {
        bytes += (pt->nfibs[level]+1) * sizeof(**(pt->fptr)); /* fptr */
        if(pt->fids[level] != NULL) {
          bytes += pt->nfibs[level] * sizeof(**(pt->fids)); /* fids */
        }
      }
    }
  }
  return bytes;
}
/*
 * Allocate and construct the CSF representation(s) of 'tt' according to the
 * CSF_ALLOC option: one tensor, two (root + untiled leaf-rooted), or one per
 * mode. Returns the allocated array (caller frees with csf_free), or NULL if
 * the option value is unrecognized.
 *
 * NOTE: removed leftover debug 'exit(1)' calls that aborted the ONEMODE and
 * ALLMODE paths, and an unused local 'tmp'.
 */
splatt_csf * csf_alloc(
    sptensor_t * const tt,
    double const * const opts)
{
  splatt_csf * ret = NULL;
  double * tmp_opts = NULL;
  idx_t last_mode = 0;

  switch((splatt_csf_type) opts[SPLATT_OPTION_CSF_ALLOC]) {
  case SPLATT_CSF_ONEMODE:
    ret = splatt_malloc(sizeof(*ret));
    p_mk_csf(ret, tt, CSF_SORTED_SMALLFIRST, 0, opts);
    break;

  case SPLATT_CSF_TWOMODE:
    ret = splatt_malloc(2 * sizeof(*ret));
    /* regular CSF allocation */
    p_mk_csf(ret + 0, tt, CSF_SORTED_SMALLFIRST, 0, opts);

    /* make a copy of opts and don't tile the last mode
     * TODO make this configurable? */
    tmp_opts = splatt_default_opts();
    memcpy(tmp_opts, opts, SPLATT_OPTION_NOPTIONS * sizeof(*opts));
    tmp_opts[SPLATT_OPTION_TILE] = SPLATT_NOTILE;

    /* allocate with no tiling for the last mode */
    last_mode = csf_depth_to_mode(&(ret[0]), tt->nmodes-1);
    p_mk_csf(ret + 1, tt, CSF_SORTED_MINUSONE, last_mode, tmp_opts);

    free(tmp_opts);
    break;

  case SPLATT_CSF_ALLMODE:
    ret = splatt_malloc(tt->nmodes * sizeof(*ret));
    for(idx_t m=0; m < tt->nmodes; ++m) {
      p_mk_csf(ret + m, tt, CSF_SORTED_MINUSONE, m, opts);
    }
    break;
  }

  return ret;
}
/*
 * Fill a caller-allocated CSF tensor using an explicit mode ordering.
 * Thin wrapper around the common construction routine.
 */
void csf_alloc_mode(
    sptensor_t * const tt,
    csf_mode_type which_ordering,
    idx_t const mode_special,
    splatt_csf * const csf,
    double const * const opts)
{
  p_mk_csf(csf, tt, which_ordering, mode_special, opts);
}
/* Sum of squared values over all tiles (squared Frobenius norm). */
val_t csf_frobsq(
splatt_csf const * const tensor)
{
/* accumulate into double to help with some precision loss */
double norm = 0;
#pragma omp parallel reduction(+:norm)
{
/* all threads walk every tile; the 'for nowait' below splits each tile's
* nonzeros among threads without a barrier between tiles */
for(idx_t t=0; t < tensor->ntiles; ++t) {
val_t const * const vals = tensor->pt[t].vals;
/* empty tiles store no values */
if(vals == NULL) {
continue;
}
idx_t const nnz = tensor->pt[t].nfibs[tensor->nmodes-1];
#pragma omp for schedule(static) nowait
for(idx_t n=0; n < nnz; ++n) {
norm += vals[n] * vals[n];
}
}
} /* end omp parallel */
return (val_t) norm;
}
/*
 * Partition the root-level slices of one tile into 'nparts' balanced parts,
 * weighting each slice by the number of nonzeros beneath it.
 */
idx_t * csf_partition_1d(
    splatt_csf const * const csf,
    idx_t const tile_id,
    idx_t const nparts)
{
  idx_t const nslices = csf->pt[tile_id].nfibs[0];
  idx_t * const weights = splatt_malloc(nslices * sizeof(*weights));

  /* weight each slice by its nonzero count */
  #pragma omp parallel for schedule(static)
  for(idx_t s=0; s < nslices; ++s) {
    weights[s] = p_csf_count_nnz(csf->pt[tile_id].fptr, csf->nmodes, 0, s);
  }

  idx_t bottleneck;
  idx_t * const parts = partition_weighted(weights, nslices, nparts, &bottleneck);
  splatt_free(weights);
  return parts;
}
/*
 * Partition whole tiles into 'nparts' balanced parts, weighting each tile by
 * its nonzero count (the number of leaf-level fibers).
 */
idx_t * csf_partition_tiles_1d(
    splatt_csf const * const csf,
    idx_t const nparts)
{
  idx_t const nmodes = csf->nmodes;
  idx_t const ntiles = csf->ntiles;
  idx_t * const weights = splatt_malloc(ntiles * sizeof(*weights));

  /* a tile's weight is its nonzero count */
  #pragma omp parallel for schedule(static)
  for(idx_t tile=0; tile < ntiles; ++tile) {
    weights[tile] = csf->pt[tile].nfibs[nmodes-1];
  }

  idx_t bottleneck;
  idx_t * const parts = partition_weighted(weights, ntiles, nparts, &bottleneck);
  splatt_free(weights);
  return parts;
}
|
residual_displacement_and_other_dof_criteria.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_RESIDUAL_DISPLACEMENT_AND_OTHER_DOF_CRITERIA )
#define KRATOS_RESIDUAL_DISPLACEMENT_AND_OTHER_DOF_CRITERIA
// System includes
// External includes
// Project includes
#include "includes/model_part.h"
#include "includes/define.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ResidualDisplacementAndOtherDoFCriteria
* @ingroup StructuralMechanicsApplication
* @brief This is a convergence criterion that employs the residual as the criterion; it splits the problem's DoFs into two sets: displacement and one other DoF type
* @details The reactions from the RHS are not computed in the residual. Can be used with for example rotations or pressure
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace,
class TDenseSpace
>
class ResidualDisplacementAndOtherDoFCriteria
: public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION( ResidualDisplacementAndOtherDoFCriteria );
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
typedef TSparseSpace SparseSpaceType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef std::size_t IndexType;
typedef std::size_t SizeType;
///@}
///@name Life Cycle
///@{
/** Constructor.
* @param RatioTolerance Relative tolerance for the residual ratio check
* @param AbsoluteTolerance Absolute tolerance for the residual norm check
* @param OtherDoFName The name of the other DoF (default "ROTATION")
*/
ResidualDisplacementAndOtherDoFCriteria(
TDataType RatioTolerance,
TDataType AbsoluteTolerance,
const std::string OtherDoFName = "ROTATION"
)
: ConvergenceCriteria< TSparseSpace, TDenseSpace >(),
mOtherDoFName(OtherDoFName),
// NOTE: the residual norms are value-initialized so PostCriteria never reads
// indeterminate members if it is invoked before InitializeSolutionStep
// (members are listed in declaration order to match initialization order)
mInitialResidualDispNorm(TDataType()),
mCurrentResidualDispNorm(TDataType()),
mInitialResidualOtherDoFNorm(TDataType()),
mCurrentResidualOtherDoFNorm(TDataType()),
mRatioTolerance(RatioTolerance),
mAbsoluteTolerance(AbsoluteTolerance)
{
// the criterion evaluates the residual, so the RHS must be kept up to date
this->mActualizeRHSIsNeeded = true;
}
/// Copy constructor. Copies tolerances, the other-DoF name, and the cached
/// residual norms (members initialize in declaration order regardless of the
/// order written here).
ResidualDisplacementAndOtherDoFCriteria( ResidualDisplacementAndOtherDoFCriteria const& rOther )
:BaseType(rOther)
,mOtherDoFName(rOther.mOtherDoFName)
,mRatioTolerance(rOther.mRatioTolerance)
,mAbsoluteTolerance(rOther.mAbsoluteTolerance)
,mInitialResidualDispNorm(rOther.mInitialResidualDispNorm)
,mCurrentResidualDispNorm(rOther.mCurrentResidualDispNorm)
,mInitialResidualOtherDoFNorm(rOther.mInitialResidualOtherDoFNorm)
,mCurrentResidualOtherDoFNorm(rOther.mCurrentResidualOtherDoFNorm)
{
// the criterion evaluates the residual, so the RHS must be kept up to date
this->mActualizeRHSIsNeeded = true;
}
/// Destructor.
~ResidualDisplacementAndOtherDoFCriteria() override {}
///@}
///@name Operators
///@{
/**
* Compute relative and absolute error after a solve and decide convergence.
* Convergence requires BOTH DoF sets (displacement and the other DoF) to pass
* either their ratio check or their absolute-norm check.
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
* @return true if convergence is achieved, false otherwise
*/
bool PostCriteria(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
if (TSparseSpace::Size(rb) != 0) { // only check if we are solving for something
TDataType ratio_displacement = 0.0;
TDataType ratio_other_dof = 0.0;
// recompute current residual norms; disp_size counts the displacement DoFs
SizeType disp_size;
CalculateResidualNorm(rModelPart, mCurrentResidualDispNorm, mCurrentResidualOtherDoFNorm, disp_size, rDofSet, rb);
// guard against division by zero when the initial norm is exactly zero
if (mInitialResidualDispNorm == 0.0) {
ratio_displacement = 0.0;
} else {
ratio_displacement = mCurrentResidualDispNorm/mInitialResidualDispNorm;
}
if (mInitialResidualOtherDoFNorm == 0.0) {
ratio_other_dof = 0.0;
} else {
ratio_other_dof = mCurrentResidualOtherDoFNorm/mInitialResidualOtherDoFNorm;
}
// per-DoF averaged absolute norms
// NOTE(review): if the model has no "other" DoFs, system_size == disp_size
// and the division below is by zero -- confirm this cannot happen in use
const std::size_t system_size = TSparseSpace::Size(rb);
const TDataType absolute_norm_disp = (mCurrentResidualDispNorm/static_cast<TDataType>(disp_size));
const TDataType absolute_norm_other_dof = (mCurrentResidualOtherDoFNorm/static_cast<TDataType>(system_size - disp_size));
KRATOS_INFO_IF("ResidualDisplacementAndOtherDoFCriteria", this->GetEchoLevel() > 0) << "RESIDUAL DISPLACEMENT CRITERION :: Ratio = "<< ratio_displacement << "; Norm = " << absolute_norm_disp << std::endl;
KRATOS_INFO_IF("ResidualDisplacementAndOtherDoFCriteria", this->GetEchoLevel() > 0) << "RESIDUAL " << mOtherDoFName << " CRITERION :: Ratio = "<< ratio_other_dof << "; Norm = " << absolute_norm_other_dof << std::endl;
// expose the displacement ratio/norm to the rest of the solver
rModelPart.GetProcessInfo()[CONVERGENCE_RATIO] = ratio_displacement;
rModelPart.GetProcessInfo()[RESIDUAL_NORM] = absolute_norm_disp;
if ((ratio_displacement <= mRatioTolerance || absolute_norm_disp < mAbsoluteTolerance) && (ratio_other_dof <= mRatioTolerance || absolute_norm_other_dof < mAbsoluteTolerance)) {
KRATOS_INFO_IF("ResidualDisplacementAndOtherDoFCriteria", this->GetEchoLevel() > 0) << "Convergence is achieved" << std::endl;
return true;
} else {
return false;
}
} else {
// empty system: trivially converged
return true;
}
}
/**
* This function initializes the convergence criteria (flags it as initialized).
* @param rModelPart Reference to the ModelPart containing the contact problem. (unused)
*/
void Initialize(
ModelPart& rModelPart
) override
{
BaseType::mConvergenceCriteriaIsInitialized = true;
}
/**
* This function initializes the solution step: builds the active-DoF mask when
* multipoint constraints exist and caches the initial residual norms used as
* the reference for the ratio check in PostCriteria.
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
*/
void InitializeSolutionStep(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
BaseType::InitializeSolutionStep(rModelPart, rDofSet, rA, rDx, rb);
// Filling mActiveDofs when MPC exist
if (rModelPart.NumberOfMasterSlaveConstraints() > 0) {
// start with every DoF active
mActiveDofs.resize(rDofSet.size());
#pragma omp parallel for
for(int i=0; i<static_cast<int>(mActiveDofs.size()); ++i) {
mActiveDofs[i] = true;
}
// fixed DoFs do not contribute to the residual norm
#pragma omp parallel for
for (int i=0; i<static_cast<int>(rDofSet.size()); ++i) {
const auto it_dof = rDofSet.begin() + i;
if (it_dof->IsFixed()) {
mActiveDofs[it_dof->EquationId()] = false;
}
}
// both master and slave DoFs of each constraint are excluded as well
// (sequential: std::vector<bool> writes are not thread-safe per element)
for (const auto& r_mpc : rModelPart.MasterSlaveConstraints()) {
for (const auto& r_dof : r_mpc.GetMasterDofsVector()) {
mActiveDofs[r_dof->EquationId()] = false;
}
for (const auto& r_dof : r_mpc.GetSlaveDofsVector()) {
mActiveDofs[r_dof->EquationId()] = false;
}
}
}
// cache the reference norms for the relative (ratio) convergence check
SizeType size_residual;
CalculateResidualNorm(rModelPart, mInitialResidualDispNorm, mInitialResidualOtherDoFNorm, size_residual, rDofSet, rb);
}
///@}
///@name Operations
///@{
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
std::string mOtherDoFName; // The name of the other DoF
TDataType mInitialResidualDispNorm; // The initial residual norm for displacements
TDataType mCurrentResidualDispNorm; // The current residual norm for displacements
TDataType mInitialResidualOtherDoFNorm; // The initial residual norm for the other DoF
TDataType mCurrentResidualOtherDoFNorm; // The current residual norm for the other DoF
TDataType mRatioTolerance; // The tolerance admitted in the ratio
TDataType mAbsoluteTolerance; // The tolerance admitted in the absolute value
std::vector<bool> mActiveDofs; /// This vector contains the dofs that are active
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
* @brief This method computes the norm of the residual
* @details It checks if the dof is fixed
* @param rModelPart Reference to the ModelPart containing the problem.
* @param rResidualSolutionNorm The norm of the residual
* @param rDofNum The number of DoFs
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rb RHS vector (residual + reactions)
*/
virtual void CalculateResidualNorm(
ModelPart& rModelPart,
TDataType& rResidualSolutionNormDisp,
TDataType& rResidualSolutionNormOtherDof,
SizeType& rDofNumDisp,
DofsArrayType& rDofSet,
const TSystemVectorType& rb
)
{
// Initialize
TDataType residual_solution_norm_disp = TDataType();
TDataType residual_solution_norm_other_dof = TDataType();
SizeType disp_dof_num = 0;
// Auxiliar values
TDataType residual_dof_value = 0.0;
const auto it_dof_begin = rDofSet.begin();
const int number_of_dof = static_cast<int>(rDofSet.size());
// Loop over Dofs
if (rModelPart.NumberOfMasterSlaveConstraints() > 0) {
#pragma omp parallel for firstprivate(residual_dof_value) reduction(+:residual_solution_norm_disp, residual_solution_norm_other_dof, disp_dof_num)
for (int i = 0; i < number_of_dof; i++) {
auto it_dof = it_dof_begin + i;
const IndexType dof_id = it_dof->EquationId();
residual_dof_value = TSparseSpace::GetValue(rb,dof_id);
if (mActiveDofs[dof_id]) {
if (it_dof->GetVariable() == DISPLACEMENT_X || it_dof->GetVariable() == DISPLACEMENT_Y || it_dof->GetVariable() == DISPLACEMENT_Z) {
residual_solution_norm_disp += std::pow(residual_dof_value, 2);
disp_dof_num++;
} else {
residual_solution_norm_other_dof += std::pow(residual_dof_value, 2);
}
}
}
} else {
#pragma omp parallel for firstprivate(residual_dof_value) reduction(+:residual_solution_norm_disp, residual_solution_norm_other_dof, disp_dof_num)
for (int i = 0; i < number_of_dof; i++) {
auto it_dof = it_dof_begin + i;
if (!it_dof->IsFixed()) {
const IndexType dof_id = it_dof->EquationId();
residual_dof_value = TSparseSpace::GetValue(rb,dof_id);
if (it_dof->GetVariable() == DISPLACEMENT_X || it_dof->GetVariable() == DISPLACEMENT_Y || it_dof->GetVariable() == DISPLACEMENT_Z) {
residual_solution_norm_disp += std::pow(residual_dof_value, 2);
disp_dof_num++;
} else {
residual_solution_norm_other_dof += std::pow(residual_dof_value, 2);
}
}
}
}
rDofNumDisp = disp_dof_num;
rResidualSolutionNormDisp = std::sqrt(residual_solution_norm_disp);
rResidualSolutionNormOtherDof = std::sqrt(residual_solution_norm_other_dof);
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; // Class ClassName
///@}
///@name Type Definitions
///@{
///@}
} // namespace Kratos.
#endif // KRATOS_RESIDUAL_DISPLACEMENT_AND_OTHER_DOF_CRITERIA defined
|
3d25pt_var.c | /*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize *y so that x->tv_usec - y->tv_usec falls within one second.
     * Note: *y is modified in place, exactly as in the glibc example. */
    if (x->tv_usec < y->tv_usec)
    {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec += carry;
    }
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }

    /* After normalization, tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Nonzero exactly when the difference is negative. */
    return x->tv_sec < y->tv_sec;
}
/**
 * Driver: allocates the double-buffered grid A and 13 coefficient arrays,
 * runs the order-4 (25-point) axis-symmetric variable-coefficient stencil
 * TESTS times, and reports the best wall-clock time per test.
 *
 * Usage: prog [Nx Ny Nz [Nt]] -- interior sizes; an 8-cell halo is added.
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    int Nx, Ny, Nz, Nt;

    /* Grid sizes include an 8-point halo (4 cells on each face).
     * Defaults keep the sizes defined when arguments are missing
     * (previously Nx/Ny/Nz/Nt were read uninitialized -- UB). */
    Nx = Ny = Nz = 32 + 8;
    Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+8;
        Ny = atoi(argv[2])+8;
        Nz = atoi(argv[3])+8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    // allocate the arrays
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for(m=0; m<2;m++){
        A[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            A[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    double ****coef = (double ****) malloc(sizeof(double***)*13);
    for(m=0; m<13;m++){
        coef[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 8;
    tile_size[1] = 8;
    tile_size[2] = 16;
    tile_size[3] = 64;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;

    const int BASE = 1024;

    /* Initialize the full grids, index 0 and both time buffers included:
     * the stencil reads halo cells 0..3, which the original loops
     * (starting at 1, buffer 0 only) left uninitialized. */
    srand(42);
    for (m = 0; m < 2; m++) {
        for (i = 0; i < Nz; i++) {
            for (j = 0; j < Ny; j++) {
                for (k = 0; k < Nx; k++) {
                    A[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }
    for (m=0; m<13; m++) {
        for (i=0; i<Nz; i++) {
            for (j=0; j<Ny; j++) {
                for (k=0; k<Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);

        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        A[(t+1)%2][i][j][k] =
                            coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
                            coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
                            coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
                            coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
                            coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
                            coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
                            coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
                            coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
                            coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
                            coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
                            coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
                            coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
                            coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
                    }
                }
            }
        }
#pragma endscop

        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        /* BUG FIX: the original called lowercase min(), which is undefined;
         * the macro defined at the top of this file is MIN. */
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(4, "variable axis-symmetric")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays (including the top-level pointer arrays and
    // tile_size, which were previously leaked)
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    for(m=0; m<13;m++){
        for(i=0; i<Nz; i++){
            for(j=0;j<Ny;j++){
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(coef);
    free(tile_size);

    return 0;
}
|
t_cholmod_super_numeric.c | /* ========================================================================== */
/* === Supernodal/t_cholmod_super_numeric =================================== */
/* ========================================================================== */
/* -----------------------------------------------------------------------------
* CHOLMOD/Supernodal Module. Copyright (C) 2005-2012, Timothy A. Davis
* http://www.suitesparse.com
* -------------------------------------------------------------------------- */
/* Template routine for cholmod_super_numeric. All xtypes supported, except
* that a zomplex A and F result in a complex L (there is no supernodal
* zomplex L).
*/
/* ========================================================================== */
/* === complex arithmetic =================================================== */
/* ========================================================================== */
#include "cholmod_template.h"
#undef L_ENTRY
#undef L_CLEAR
#undef L_ASSIGN
#undef L_MULTADD
#undef L_ASSEMBLE
#undef L_ASSEMBLESUB
#ifdef REAL
/* -------------------------------------------------------------------------- */
/* A, F, and L are all real */
/* -------------------------------------------------------------------------- */
#define L_ENTRY 1
#define L_CLEAR(Lx,p) Lx [p] = 0
#define L_ASSIGN(Lx,q, Ax,Az,p) Lx [q] = Ax [p]
#define L_MULTADD(Lx,q, Ax,Az,p, f) Lx [q] += Ax [p] * f [0]
#define L_ASSEMBLE(Lx,q,b) Lx [q] += b [0]
#define L_ASSEMBLESUB(Lx,q,C,p) Lx [q] -= C [p]
#else
/* -------------------------------------------------------------------------- */
/* A and F are complex or zomplex, L and C are complex */
/* -------------------------------------------------------------------------- */
#define L_ENTRY 2
#define L_CLEAR(Lx,p) Lx [2*(p)] = 0 ; Lx [2*(p)+1] = 0
#define L_ASSEMBLE(Lx,q,b) Lx [2*(q)] += b [0] ;
#define L_ASSEMBLESUB(Lx,q,C,p) \
Lx [2*(q) ] -= C [2*(p) ] ; \
Lx [2*(q)+1] -= C [2*(p)+1] ;
#ifdef COMPLEX
/* -------------------------------------------------------------------------- */
/* A, F, L, and C are all complex */
/* -------------------------------------------------------------------------- */
#define L_ASSIGN(Lx,q, Ax,Az,p) \
Lx [2*(q) ] = Ax [2*(p) ] ; \
Lx [2*(q)+1] = Ax [2*(p)+1]
#define L_MULTADD(Lx,q, Ax,Az,p, f) \
Lx [2*(q) ] += Ax [2*(p) ] * f [0] - Ax [2*(p)+1] * f [1] ; \
Lx [2*(q)+1] += Ax [2*(p)+1] * f [0] + Ax [2*(p) ] * f [1]
#else
/* -------------------------------------------------------------------------- */
/* A and F are zomplex, L and C are complex */
/* -------------------------------------------------------------------------- */
#define L_ASSIGN(Lx,q, Ax,Az,p) \
Lx [2*(q) ] = Ax [p] ; \
Lx [2*(q)+1] = Az [p] ;
#define L_MULTADD(Lx,q, Ax,Az,p, f) \
Lx [2*(q) ] += Ax [p] * f [0] - Az [p] * f [1] ; \
Lx [2*(q)+1] += Az [p] * f [0] + Ax [p] * f [1]
#endif
#endif
/* ========================================================================== */
/* === t_cholmod_super_numeric ============================================== */
/* ========================================================================== */
/* This function returns FALSE only if integer overflow occurs in the BLAS.
* It returns TRUE otherwise whether or not the matrix is positive definite. */
static int TEMPLATE (cholmod_super_numeric)
(
/* ---- input ---- */
cholmod_sparse *A, /* matrix to factorize */
cholmod_sparse *F, /* F = A' or A(:,f)' */
double beta [2], /* beta*I is added to diagonal of matrix to factorize */
/* ---- in/out --- */
cholmod_factor *L, /* factorization */
/* -- workspace -- */
cholmod_dense *Cwork, /* size (L->maxcsize)-by-1 */
/* --------------- */
cholmod_common *Common
)
{
double one [2], zero [2], tstart ;
double *Lx, *Ax, *Fx, *Az, *Fz, *C ;
Int *Super, *Head, *Ls, *Lpi, *Lpx, *Map, *SuperMap, *RelativeMap, *Next,
*Lpos, *Fp, *Fi, *Fnz, *Ap, *Ai, *Anz, *Iwork, *Next_save, *Lpos_save,
*Previous;
Int nsuper, n, j, i, k, s, p, pend, k1, k2, nscol, psi, psx, psend, nsrow,
pj, d, kd1, kd2, info, ndcol, ndrow, pdi, pdx, pdend, pdi1, pdi2, pdx1,
ndrow1, ndrow2, px, dancestor, sparent, dnext, nsrow2, ndrow3, pk, pf,
pfend, stype, Apacked, Fpacked, q, imap, repeat_supernode, nscol2, ss,
tail, nscol_new = 0;
/* ---------------------------------------------------------------------- */
/* declarations for the GPU */
/* ---------------------------------------------------------------------- */
/* these variables are not used if the GPU module is not installed */
#ifdef GPU_BLAS
Int ndescendants, mapCreatedOnGpu, supernodeUsedGPU,
idescendant, dlarge, dsmall, skips ;
int iHostBuff, iDevBuff, useGPU, GPUavailable ;
cholmod_gpu_pointers *gpu_p, gpu_pointer_struct ;
gpu_p = &gpu_pointer_struct ;
#endif
/* ---------------------------------------------------------------------- */
/* guard against integer overflow in the BLAS */
/* ---------------------------------------------------------------------- */
/* If integer overflow occurs in the BLAS, Common->status is set to
* CHOLMOD_TOO_LARGE, and the contents of Lx are undefined. */
Common->blas_ok = TRUE ;
/* ---------------------------------------------------------------------- */
/* get inputs */
/* ---------------------------------------------------------------------- */
nsuper = L->nsuper ;
n = L->n ;
C = Cwork->x ; /* workspace of size L->maxcsize */
one [0] = 1.0 ; /* ALPHA for *syrk, *herk, *gemm, and *trsm */
one [1] = 0. ;
zero [0] = 0. ; /* BETA for *syrk, *herk, and *gemm */
zero [1] = 0. ;
/* Iwork must be of size 2n + 5*nsuper, allocated in the caller,
* cholmod_super_numeric. The memory cannot be allocated here because the
* cholmod_super_numeric initializes SuperMap, and cholmod_allocate_work
* does not preserve existing workspace if the space needs to be increase
* in size. */
/* allocate integer workspace */
Iwork = Common->Iwork ;
SuperMap = Iwork ; /* size n (i/i/l) */
RelativeMap = Iwork + n ; /* size n (i/i/l) */
Next = Iwork + 2*((size_t) n) ; /* size nsuper*/
Lpos = Iwork + 2*((size_t) n) + nsuper ; /* size nsuper*/
Next_save = Iwork + 2*((size_t) n) + 2*((size_t) nsuper) ;/* size nsuper*/
Lpos_save = Iwork + 2*((size_t) n) + 3*((size_t) nsuper) ;/* size nsuper*/
Previous = Iwork + 2*((size_t) n) + 4*((size_t) nsuper) ;/* size nsuper*/
Map = Common->Flag ; /* size n, use Flag as workspace for Map array */
Head = Common->Head ; /* size n+1, only Head [0..nsuper-1] used */
Ls = L->s ;
Lpi = L->pi ;
Lpx = L->px ;
Super = L->super ;
Lx = L->x ;
#ifdef GPU_BLAS
/* local copy of useGPU */
if ( (Common->useGPU == 1) && L->useGPU)
{
/* Initialize the GPU. If not found, don't use it. */
useGPU = TEMPLATE2 (CHOLMOD (gpu_init))
(C, L, Common, nsuper, n, Lpi[nsuper]-Lpi[0], gpu_p) ;
}
else
{
useGPU = 0;
}
/* fprintf (stderr, "local useGPU %d\n", useGPU) ; */
#endif
#ifndef NTIMER
/* clear GPU / CPU statistics */
Common->CHOLMOD_CPU_GEMM_CALLS = 0 ;
Common->CHOLMOD_CPU_SYRK_CALLS = 0 ;
Common->CHOLMOD_CPU_TRSM_CALLS = 0 ;
Common->CHOLMOD_CPU_POTRF_CALLS = 0 ;
Common->CHOLMOD_GPU_GEMM_CALLS = 0 ;
Common->CHOLMOD_GPU_SYRK_CALLS = 0 ;
Common->CHOLMOD_GPU_TRSM_CALLS = 0 ;
Common->CHOLMOD_GPU_POTRF_CALLS = 0 ;
Common->CHOLMOD_CPU_GEMM_TIME = 0 ;
Common->CHOLMOD_CPU_SYRK_TIME = 0 ;
Common->CHOLMOD_CPU_TRSM_TIME = 0 ;
Common->CHOLMOD_CPU_POTRF_TIME = 0 ;
Common->CHOLMOD_GPU_GEMM_TIME = 0 ;
Common->CHOLMOD_GPU_SYRK_TIME = 0 ;
Common->CHOLMOD_GPU_TRSM_TIME = 0 ;
Common->CHOLMOD_GPU_POTRF_TIME = 0 ;
Common->CHOLMOD_ASSEMBLE_TIME = 0 ;
Common->CHOLMOD_ASSEMBLE_TIME2 = 0 ;
#endif
stype = A->stype ;
if (stype != 0)
{
/* F not accessed */
Fp = NULL ;
Fi = NULL ;
Fx = NULL ;
Fz = NULL ;
Fnz = NULL ;
Fpacked = TRUE ;
}
else
{
Fp = F->p ;
Fi = F->i ;
Fx = F->x ;
Fz = F->z ;
Fnz = F->nz ;
Fpacked = F->packed ;
}
Ap = A->p ;
Ai = A->i ;
Ax = A->x ;
Az = A->z ;
Anz = A->nz ;
Apacked = A->packed ;
/* clear the Map so that changes in the pattern of A can be detected */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
if ( n > 128 ) schedule (static)
for (i = 0 ; i < n ; i++)
{
Map [i] = EMPTY ;
}
/* If the matrix is not positive definite, the supernode s containing the
* first zero or negative diagonal entry of L is repeated (but factorized
* only up to just before the problematic diagonal entry). The purpose is
* to provide MATLAB with [R,p]=chol(A); columns 1 to p-1 of L=R' are
* required, where L(p,p) is the problematic diagonal entry. The
* repeat_supernode flag tells us whether this is the repeated supernode.
* Once supernode s is repeated, the factorization is terminated. */
repeat_supernode = FALSE ;
#ifdef GPU_BLAS
if ( useGPU )
{
/* Case of GPU, zero all supernodes at one time for better performance*/
TEMPLATE2 (CHOLMOD (gpu_clear_memory))(Lx, L->xsize,
CHOLMOD_OMP_NUM_THREADS);
}
#endif
/* ---------------------------------------------------------------------- */
/* supernodal numerical factorization */
/* ---------------------------------------------------------------------- */
for (s = 0 ; s < nsuper ; s++)
{
/* ------------------------------------------------------------------ */
/* get the size of supernode s */
/* ------------------------------------------------------------------ */
k1 = Super [s] ; /* s contains columns k1 to k2-1 of L */
k2 = Super [s+1] ;
nscol = k2 - k1 ; /* # of columns in all of s */
psi = Lpi [s] ; /* pointer to first row of s in Ls */
psx = Lpx [s] ; /* pointer to first row of s in Lx */
psend = Lpi [s+1] ; /* pointer just past last row of s in Ls */
nsrow = psend - psi ; /* # of rows in all of s */
PRINT1 (("====================================================\n"
"S "ID" k1 "ID" k2 "ID" nsrow "ID" nscol "ID" psi "ID" psend "
""ID" psx "ID"\n", s, k1, k2, nsrow, nscol, psi, psend, psx)) ;
/* ------------------------------------------------------------------ */
/* zero the supernode s */
/* ------------------------------------------------------------------ */
ASSERT ((size_t) (psx + nsrow*nscol) <= L->xsize) ;
pend = psx + nsrow * nscol ; /* s is nsrow-by-nscol */
#ifdef GPU_BLAS
if ( !useGPU )
#endif
{
/* Case of no GPU, zero individual supernodes */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
schedule (static) if ( pend - psx > 1024 )
for (p = psx ; p < pend ; p++) {
L_CLEAR (Lx,p);
}
}
/* ------------------------------------------------------------------ */
/* construct the scattered Map for supernode s */
/* ------------------------------------------------------------------ */
/* If row i is the kth row in s, then Map [i] = k. Similarly, if
* column j is the kth column in s, then Map [j] = k. */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
if ( nsrow > 128 )
for (k = 0 ; k < nsrow ; k++)
{
PRINT1 ((" "ID" map "ID"\n", Ls [psi+k], k)) ;
Map [Ls [psi + k]] = k ;
}
/* ------------------------------------------------------------------ */
/* when using GPU, reorder supernodes by levels.*/
/* (all supernodes in a level are independent) */
/* ------------------------------------------------------------------ */
#ifdef GPU_BLAS
if ( useGPU )
{
TEMPLATE2 (CHOLMOD (gpu_reorder_descendants))
( Common, Super, &s, Lpi, Lpos, Head, Next, Previous,
&ndescendants, &tail, &mapCreatedOnGpu, gpu_p ) ;
}
#endif
/* ------------------------------------------------------------------ */
/* copy matrix into supernode s (lower triangular part only) */
/* ------------------------------------------------------------------ */
pk = psx ;
#pragma omp parallel for private ( p, pend, pfend, pf, i, j, imap, q ) \
num_threads(CHOLMOD_OMP_NUM_THREADS) if ( k2-k1 > 64 )
for (k = k1 ; k < k2 ; k++)
{
if (stype != 0)
{
/* copy the kth column of A into the supernode */
p = Ap [k] ;
pend = (Apacked) ? (Ap [k+1]) : (p + Anz [k]) ;
for ( ; p < pend ; p++)
{
/* row i of L is located in row Map [i] of s */
i = Ai [p] ;
if (i >= k)
{
/* This test is here simply to avoid a segfault. If
* the test is false, the numeric factorization of A
* is undefined. It does not detect all invalid
* entries, only some of them (when debugging is
* enabled, and Map is cleared after each step, then
* all entries not in the pattern of L are detected). */
imap = Map [i] ;
if (imap >= 0 && imap < nsrow)
{
/* Lx [Map [i] + pk] = Ax [p] ; */
L_ASSIGN (Lx,(imap+(psx+(k-k1)*nsrow)), Ax,Az,p) ;
}
}
}
}
else
{
double fjk[2];
/* copy the kth column of A*F into the supernode */
pf = Fp [k] ;
pfend = (Fpacked) ? (Fp [k+1]) : (p + Fnz [k]) ;
for ( ; pf < pfend ; pf++)
{
j = Fi [pf] ;
/* fjk = Fx [pf] ; */
L_ASSIGN (fjk,0, Fx,Fz,pf) ;
p = Ap [j] ;
pend = (Apacked) ? (Ap [j+1]) : (p + Anz [j]) ;
for ( ; p < pend ; p++)
{
i = Ai [p] ;
if (i >= k)
{
/* See the discussion of imap above. */
imap = Map [i] ;
if (imap >= 0 && imap < nsrow)
{
/* Lx [Map [i] + pk] += Ax [p] * fjk ; */
L_MULTADD (Lx,(imap+(psx+(k-k1)*nsrow)),
Ax,Az,p, fjk) ;
}
}
}
}
}
}
/* add beta to the diagonal of the supernode, if nonzero */
if (beta [0] != 0.0)
{
/* note that only the real part of beta is used */
pk = psx ;
for (k = k1 ; k < k2 ; k++)
{
/* Lx [pk] += beta [0] ; */
L_ASSEMBLE (Lx,pk, beta) ;
pk += nsrow + 1 ; /* advance to the next diagonal entry */
}
}
PRINT1 (("Supernode with just A: repeat: "ID"\n", repeat_supernode)) ;
DEBUG (CHOLMOD(dump_super) (s, Super, Lpi, Ls, Lpx, Lx, L_ENTRY,
Common)) ;
PRINT1 (("\n\n")) ;
/* ------------------------------------------------------------------ */
/* save/restore the list of supernodes */
/* ------------------------------------------------------------------ */
if (!repeat_supernode)
{
/* Save the list of pending descendants in case s is not positive
* definite. Also save Lpos for each descendant d, so that we can
* find which part of d is used to update s. */
for (d = Head [s] ; d != EMPTY ; d = Next [d])
{
Lpos_save [d] = Lpos [d] ;
Next_save [d] = Next [d] ;
}
}
else
{
for (d = Head [s] ; d != EMPTY ; d = Next [d])
{
Lpos [d] = Lpos_save [d] ;
Next [d] = Next_save [d] ;
}
}
/* ------------------------------------------------------------------ */
/* update supernode s with each pending descendant d */
/* ------------------------------------------------------------------ */
#ifndef NDEBUG
for (d = Head [s] ; d != EMPTY ; d = Next [d])
{
PRINT1 (("\nWill update "ID" with Child: "ID"\n", s, d)) ;
DEBUG (CHOLMOD(dump_super) (d, Super, Lpi, Ls, Lpx, Lx, L_ENTRY,
Common)) ;
}
PRINT1 (("\nNow factorizing supernode "ID":\n", s)) ;
#endif
#ifdef GPU_BLAS
/* initialize the buffer counter */
if ( useGPU ) {
Common->ibuffer = 0;
supernodeUsedGPU = 0;
idescendant = 0;
d = Head[s];
dnext = d;
dlarge = Next[d];
dsmall = tail;
GPUavailable = 1;
skips = 0;
}
else
{
dnext = Head[s];
}
#else
/* GPU module not installed */
dnext = Head[s];
#endif
while
#ifdef GPU_BLAS
( (!useGPU && (dnext != EMPTY))
|| (useGPU && (idescendant < ndescendants)))
#else
( dnext != EMPTY )
#endif
{
#ifdef GPU_BLAS
if ( useGPU ) {
/* Conditionally select the next descendant supernode to
* assemble.
* + first, select the largest descendant
* + subsequently, if gpu host buffers are available, select
* the largest remaining descendant for assembly on the GPU
* + otherwise select the smallest remaining descendant for
* assembly on the CPU
*
* The objective is to keep the GPU busy assembling the largest
* descendants, and simultaneously keep the CPU busy assembling
* the smallest descendants.
*
* As this is called for every descendent supernode, moving
* this code to t_cholmod_gpu incurs substantial overhead -
* ~20 GF/s on audikw_1 - so it is being left here.
*/
iHostBuff =
(Common->ibuffer) % CHOLMOD_HOST_SUPERNODE_BUFFERS;
cudaError_t cuErr;
if ( idescendant > 0 ) {
if ( GPUavailable == -1 || skips > 0) {
d = dsmall;
dsmall = Previous[dsmall];
skips--;
}
else {
cuErr = cudaEventQuery
( Common->updateCBuffersFree[iHostBuff] );
if ( cuErr == cudaSuccess ) {
/* buffers are available, so assemble a large
* descendant (anticipating that this will be
* assembled on the GPU) */
d = dlarge;
dlarge = Next[dlarge];
GPUavailable = 1;
skips = 0;
}
else {
/* buffers are not available, so the GPU is busy,
* so assemble a small descendant (anticipating
* that it will be assembled on the host) */
d = dsmall;
dsmall = Previous[dsmall];
GPUavailable = 0;
/* if the GPUs are busy, then do this many
* supernodes on the CPU before querying GPUs
* again. */
skips = CHOLMOD_GPU_SKIP;
}
}
}
idescendant++;
}
else
{
d = dnext;
}
#else
/* GPU module not installed at compile time */
d = dnext ;
#endif
/* -------------------------------------------------------------- */
/* get the size of supernode d */
/* -------------------------------------------------------------- */
kd1 = Super [d] ; /* d contains cols kd1 to kd2-1 of L */
kd2 = Super [d+1] ;
ndcol = kd2 - kd1 ; /* # of columns in all of d */
pdi = Lpi [d] ; /* pointer to first row of d in Ls */
pdx = Lpx [d] ; /* pointer to first row of d in Lx */
pdend = Lpi [d+1] ; /* pointer just past last row of d in Ls */
ndrow = pdend - pdi ; /* # rows in all of d */
PRINT1 (("Child: ")) ;
DEBUG (CHOLMOD(dump_super) (d, Super, Lpi, Ls, Lpx, Lx, L_ENTRY,
Common)) ;
/* -------------------------------------------------------------- */
/* find the range of rows of d that affect rows k1 to k2-1 of s */
/* -------------------------------------------------------------- */
p = Lpos [d] ; /* offset of 1st row of d affecting s */
pdi1 = pdi + p ; /* ptr to 1st row of d affecting s in Ls */
pdx1 = pdx + p ; /* ptr to 1st row of d affecting s in Lx */
/* there must be at least one row remaining in d to update s */
ASSERT (pdi1 < pdend) ;
PRINT1 (("Lpos[d] "ID" pdi1 "ID" Ls[pdi1] "ID"\n",
Lpos[d], pdi1, Ls [pdi1])) ;
ASSERT (Ls [pdi1] >= k1 && Ls [pdi1] < k2) ;
for (pdi2 = pdi1 ; pdi2 < pdend && Ls [pdi2] < k2 ; pdi2++) ;
ndrow1 = pdi2 - pdi1 ; /* # rows in first part of d */
ndrow2 = pdend - pdi1 ; /* # rows in remaining d */
/* rows Ls [pdi1 ... pdi2-1] are in the range k1 to k2-1. Since d
* affects s, this set cannot be empty. */
ASSERT (pdi1 < pdi2 && pdi2 <= pdend) ;
PRINT1 (("ndrow1 "ID" ndrow2 "ID"\n", ndrow1, ndrow2)) ;
DEBUG (for (p = pdi1 ; p < pdi2 ; p++)
PRINT1 (("Ls["ID"] "ID"\n", p, Ls[p]))) ;
/* -------------------------------------------------------------- */
/* construct the update matrix C for this supernode d */
/* -------------------------------------------------------------- */
/* C = L (k1:n-1, kd1:kd2-1) * L (k1:k2-1, kd1:kd2-1)', except
* that k1:n-1 refers to all of the rows in L, but many of the
* rows are all zero. Supernode d holds columns kd1 to kd2-1 of L.
* Nonzero rows in the range k1:k2-1 are in the list
* Ls [pdi1 ... pdi2-1], of size ndrow1. Nonzero rows in the range
* k2:n-1 are in the list Ls [pdi2 ... pdend], of size ndrow2. Let
* L1 = L (Ls [pdi1 ... pdi2-1], kd1:kd2-1), and let
* L2 = L (Ls [pdi2 ... pdend], kd1:kd2-1). C is ndrow2-by-ndrow1.
* Let C1 be the first ndrow1 rows of C and let C2 be the last
* ndrow2-ndrow1 rows of C. Only the lower triangular part of C1
* needs to be computed since C1 is symmetric.
*/
/* maxcsize is the largest size of C for all pairs (d,s) */
ASSERT (ndrow2 * ndrow1 <= ((Int) L->maxcsize)) ;
/* compute leading ndrow1-by-ndrow1 lower triangular block of C,
* C1 = L1*L1' */
ndrow3 = ndrow2 - ndrow1 ; /* number of rows of C2 */
ASSERT (ndrow3 >= 0) ;
#ifdef GPU_BLAS
if ( useGPU ) {
/* set up GPU to assemble new supernode */
if ( GPUavailable == 1) {
if ( ndrow2 * L_ENTRY >= CHOLMOD_ND_ROW_LIMIT &&
ndcol * L_ENTRY >= CHOLMOD_ND_COL_LIMIT ) {
if ( ! mapCreatedOnGpu ) {
TEMPLATE2 ( CHOLMOD (gpu_initialize_supernode))
( Common, nscol, nsrow, psi, gpu_p );
mapCreatedOnGpu = 1;
}
}
else {
/* we've reached the limit of GPU-eligible descendants
* flag to stop stop performing cudaEventQueries */
GPUavailable = -1;
}
}
}
#endif
#ifdef GPU_BLAS
if ( !useGPU
|| GPUavailable!=1
|| !TEMPLATE2 (CHOLMOD (gpu_updateC)) (ndrow1, ndrow2, ndrow,
ndcol, nsrow, pdx1, pdi1, Lx, C, Common, gpu_p))
#endif
{
/* GPU not installed, or not used */
#ifndef NTIMER
Common->CHOLMOD_CPU_SYRK_CALLS++ ;
tstart = SuiteSparse_time () ;
#endif
#ifdef REAL
BLAS_dsyrk ("L", "N",
ndrow1, ndcol, /* N, K: L1 is ndrow1-by-ndcol*/
one, /* ALPHA: 1 */
Lx + L_ENTRY*pdx1, ndrow, /* A, LDA: L1, ndrow */
zero, /* BETA: 0 */
C, ndrow2) ; /* C, LDC: C1 */
#else
BLAS_zherk ("L", "N",
ndrow1, ndcol, /* N, K: L1 is ndrow1-by-ndcol*/
one, /* ALPHA: 1 */
Lx + L_ENTRY*pdx1, ndrow, /* A, LDA: L1, ndrow */
zero, /* BETA: 0 */
C, ndrow2) ; /* C, LDC: C1 */
#endif
#ifndef NTIMER
Common->CHOLMOD_CPU_SYRK_TIME += SuiteSparse_time () - tstart ;
#endif
/* compute remaining (ndrow2-ndrow1)-by-ndrow1 block of C,
* C2 = L2*L1' */
if (ndrow3 > 0)
{
#ifndef NTIMER
Common->CHOLMOD_CPU_GEMM_CALLS++ ;
tstart = SuiteSparse_time () ;
#endif
#ifdef REAL
BLAS_dgemm ("N", "C",
ndrow3, ndrow1, ndcol, /* M, N, K */
one, /* ALPHA: 1 */
Lx + L_ENTRY*(pdx1 + ndrow1), /* A, LDA: L2 */
ndrow, /* ndrow */
Lx + L_ENTRY*pdx1, /* B, LDB: L1 */
ndrow, /* ndrow */
zero, /* BETA: 0 */
C + L_ENTRY*ndrow1, /* C, LDC: C2 */
ndrow2) ;
#else
BLAS_zgemm ("N", "C",
ndrow3, ndrow1, ndcol, /* M, N, K */
one, /* ALPHA: 1 */
Lx + L_ENTRY*(pdx1 + ndrow1), /* A, LDA: L2 */
ndrow, /* ndrow */
Lx + L_ENTRY*pdx1, /* B, LDB: L1, ndrow */
ndrow,
zero, /* BETA: 0 */
C + L_ENTRY*ndrow1, /* C, LDC: C2 */
ndrow2) ;
#endif
#ifndef NTIMER
Common->CHOLMOD_CPU_GEMM_TIME +=
SuiteSparse_time () - tstart ;
#endif
}
/* ---------------------------------------------------------- */
/* construct relative map to assemble d into s */
/* ---------------------------------------------------------- */
DEBUG (CHOLMOD(dump_real) ("C", C, ndrow2, ndrow1, TRUE,
L_ENTRY, Common)) ;
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
if ( ndrow2 > 64 )
for (i = 0 ; i < ndrow2 ; i++)
{
RelativeMap [i] = Map [Ls [pdi1 + i]] ;
ASSERT (RelativeMap [i] >= 0 && RelativeMap [i] < nsrow) ;
}
/* ---------------------------------------------------------- */
/* assemble C into supernode s using the relative map */
/* ---------------------------------------------------------- */
#pragma omp parallel for private ( j, i, px, q ) \
num_threads(CHOLMOD_OMP_NUM_THREADS) if (ndrow1 > 64 )
for (j = 0 ; j < ndrow1 ; j++) /* cols k1:k2-1 */
{
ASSERT (RelativeMap [j] == Map [Ls [pdi1 + j]]) ;
ASSERT (RelativeMap [j] >= 0 && RelativeMap [j] < nscol) ;
px = psx + RelativeMap [j] * nsrow ;
for (i = j ; i < ndrow2 ; i++) /* rows k1:n-1 */
{
ASSERT (RelativeMap [i] == Map [Ls [pdi1 + i]]) ;
ASSERT (RelativeMap [i] >= j && RelativeMap[i] < nsrow);
/* Lx [px + RelativeMap [i]] -= C [i + pj] ; */
q = px + RelativeMap [i] ;
L_ASSEMBLESUB (Lx,q, C, i+ndrow2*j) ;
}
}
}
#ifdef GPU_BLAS
else
{
supernodeUsedGPU = 1; /* GPU was used for this supernode*/
Common->ibuffer++; /* gpu_updateC is asynchronous, so use
* the next host buffer for the next
* supernode */
Common->ibuffer = Common->ibuffer%
(CHOLMOD_HOST_SUPERNODE_BUFFERS*CHOLMOD_DEVICE_STREAMS);
}
#endif
/* -------------------------------------------------------------- */
/* prepare this supernode d for its next ancestor */
/* -------------------------------------------------------------- */
dnext = Next [d] ;
if (!repeat_supernode)
{
/* If node s is being repeated, Head [dancestor] has already
* been cleared (set to EMPTY). It must remain EMPTY. The
* dancestor will not be factorized since the factorization
* terminates at node s. */
Lpos [d] = pdi2 - pdi ;
if (Lpos [d] < ndrow)
{
dancestor = SuperMap [Ls [pdi2]] ;
ASSERT (dancestor > s && dancestor < nsuper) ;
/* place d in the link list of its next ancestor */
Next [d] = Head [dancestor] ;
Head [dancestor] = d ;
}
}
} /* end of descendant supernode loop */
#ifdef GPU_BLAS
if ( useGPU ) {
iHostBuff = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS;
iDevBuff = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS;
/* combine updates assembled on the GPU with updates
* assembled on the CPU */
TEMPLATE2 ( CHOLMOD (gpu_final_assembly ))
( Common, Lx, psx, nscol, nsrow, supernodeUsedGPU,
&iHostBuff, &iDevBuff, gpu_p );
}
#endif
PRINT1 (("\nSupernode with contributions A: repeat: "ID"\n",
repeat_supernode)) ;
DEBUG (CHOLMOD(dump_super) (s, Super, Lpi, Ls, Lpx, Lx, L_ENTRY,
Common)) ;
PRINT1 (("\n\n")) ;
/* ------------------------------------------------------------------ */
/* factorize diagonal block of supernode s in LL' */
/* ------------------------------------------------------------------ */
/* The current supernode s is ready to factorize. It has been updated
* by all descendant supernodes. Let S = the current supernode, which
* holds rows k1:n-1 and columns k1:k2-1 of the updated matrix. It
* splits into two parts: the square diagonal block S1, and the
* rectangular part S2. Here, S1 is factorized into L1*L1' and
* overwritten by L1.
*
* If supernode s is being repeated, only factorize it up to but not
* including the column containing the problematic entry.
*/
nscol2 = (repeat_supernode) ? (nscol_new) : (nscol) ;
#ifdef GPU_BLAS
if ( !useGPU
|| !supernodeUsedGPU
|| !TEMPLATE2 (CHOLMOD (gpu_lower_potrf))(nscol2, nsrow, psx, Lx,
&info, Common, gpu_p))
#endif
{
/* Note that the GPU will not be used for the triangular solve */
#ifdef GPU_BLAS
supernodeUsedGPU = 0;
#endif
#ifndef NTIMER
Common->CHOLMOD_CPU_POTRF_CALLS++ ;
tstart = SuiteSparse_time () ;
#endif
#ifdef REAL
LAPACK_dpotrf ("L",
nscol2, /* N: nscol2 */
Lx + L_ENTRY*psx, nsrow, /* A, LDA: S1, nsrow */
info) ; /* INFO */
#else
LAPACK_zpotrf ("L",
nscol2, /* N: nscol2 */
Lx + L_ENTRY*psx, nsrow, /* A, LDA: S1, nsrow */
info) ; /* INFO */
#endif
#ifndef NTIMER
Common->CHOLMOD_CPU_POTRF_TIME += SuiteSparse_time ()- tstart ;
#endif
}
/* ------------------------------------------------------------------ */
/* check if the matrix is not positive definite */
/* ------------------------------------------------------------------ */
if (repeat_supernode)
{
/* the leading part has been refactorized; it must have succeeded */
info = 0 ;
/* zero out the rest of this supernode */
p = psx + nsrow * nscol_new ;
pend = psx + nsrow * nscol ; /* s is nsrow-by-nscol */
for ( ; p < pend ; p++)
{
/* Lx [p] = 0 ; */
L_CLEAR (Lx,p) ;
}
}
/* info is set to one in LAPACK_*potrf if blas_ok is FALSE. It is
* set to zero in dpotrf/zpotrf if the factorization was successful. */
if (CHECK_BLAS_INT && !Common->blas_ok)
{
ERROR (CHOLMOD_TOO_LARGE, "problem too large for the BLAS") ;
}
if (info != 0)
{
/* Matrix is not positive definite. dpotrf/zpotrf do NOT report an
* error if the diagonal of L has NaN's, only if it has a zero. */
if (Common->status == CHOLMOD_OK)
{
ERROR (CHOLMOD_NOT_POSDEF, "matrix not positive definite") ;
}
/* L->minor is the column of L that contains a zero or negative
* diagonal term. */
L->minor = k1 + info - 1 ;
/* clear the link lists of all subsequent supernodes */
for (ss = s+1 ; ss < nsuper ; ss++)
{
Head [ss] = EMPTY ;
}
/* zero this supernode, and all remaining supernodes */
pend = L->xsize ;
for (p = psx ; p < pend ; p++)
{
/* Lx [p] = 0. ; */
L_CLEAR (Lx,p) ;
}
/* If L is indefinite, it still contains useful information.
* Supernodes 0 to s-1 are valid, similar to MATLAB [R,p]=chol(A),
* where the 1-based p is identical to the 0-based L->minor. Since
* L->minor is in the current supernode s, it and any columns to the
* left of it in supernode s are also all zero. This differs from
* [R,p]=chol(A), which contains nonzero rows 1 to p-1. Fix this
* by setting repeat_supernode to TRUE, and repeating supernode s.
*
* If Common->quick_return_if_not_posdef is true, then the entire
* supernode s is not factorized; it is left as all zero.
*/
if (info == 1 || Common->quick_return_if_not_posdef)
{
/* If the first column of supernode s contains a zero or
* negative diagonal entry, then it is already properly set to
* zero. Also, info will be 1 if integer overflow occured in
* the BLAS. */
Head [s] = EMPTY ;
#ifdef GPU_BLAS
if ( useGPU ) {
CHOLMOD (gpu_end) (Common) ;
}
#endif
return (Common->status >= CHOLMOD_OK) ;
}
else
{
/* Repeat supernode s, but only factorize it up to but not
* including the column containing the problematic diagonal
* entry. */
repeat_supernode = TRUE ;
s-- ;
nscol_new = info - 1 ;
continue ;
}
}
/* ------------------------------------------------------------------ */
/* compute the subdiagonal block and prepare supernode for its parent */
/* ------------------------------------------------------------------ */
nsrow2 = nsrow - nscol2 ;
if (nsrow2 > 0)
{
/* The current supernode is columns k1 to k2-1 of L. Let L1 be the
* diagonal block (factorized by dpotrf/zpotrf above; rows/cols
* k1:k2-1), and L2 be rows k2:n-1 and columns k1:k2-1 of L. The
* triangular system to solve is L2*L1' = S2, where S2 is
* overwritten with L2. More precisely, L2 = S2 / L1' in MATLAB
* notation.
*/
#ifdef GPU_BLAS
if ( !useGPU
|| !supernodeUsedGPU
|| !TEMPLATE2 (CHOLMOD(gpu_triangular_solve))
(nsrow2, nscol2, nsrow, psx, Lx, Common, gpu_p))
#endif
{
#ifndef NTIMER
Common->CHOLMOD_CPU_TRSM_CALLS++ ;
tstart = SuiteSparse_time () ;
#endif
#ifdef REAL
BLAS_dtrsm ("R", "L", "C", "N",
nsrow2, nscol2, /* M, N */
one, /* ALPHA: 1 */
Lx + L_ENTRY*psx, nsrow, /* A, LDA: L1, nsrow */
Lx + L_ENTRY*(psx + nscol2), /* B, LDB, L2, nsrow */
nsrow) ;
#else
BLAS_ztrsm ("R", "L", "C", "N",
nsrow2, nscol2, /* M, N */
one, /* ALPHA: 1 */
Lx + L_ENTRY*psx, nsrow, /* A, LDA: L1, nsrow */
Lx + L_ENTRY*(psx + nscol2), /* B, LDB, L2, nsrow */
nsrow) ;
#endif
#ifndef NTIMER
Common->CHOLMOD_CPU_TRSM_TIME += SuiteSparse_time () - tstart ;
#endif
}
if (CHECK_BLAS_INT && !Common->blas_ok)
{
ERROR (CHOLMOD_TOO_LARGE, "problem too large for the BLAS") ;
}
if (!repeat_supernode)
{
/* Lpos [s] is offset of first row of s affecting its parent */
Lpos [s] = nscol ;
sparent = SuperMap [Ls [psi + nscol]] ;
ASSERT (sparent != EMPTY) ;
ASSERT (Ls [psi + nscol] >= Super [sparent]) ;
ASSERT (Ls [psi + nscol] < Super [sparent+1]) ;
ASSERT (SuperMap [Ls [psi + nscol]] == sparent) ;
ASSERT (sparent > s && sparent < nsuper) ;
/* place s in link list of its parent */
Next [s] = Head [sparent] ;
Head [sparent] = s ;
}
}
else
{
#ifdef GPU_BLAS
TEMPLATE2 ( CHOLMOD (gpu_copy_supernode) )
( Common, Lx, psx, nscol, nscol2, nsrow,
supernodeUsedGPU, iHostBuff, gpu_p);
#endif
}
Head [s] = EMPTY ; /* link list for supernode s no longer needed */
/* clear the Map (debugging only, to detect changes in pattern of A) */
DEBUG (for (k = 0 ; k < nsrow ; k++) Map [Ls [psi + k]] = EMPTY) ;
DEBUG (CHOLMOD(dump_super) (s, Super, Lpi, Ls, Lpx, Lx, L_ENTRY,
Common)) ;
if (repeat_supernode)
{
/* matrix is not positive definite; finished clean-up for supernode
* containing negative diagonal */
#ifdef GPU_BLAS
if ( useGPU )
{
CHOLMOD (gpu_end) (Common) ;
}
#endif
return (Common->status >= CHOLMOD_OK) ;
}
}
/* success; matrix is positive definite */
L->minor = n ;
#ifdef GPU_BLAS
if ( useGPU )
{
CHOLMOD (gpu_end) (Common) ;
}
#endif
return (Common->status >= CHOLMOD_OK) ;
}
#undef PATTERN
#undef REAL
#undef COMPLEX
#undef ZOMPLEX
|
strMbacktest.c | //
// strMbacktest.c
// pstock
//
// Created by takayoshi on 2016/01/24.
// Copyright © 2016年 pgostation. All rights reserved.
//
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <libiomp/omp.h>
#include "strMbacktest.h"
#include "xmapString.h"
#include "calc.h"
static void strMdbacktest_in(const char* path, strMdata* data, strMsignalSum* signals,
const char** sellWords, int sellWordCount, const char** lcWords, int lcWordCount, const char** sig2Words, int sig2WordCount, const char** orderWords, int orderWordCount, int isKarauri, int split, int startDateIndex, int endDateIndex);
static int signal2filter(strMdata* data, strMsignalSum* signals, int dateIndex, const char** sig2Words, int sig2WordCount);
/*
 * Split a rule string into words separated by spaces / newlines, honoring
 * double-quoted sections (a quoted region is kept as part of one word).
 *
 * rule     - rule text from conf.xmap; may be NULL (treated as empty)
 * words    - output array; each entry is a malloc'd NUL-terminated string
 * wordMax  - capacity of `words`; splitting stops when it is full
 *
 * Returns the number of words stored. The caller owns the allocations.
 */
static int strMbacktest_splitWords(const char* rule, const char** words, int wordMax)
{
    if(rule==NULL)
    {
        return 0;
    }
    int count = 0;
    int quoteFlag = 0;
    int start = 0;
    const int ruleLen = (int)strlen(rule);
    for(int i=0; i<=ruleLen; i++)  /* <= so the final word (at NUL) is flushed */
    {
        if(!quoteFlag && ( rule[i]==' ' || rule[i]=='\n' || i==ruleLen) )
        {
            if(i-start==0)
            {
                start++;
                continue;
            }
            if(count >= wordMax)
            {
                break;  /* output array full; drop the remainder */
            }
            char *buf = malloc(i-start+1);
            strncpy(buf, &rule[start], i-start);
            buf[i-start] = '\0';
            words[count] = buf;
            count++;
            start = i+1;
            continue;
        }
        if(rule[i]=='"')
        {
            quoteFlag = !quoteFlag;
        }
    }
    return count;
}
/*
 * Entry point of the back-test: load conf.xmap from `path`, parse the
 * sell / loss-cut / filter2 / order rules, then run the back-test and
 * write the result xmap files.
 *
 * path           - strategy directory containing conf.xmap
 * data           - loaded market data
 * signals        - per-day buy signals to back-test
 * startDateIndex - first date index (inclusive)
 * endDateIndex   - last date index (exclusive)
 */
void strMbacktest_start(const char* path, strMdata* data, strMsignalSum* signals, int startDateIndex, int endDateIndex)
{
    const char* sellWords[64] = {0x00};
    int sellWordCount = 0;
    const char* lcWords[64] = {0x00};
    int lcWordCount = 0;
    const char* sig2Words[64] = {0x00};
    int sig2WordCount = 0;
    const char* orderWords[64] = {0x00};
    int orderWordCount = 0;
    int isKarauri = 0;          /* non-zero: short-selling strategy */
    int split = 10;             /* number of position slots (capital is split this many ways) */
    xmap_t* xmap;
    /* Buffer for the rewritten result path. It must live for the whole
     * function: the previous code assigned `path` to an array local to an
     * inner block, leaving `path` dangling once that block ended. */
    char parentPath[512] = {0};
    /* load conf.xmap */
    {
        char filePath[256];
        snprintf(filePath, sizeof(filePath)-1, "%s/conf.xmap", path);
        xmap = xmap_load(filePath);
        if(xmap==NULL)
        {
            printf("Error: strMdbacktest_start(): conf.xmap is NULL\n");
            return;
        }
        /* when a sub-range of dates is requested, redirect the results into a
         * separate "<parent>-date/<strategy>" directory */
        if (startDateIndex!=0 || endDateIndex < data->dailys[MAX_DATA_COUNT-1].count)
        {
            for (int i=(int)strlen(path)-2; i>0; i--)
            {
                if (path[i] == '/')
                {
                    char strategyName[256] = {0};
                    strcpy(strategyName, &path[i+1]);
                    strncpy(parentPath, path, i);
                    strcat(parentPath, "-date/");
                    strcat(parentPath, strategyName);
                    path = parentPath;  /* parentPath has function lifetime, so this is safe */
                    /* create the result directory */
                    {
                        printf("mkdir %s\n", parentPath);
                        mkdir(parentPath, 0777);
                    }
                    break;
                }
            }
        }
        snprintf(filePath, sizeof(filePath)-1, "%s/saveconf.xmap", path);
        xmap_saveWithFilename(xmap, filePath);
    }
    /* split each rule into its word array */
    {
        const char *sellRule = xmap_get(xmap, "sell");
        printf("sellRule:%s\n", sellRule ? sellRule : "(null)");
        sellWordCount = strMbacktest_splitWords(sellRule, sellWords, 64);
    }
    {
        const char *lcRule = xmap_get(xmap, "lc");
        printf("lcRule:%s\n", lcRule ? lcRule : "(null)");
        lcWordCount = strMbacktest_splitWords(lcRule, lcWords, 64);
    }
    {
        const char *sig2Rule = xmap_get(xmap, "filter2");
        printf("filter2:%s\n", sig2Rule ? sig2Rule : "(null)");
        sig2WordCount = strMbacktest_splitWords(sig2Rule, sig2Words, 64);
    }
    {
        const char *orderRule = xmap_get(xmap, "order");
        printf("order:%s\n", orderRule ? orderRule : "(null)");
        orderWordCount = strMbacktest_splitWords(orderRule, orderWords, 64);
    }
    /* "rule" selects long vs short strategy */
    {
        const char *xRule = xmap_get(xmap, "rule");
        printf("rule:%s\n", xRule ? xRule : "(null)");
        if(xRule!=NULL && strcmp(xRule,"空売り")==0)
        {
            isKarauri = 1;
        }
    }
    /* optional override of the number of position slots */
    {
        const char *splitStr = xmap_get(xmap, "split");
        if(splitStr!=NULL)
        {
            sscanf(splitStr, "%d", &split);
            if(split>SPLIT_MAX){
                split = SPLIT_MAX;
            }
        }
    }
    strMdbacktest_in(path, data, signals, sellWords, sellWordCount, lcWords, lcWordCount, sig2Words, sig2WordCount, orderWords, orderWordCount, isKarauri, split, startDateIndex, endDateIndex);
    xmap_release(xmap);
}
static void strMdbacktest_in(const char* path,strMdata* data, strMsignalSum* signals,
const char** sellWords, int sellWordCount, const char** lcWords, int lcWordCount, const char** sig2Words, int sig2WordCount, const char** orderWords, int orderWordCount, int isKarauri, int split, int startDateIndex, int endDateIndex)
{
const long initialCache = 10000000;
long cache = initialCache;
int positionCount = 0;
strMsignal positionSignal[split];
const int positionSignalListMax = 20000;
strMsignal positionSignalList[positionSignalListMax] = {0x00};
int positionSignalListCounter = 0;
long cacheHistory[signals->count];
int positionCountHistory[signals->count];
strMsignal positionSignalHistory[signals->count][split];
int filter2History[signals->count];
long yearProfit = 0;
memset(positionSignal, 0x00, sizeof(strMsignal) * split);
//1日ずつずらしながらバックテスト
for(int dateIndex=startDateIndex; dateIndex<endDateIndex; dateIndex++)
{
cacheHistory[dateIndex] = -1;
//発注シグナルがあるので、発注する
while(signals->dailyCount[dateIndex] > 0 && positionCount < split && cache>0)
{
//シグナル数フィルター
int signal2filterResult = signal2filter(data, signals, dateIndex, sig2Words, sig2WordCount);
filter2History[dateIndex] = signal2filterResult;
if(!signal2filterResult)
{
break;
}
{
double orderList[signals->dailyCount[dateIndex]];
//最も優先度の高いものを発注する
#pragma omp parallel
#pragma omp sections
{
#pragma omp section
for(int i=0; i<signals->dailyCount[dateIndex]/2; i++)
{
orderList[i] = -9999999999999.9;
//すでに保有していれば発注しない
int flag = 0;
for(int j=0; j<positionCount; j++)
{
if(positionSignal[j].codeIndex == signals->dailySignal[dateIndex][i].codeIndex)
{
flag = 1;
break;
}
}
if(flag){
orderList[i] = -9999999999999.9;
continue;
}
//変な銘柄は発注しない
int code = data->codes[signals->dailySignal[dateIndex][i].codeIndex];
if(code==1773){ //株価データが壊れている
continue;
}
//貸借銘柄じゃないので空売りできない
if (isKarauri)
{
int karauriOk = data->companys[signals->dailySignal[dateIndex][i].codeIndex].isKarauriOk;
if(!karauriOk){
continue;
}
}
//優先度を計算
orderList[i] = calc(orderWords, 0, orderWordCount-1, data, NULL, NULL, dateIndex, signals->dailySignal[dateIndex][i].codeIndex);
}
#pragma omp section
for(int i=signals->dailyCount[dateIndex]/2; i<signals->dailyCount[dateIndex]; i++)
{
orderList[i] = -9999999999999.9;
//すでに保有していれば発注しない
int flag = 0;
for(int j=0; j<positionCount; j++)
{
if(positionSignal[j].codeIndex == signals->dailySignal[dateIndex][i].codeIndex)
{
flag = 1;
break;
}
}
if(flag) {
orderList[i] = -9999999999999.9;
continue;
}
//変な銘柄は発注しない
int code = data->codes[signals->dailySignal[dateIndex][i].codeIndex];
if(code==1773){ //株価データが壊れている
continue;
}
//貸借銘柄じゃないので空売りできない
if (isKarauri)
{
int karauriOk = data->companys[signals->dailySignal[dateIndex][i].codeIndex].isKarauriOk;
if(!karauriOk){
continue;
}
}
//優先度を計算
orderList[i] = calc(orderWords, 0, orderWordCount-1, data, NULL, NULL, dateIndex, signals->dailySignal[dateIndex][i].codeIndex);
}
}
double orderMax = -9999999999999.9;
strMsignal* signalMax = NULL;
int buyValue = 0;
int buyUnit = 0;
for(int i=0; i<signals->dailyCount[dateIndex]; i++)
{
if(orderList[i] > orderMax)
{
int tmpValue = signals->dailySignal[dateIndex][i].buyValue;
if(tmpValue==-1){
tmpValue = data->dailys[signals->dailySignal[dateIndex][i].codeIndex].end[dateIndex];
}
long tmpCache = cache/(split - positionCount);
if (tmpCache > initialCache / split) {
tmpCache = initialCache / split;
}
int tmpUnit = (int)(tmpCache / tmpValue);
int companyUnit = data->companys[signals->dailySignal[dateIndex][i].codeIndex].unit;
if(companyUnit > 0 && tmpUnit < companyUnit)
{
continue;
}
int tmpUnit2 = 0;
if(companyUnit > 0){
tmpUnit2 = (tmpUnit / companyUnit) * companyUnit;
} else {
tmpUnit2 = tmpUnit;
}
if(tmpUnit2==0) {
continue;
}
orderMax = orderList[i];
signalMax = &signals->dailySignal[dateIndex][i];
buyValue = tmpValue;
buyUnit = tmpUnit2;
}
}
if(signalMax==NULL){
break;
}
//発注
long money = buyValue * buyUnit;
cache -= money;
positionCount++;
memcpy(&positionSignal[positionCount-1], signalMax, sizeof(strMsignal));
positionSignal[positionCount-1].buyUnit = buyUnit;
positionSignal[positionCount-1].buyDateIndex = dateIndex;
positionSignal[positionCount-1].isKarauri = isKarauri;
positionSignal[positionCount-1].sellReason = -1;
positionSignal[positionCount-1].sellDateIndex = endDateIndex;
positionSignal[positionCount-1].sellValue = -1;
positionSignal[positionCount-1].slippage = -1;
positionSignal[positionCount-1].zei = -1;
//過去の履歴に移動
positionSignal[positionCount-1].historyIndex = positionSignalListCounter;
if(positionSignalListCounter<positionSignalListMax){
memcpy(&positionSignalList[positionSignalListCounter], &positionSignal[positionCount-1], sizeof(strMsignal));
positionSignalListCounter++;
} else {
printf("Error:strMdbacktest_in():positionSignalList size over\n");
}
//printf("buy:cache=%ld, code=%d, positionCount=%d\n", cache, data->codes[positionSignal[positionCount-1].codeIndex], positionCount);
}
}
//手仕舞い条件を計算する
for(int i=positionCount-1; i>=0; i--)
{
if(dateIndex < endDateIndex-1 && positionSignal[i].buyDateIndex == dateIndex)
{
//本日の発注なので、まだ持っていない
continue;
}
float sellCalc = calc(sellWords, 0, sellWordCount-1, data, &positionSignal[i], NULL, dateIndex, positionSignal[i].codeIndex);
float lcCalc = 0.0;
if( sellCalc <= 0.0 ){
lcCalc = calc(lcWords, 0, lcWordCount-1, data, &positionSignal[i], NULL, dateIndex, positionSignal[i].codeIndex) > 0.0;
}
if( sellCalc > 0.0 ||
lcCalc > 0.0 ||
( dateIndex < endDateIndex-2 && data->dailys[positionSignal[i].codeIndex].start[dateIndex+1]==0 && data->dailys[positionSignal[i].codeIndex].start[dateIndex+2]==0 ) ||
dateIndex >= endDateIndex-1 )
{
//printf("sell:holdDays=%d code=%d buyValue=%d sellValue=%d ", dateIndex - positionSignal[i].buyDateIndex, data->codes[positionSignal[i].codeIndex], positionSignal[i].buyValue, data->dailys[positionSignal[i].codeIndex].start[dateIndex+1<signals->count?dateIndex+1:dateIndex]);
//手仕舞い
int zei = 0;
long slippage = 0;
int nextStart = 0;
long nextVolume = 0;
if(dateIndex+1<signals->count){
nextStart = data->dailys[positionSignal[i].codeIndex].start[dateIndex+1];
nextVolume = data->dailys[positionSignal[i].codeIndex].volume[dateIndex+1];
} else {
nextStart= data->dailys[positionSignal[i].codeIndex].end[dateIndex];
}
for(int j=2; nextStart==0 && dateIndex+j < signals->count && j<5; j++){
nextStart = data->dailys[positionSignal[i].codeIndex].start[dateIndex+j];
nextVolume = data->dailys[positionSignal[i].codeIndex].volume[dateIndex+j];
}
if(nextStart==0){
//閑散としすぎ。上場廃止?
nextStart = data->dailys[positionSignal[i].codeIndex].end[dateIndex];
nextVolume = 0;
}
if ( positionSignal[i].buyValue == -1 ) {
positionSignal[i].buyValue = nextStart;
}
if(isKarauri){
//空売りで仮想的に減らしていたお金を元に戻す
long money = positionSignal[i].buyValue * (long)positionSignal[i].buyUnit;
cache += money;
//空売った時と買い戻した値段の差額が利益/損失となる
long profit = (positionSignal[i].buyValue - nextStart) * (long)positionSignal[i].buyUnit;
cache += profit;
if(profit>0 && yearProfit+profit >= 0){
zei = profit * 0.2;
}
if(profit<0 && yearProfit > 0){
zei = profit * 0.2;
}
yearProfit += profit;
} else {
//買ったお金を元に戻す
long money = positionSignal[i].buyValue * (long)positionSignal[i].buyUnit;
cache += money;
//利益/損失
long profit = (nextStart - positionSignal[i].buyValue) * (long)positionSignal[i].buyUnit;
if ( profit > money * 3 ) {
profit = money * 3; //極端な銘柄の排除
}
cache += profit;
if(profit>0 && yearProfit+profit >= 0){
zei = profit * 0.2;
}
if(profit<0 && yearProfit > 0){
zei = profit * 0.2;
}
yearProfit += profit;
}
cache -= zei;
//スリッページ計算(発注時には計算せず、手仕舞い時のみ計算している)
if(nextVolume > 1000 && positionSignal[i].buyUnit >= nextVolume){
slippage = (nextStart * positionSignal[i].buyUnit) / 40;
}
else if(nextVolume > 1000 && positionSignal[i].buyUnit >= nextVolume/2){
slippage = (nextStart * positionSignal[i].buyUnit) / 60;
}
else if(nextVolume > 1000 && positionSignal[i].buyUnit >= nextVolume/5){
slippage = (nextStart * positionSignal[i].buyUnit) / 80;
}
else if(positionSignal[i].buyUnit >= nextVolume/10){
slippage = (nextStart * positionSignal[i].buyUnit) / 100;
}
else if(positionSignal[i].buyUnit >= nextVolume/20){
slippage = (nextStart * positionSignal[i].buyUnit) / 150;
}
else if(positionSignal[i].buyUnit >= nextVolume/50){
slippage = (nextStart * positionSignal[i].buyUnit) / 200;
}
else if(positionSignal[i].buyUnit >= nextVolume/100){
slippage = (nextStart * positionSignal[i].buyUnit) / 400;
}
cache -= slippage;
yearProfit -= slippage;
positionSignal[i].sellDateIndex = dateIndex;
positionSignal[i].sellValue = nextStart;
positionSignal[i].slippage = (int)slippage;
positionSignal[i].zei = zei;
if(sellCalc > 0.0) {
positionSignal[i].sellReason = 1;
} else if(lcCalc > 0.0 ) {
positionSignal[i].sellReason = 2;
} else if(dateIndex >= signals->count-1){
positionSignal[i].sellReason = 3;
} else {
positionSignal[i].sellReason = 0;
}
//過去の履歴に決済情報をコピー
if(positionSignal[i].historyIndex < positionSignalListMax){
memcpy(&positionSignalList[positionSignal[i].historyIndex], &positionSignal[i], sizeof(strMsignal));
}
//現在のポジションから削除
memcpy(&positionSignal[i], &positionSignal[positionCount-1], sizeof(strMsignal));
positionCount--;
//printf(" cache=%ld\n", cache);
}
}
//年が変わると、年間利益額をリセットする
if(dateIndex+1 < signals->count && data->date[dateIndex+1]>>16 > data->date[dateIndex]>>16)
{
yearProfit = 0;
}
//資産額計算のための履歴データ
{
cacheHistory[dateIndex] = cache;
positionCountHistory[dateIndex] = positionCount;
memcpy(positionSignalHistory[dateIndex], positionSignal, sizeof(positionSignal));
}
}
//資産額の計算
long stockHistory[signals->count];
memset(stockHistory, 0x00, sizeof(stockHistory));
{
for(int dateIndex=0; dateIndex<endDateIndex; dateIndex++)
{
for(int i=0; i<positionCountHistory[dateIndex]; i++)
{
int unit = positionSignalHistory[dateIndex][i].buyUnit;
int codeIndex = positionSignalHistory[dateIndex][i].codeIndex;
int lastEnd = data->dailys[codeIndex].end[dateIndex];
for(int j=1; lastEnd==0 && dateIndex-j > 0 && j<30; j++){
lastEnd = data->dailys[codeIndex].end[dateIndex-j];
}
long stock;
if(isKarauri){
//空売りで仮想的に減らしていたお金
long base = positionSignalHistory[dateIndex][i].buyValue * (long)unit;
//空売った時と買い戻した値段の差額が利益/損失となる
long profit = (positionSignalHistory[dateIndex][i].buyValue - lastEnd) * (long)unit;
stock = base + profit;
} else {
//買ったお金を戻す
long base = positionSignalHistory[dateIndex][i].buyValue * (long)unit;
//利益/損失
long profit = (lastEnd - positionSignalHistory[dateIndex][i].buyValue) * (long)unit;
if (profit > base * 3) {
profit = base * 3;
}
stock = base + profit;
}
stockHistory[dateIndex] += stock;
}
}
}
//allshisan.xmapの保存
{
xmap_t* xmap = xmap_load(NULL);
for(int dateIndex=startDateIndex; dateIndex<endDateIndex; dateIndex++)
{
char* key = apr_palloc(xmap->pool, 32);
char* value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "date%d", dateIndex-startDateIndex);
snprintf(value, 32-1, "%d", data->date[dateIndex]);
xmap_set(xmap, key, value);
}
for(int dateIndex=startDateIndex; dateIndex<endDateIndex; dateIndex++)
{
char* key = apr_palloc(xmap->pool, 32);
char* value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "cache%d", dateIndex-startDateIndex);
snprintf(value, 32-1, "%ld", cacheHistory[dateIndex]);
xmap_set(xmap, key, value);
}
for(int dateIndex=startDateIndex; dateIndex<endDateIndex; dateIndex++)
{
char* key = apr_palloc(xmap->pool, 32);
char* value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "stock%d", dateIndex-startDateIndex);
snprintf(value, 32-1, "%ld", stockHistory[dateIndex]);
xmap_set(xmap, key, value);
}
for(int dateIndex=startDateIndex; dateIndex<endDateIndex; dateIndex++)
{
char* key = apr_palloc(xmap->pool, 32);
char* value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "positionCountHistory%d", dateIndex-startDateIndex);
snprintf(value, 32-1, "%d", positionCountHistory[dateIndex]);
xmap_set(xmap, key, value);
}
char filePath[256];
snprintf(filePath, sizeof(filePath)-1, "%s/allshisan.xmap", path);
xmap_saveWithFilename(xmap, filePath);
xmap_release(xmap);
}
//allSignals.xmapの保存
{
xmap_t* xmap = xmap_load(NULL);
char* key = apr_palloc(xmap->pool, 32);
char* value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "date");
snprintf(value, 32-1, "%d", data->date[signals->count]);
xmap_set(xmap, key, value);
key = apr_palloc(xmap->pool, 32);
value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "signalCount");
snprintf(value, 32-1, "%d", positionSignalListCounter);
xmap_set(xmap, key, value);
char strategyName[256] = "";
for(int c=(int)strlen(path)-1; c>=0; c--)
{
if(path[c]=='/')
{
strcpy(strategyName, &path[c+1]);
if(strategyName[strlen(strategyName)-1]=='/'){
strategyName[strlen(strategyName)-1]='\0';
}
}
}
key = apr_palloc(xmap->pool, 32);
value = apr_palloc(xmap->pool, 128);
snprintf(key, 32-1, "strategyName");
snprintf(value, 128-1, "%s", strategyName);
xmap_set(xmap, key, value);
for(int positionSignalListIndex=0; positionSignalListIndex < positionSignalListCounter; positionSignalListIndex++)
{
strMsignal* signal = &positionSignalList[positionSignalListIndex];
char* key = apr_palloc(xmap->pool, 32);
char* value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "buyDateIndex%d", positionSignalListIndex);
snprintf(value, 32-1, "%d", signal->buyDateIndex);
xmap_set(xmap, key, value);
key = apr_palloc(xmap->pool, 32);
value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "buyDate__%d", positionSignalListIndex);
snprintf(value, 32-1, "%d", data->date[signal->buyDateIndex]);
xmap_set(xmap, key, value);
key = apr_palloc(xmap->pool, 32);
value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "buyValue%d", positionSignalListIndex);
snprintf(value, 32-1, "%d", signal->buyValue);
xmap_set(xmap, key, value);
key = apr_palloc(xmap->pool, 32);
value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "code%d", positionSignalListIndex);
snprintf(value, 32-1, "%d", data->codes[signal->codeIndex]);
xmap_set(xmap, key, value);
key = apr_palloc(xmap->pool, 32);
value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "rule%d", positionSignalListIndex);
snprintf(value, 32-1, "%s", signal->isKarauri?"空売り":"買い");
xmap_set(xmap, key, value);
key = apr_palloc(xmap->pool, 32);
value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "sellDateIndex%d", positionSignalListIndex);
snprintf(value, 32-1, "%d", signal->sellDateIndex);
xmap_set(xmap, key, value);
key = apr_palloc(xmap->pool, 32);
value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "sellDate__%d", positionSignalListIndex);
snprintf(value, 32-1, "%d", data->date[signal->sellDateIndex]);
xmap_set(xmap, key, value);
key = apr_palloc(xmap->pool, 32);
value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "sellReason%d", positionSignalListIndex);
snprintf(value, 32-1, "%d", signal->sellReason);
xmap_set(xmap, key, value);
key = apr_palloc(xmap->pool, 32);
value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "sellValue%d", positionSignalListIndex);
snprintf(value, 32-1, "%d", signal->sellValue);
xmap_set(xmap, key, value);
key = apr_palloc(xmap->pool, 32);
value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "slippage%d", positionSignalListIndex);
snprintf(value, 32-1, "%d", signal->slippage);
xmap_set(xmap, key, value);
key = apr_palloc(xmap->pool, 32);
value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "unit%d", positionSignalListIndex);
snprintf(value, 32-1, "%d", signal->buyUnit);
xmap_set(xmap, key, value);
key = apr_palloc(xmap->pool, 32);
value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "zei%d", positionSignalListIndex);
snprintf(value, 32-1, "%d", signal->zei);
xmap_set(xmap, key, value);
}
xmap_set(xmap, "HoldDate", "0");
char filePath[256];
snprintf(filePath, sizeof(filePath)-1, "%s/allSignals.xmap", path);
xmap_saveWithFilename(xmap, filePath);
xmap_release(xmap);
}
//extraResult.xmapの保存
{
xmap_t* xmap = xmap_load(NULL);
for(int dateIndex=startDateIndex; dateIndex<endDateIndex; dateIndex++)
{
char* key = apr_palloc(xmap->pool, 32);
char* value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "date%d", dateIndex-startDateIndex);
snprintf(value, 32-1, "%d", data->date[dateIndex]);
xmap_set(xmap, key, value);
}
for(int dateIndex=startDateIndex; dateIndex<endDateIndex; dateIndex++)
{
char* key = apr_palloc(xmap->pool, 32);
char* value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "signalCount%d", dateIndex-startDateIndex);
snprintf(value, 32-1, "%d", signals->dailyCount[dateIndex]);
xmap_set(xmap, key, value);
}
for(int dateIndex=startDateIndex; dateIndex<endDateIndex; dateIndex++)
{
char* key = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "codes%d", dateIndex-startDateIndex);
char* value = apr_palloc(xmap->pool, 6*signals->dailyCount[dateIndex]);
memset(value, 0x00, 6*signals->dailyCount[dateIndex]);
for(int i=0; i<signals->dailyCount[dateIndex]; i++)
{
char codeStr[16];
snprintf(codeStr, sizeof(codeStr)-1, "%d,", data->codes[signals->dailySignal[dateIndex][i].codeIndex]);
strcat(value, codeStr);
}
xmap_set(xmap, key, value);
}
char filePath[256];
snprintf(filePath, sizeof(filePath)-1, "%s/extraResult.xmap", path);
xmap_saveWithFilename(xmap, filePath);
xmap_release(xmap);
}
//extraSignals.xmapの保存
{
xmap_t* xmap = xmap_load(NULL);
for(int dateIndex=startDateIndex; dateIndex<endDateIndex; dateIndex++)
{
if(signals->dailyCount[dateIndex]==0){
continue;
}
char* key = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "codes%d", data->date[dateIndex]);
char* value = apr_palloc(xmap->pool, 6*signals->dailyCount[dateIndex]);
memset(value, 0x00, 6*signals->dailyCount[dateIndex]);
for(int i=0; i<signals->dailyCount[dateIndex]; i++)
{
char codeStr[16];
snprintf(codeStr, sizeof(codeStr)-1, "%d ", data->codes[signals->dailySignal[dateIndex][i].codeIndex]);
strcat(value, codeStr);
}
xmap_set(xmap, key, value);
}
for(int dateIndex=startDateIndex; dateIndex<endDateIndex; dateIndex++)
{
char* key = apr_palloc(xmap->pool, 32);
char* value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "filter2%d", data->date[dateIndex]);
snprintf(value, 32-1, "%s", filter2History[dateIndex]?"true":"false");
xmap_set(xmap, key, value);
}
char filePath[256];
snprintf(filePath, sizeof(filePath)-1, "%s/extraSignals.xmap", path);
xmap_saveWithFilename(xmap, filePath);
xmap_release(xmap);
}
//filter2signal.xmapの保存
{
xmap_t* xmap = xmap_load(NULL);
for(int dateIndex=startDateIndex; dateIndex<endDateIndex; dateIndex++)
{
if(signals->dailyCount[dateIndex]==0){
continue;
}
char* key = apr_palloc(xmap->pool, 32);
char* value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "signalCount%d", data->date[dateIndex]);
snprintf(value, 32-1, "%d ", signals->dailyCount[dateIndex]);
xmap_set(xmap, key, value);
}
for(int dateIndex=startDateIndex; dateIndex<endDateIndex; dateIndex++)
{
if(filter2History[dateIndex]){
char* key = apr_palloc(xmap->pool, 32);
char* value = apr_palloc(xmap->pool, 32);
snprintf(key, 32-1, "filter2_%d", data->date[dateIndex]);
snprintf(value, 32-1, "-");
xmap_set(xmap, key, value);
}
}
char filePath[256];
snprintf(filePath, sizeof(filePath)-1, "%s/filter2signal.xmap", path);
xmap_saveWithFilename(xmap, filePath);
xmap_release(xmap);
}
}
/*
 * Evaluate the market-wide "filter2" rule for one day.
 * Returns 1 when ordering is allowed (rule passes, or no rule is
 * configured) and 0 when today's signals should be ignored.
 */
static int signal2filter(strMdata* data, strMsignalSum* signalSum, int dateIndex, const char** sig2Words, int sig2WordCount)
{
    /* no filter configured: everything passes */
    if(sig2WordCount==0)
    {
        return 1;
    }
    /* the rule is market-wide, so no specific stock is passed in */
    const int dummyCodeIndex = -1;
    const double result = calc(sig2Words, 0, sig2WordCount-1, data, NULL, signalSum, dateIndex, dummyCodeIndex);
    return (result > 0.0) ? 1 : 0;
}
|
huffcode.c | /*
* huffcode - Encode/Decode files using Huffman encoding.
* http://huffman.sourceforge.net
* Copyright (C) 2003 Douglas Ryan Richardson
*/
#include "huffman.h"
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdlib.h>
#include <assert.h>
#include <omp.h>
#ifdef WIN32
#include <malloc.h>
extern int getopt(int, char**, char*);
extern char* optarg;
#else
#include <unistd.h>
#endif
#define THREADS 4
static unsigned int memory_encode_read_file(FILE *in,
unsigned char **buf, unsigned long sz);
static unsigned int memory_decode_read_file(FILE *in,
unsigned char **buf, unsigned long sz);
/* Write the program's version and copyright banner to `out`. */
static void
version(FILE *out)
{
    static const char banner[] =
        "huffcode 0.3\n"
        "Copyright (C) 2003 Douglas Ryan Richardson"
        "; Gauss Interprise, Inc\n";
    fputs(banner, out);
}
/* Write the command-line usage summary to `out`. */
static void
usage(FILE* out)
{
    static const char text[] =
        "Usage: huffcode [-i<input file>] [-o<output file>] [-d|-c]\n"
        "-i - input file (default is standard input)\n"
        "-o - output file (default is standard output)\n"
        "-d - decompress\n"
        "-c - compress (default)\n"
        "-m - read file into memory, compress, then write to file (not default)\n";
    fputs(text, out);
}
int
main(int argc, char** argv)
{
unsigned char *buf[THREADS] = {NULL, NULL, NULL, NULL};
char memory = 1;
char compress = 1;
int opt;
unsigned int i, cur[THREADS];
const char *file_in = NULL, *file_out = NULL;
unsigned char* bufout = NULL;
unsigned int bufoutlen = 0;
FILE *out = stdout;
/* Get the command line arguments. */
while((opt = getopt(argc, argv, "i:o:cdhvm")) != -1)
{
switch(opt)
{
case 'i':
file_in = optarg;
break;
case 'o':
file_out = optarg;
break;
case 'c':
compress = 1;
break;
case 'd':
compress = 0;
break;
case 'h':
usage(stdout);
return 0;
case 'v':
version(stdout);
return 0;
default:
usage(stderr);
return 1;
}
}
FILE *fp[THREADS];
/* If an input file is given then open it
* on several positions
*/
if(file_in)
{
#pragma omp parallel for schedule(dynamic) \
num_threads(THREADS)
for (i = 0; i < THREADS; ++i) {
fp[i] = fopen(file_in, "rb");
if(!fp[i])
{
fprintf(stderr,
"Can't open input file '%s': %s\n",
file_in, strerror(errno));
exit(1);
}
}
}
/* If an output file is given then create it. */
if(file_out)
{
out = fopen(file_out, "wb");
if(!out)
{
fprintf(stderr,
"Can't open output file '%s': %s\n",
file_out, strerror(errno));
return 1;
}
}
/**
* Get file size
*/
fseek(fp[0], 0L, SEEK_END);
unsigned long sz = (unsigned long)ftell(fp[0]);
fseek(fp[0], 0L, SEEK_SET);
/**
* Increment each file pointer to its specific chunk size
*/
#pragma omp parallel for schedule(dynamic) \
num_threads(THREADS)
for(i = 0; i < THREADS; ++i)
{
fseek(fp[i], i * (unsigned long) (sz / THREADS), SEEK_SET);
}
if(memory)
{
if (compress) {
/**
* Read file from disk in parallel
*/
#pragma omp parallel for schedule(dynamic) \
num_threads(THREADS)
for(i = 0; i < THREADS; ++i) {
cur[i] = memory_encode_read_file(fp[i], &buf[i], (unsigned long) (sz / THREADS));
}
// Allocate the new full buffer
int newSize = 0;
for(i = 0; i < THREADS; ++i) {
newSize += strlen(buf[i]);
}
/**
* Copy the contents of all
* partial buffers into one
*/
char *scarlat = malloc(newSize * sizeof(char));
strcpy(scarlat, buf[0]);
for (i = 1; i < THREADS; ++i) {
strcat(scarlat, buf[i]);
}
// for (i = 0; i < THREADS; ++i) {
// free(buf[i]);
// buf[i] = NULL;
// }
/**
* Do actual huffman algorithm
* TODO - add 1 thread to write to memory the table
* - add 4 threads to write to memory their segments of content
*/
if(huffman_encode_memory(scarlat, newSize, &bufout, &bufoutlen))
{
free(scarlat);
return 1;
}
free(scarlat);
/* Write the memory to the file. */
if(fwrite(bufout, 1, bufoutlen, out) != bufoutlen)
{
free(bufout);
return 1;
}
free(bufout);
}
else {
int a, pos = 0;
unsigned long size = sz / THREADS;
#pragma omp parallel for schedule(dynamic) \
num_threads(THREADS)
for(i = 0; i < THREADS; ++i) {
if (i == THREADS - 1) {
size = sz - (THREADS - 1) * size;
}
cur[i] = memory_decode_read_file(fp[i], &buf[i], size);
}
unsigned int sum = 0;
for(i = 0; i < THREADS; i++) {
sum += cur[i];
}
char *scarlat = malloc(sum * sizeof(char));
for (i = 0; i < THREADS; ++i) {
memcpy(scarlat + pos, buf[i], cur[i]);
pos += cur[i];
}
// for (i = 0; i < THREADS; i++) {
// free(buf[i]);
// buf[i] = NULL;
// }
/* Decode the memory. */
if(huffman_decode_memory(scarlat, sum, &bufout, &bufoutlen))
{
free(scarlat);
return 1;
}
free(scarlat);
// Write the memory to the file.
if(fwrite(bufout, 1, bufoutlen, out) != bufoutlen)
{
free(bufout);
return 1;
}
free(bufout);
}
return 0;
}
}
static unsigned int
memory_encode_read_file(FILE *in,
unsigned char **buf, unsigned long sz)
{
unsigned int i, len = 0, cur = 0, inc = 1024;
assert(in);
/* Read the file into memory. */
for(i = 0; i < (unsigned int)sz; i += inc)
{
//printf("%d\n", omp_get_thread_num());
unsigned char *tmp;
len += inc;
tmp = (unsigned char*)realloc(*buf, len);
if(!tmp)
{
if(*buf)
free(buf);
return -1;
}
*buf = tmp;
if(cur + inc > sz) {
cur += fread(*buf + cur, 1, (unsigned int)(sz - cur), in);
}
else {
cur += fread(*buf + cur, 1, inc, in);
}
}
if(NULL != *buf) {
return cur;
}
return -1;
}
static unsigned int
memory_decode_read_file(FILE *in,
unsigned char **buf, unsigned long sz)
{
unsigned int i, len = 0, cur = 0, inc = 1024;
assert(in);
/* Read the file into memory. */
for (i = 0; i < (unsigned int)sz; i+=inc)
{
unsigned char *tmp;
len += inc;
tmp = (unsigned char*)realloc(*buf, len);
if(!tmp)
{
if(*buf) {
free(*buf);
}
return 1;
}
*buf = tmp;
if(cur + inc > sz) {
cur += fread(*buf + cur, 1, (unsigned int)(sz - cur), in);
}
else {
cur += fread(*buf + cur, 1, inc, in);
}
}
if(NULL != *buf) {
return cur;
}
return -1;
}
|
conv_kernel_x86.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: quanwang@openailab.com
*/
#include <stdint.h>
#include <stdlib.h>
#include <math.h>
#include "../conv_hcl_kernel.h"
#include "wino_conv_kernel_x86.h"
#if __SSE2__
#include <emmintrin.h>
#endif
#include <sys/time.h>
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
static double get_current_time()
{
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000.0 + tv.tv_usec / 1000.0;
}
static int get_private_mem_size(struct ir_tensor* filter)
{
if (filter->data_type == TENGINE_DT_UINT8) // simulator uint8 inference with fp32
return filter->elem_num * filter->elem_size * 4;
else
return filter->elem_num * filter->elem_size; // caution
}
static void interleave(struct ir_tensor* filter, struct conv_priv_info* priv_info)
{
/* simply copy the data */
memcpy(priv_info->interleave_buffer, filter->data, filter->elem_num * filter->elem_size);
}
static void interleave_uint8(struct ir_tensor* filter, struct conv_priv_info* priv_info)
{
/* dequant uint8 weight to fp32 for simulator */
float* weight_fp32 = (float* )priv_info->interleave_buffer;
uint8_t* weight_uint8 = (uint8_t*)filter->data;
float scale = filter->scale;
int zero_point = filter->zero_point;
for (int i = 0; i < filter->elem_num; i++)
{
weight_fp32[i] = ((float)weight_uint8[i] - (float)zero_point) * scale;
}
}
void im2col_fp32(float* data_img, float* data_col, int inh, int inw, int inc, int outh, int outw, int ksize_h,
int ksize_w, int sh, int sw, int ph, int pw, int dh, int dw)
{
const int channels_col = ksize_h * ksize_w * inc;
for (int c = 0; c < channels_col; ++c)
{
const int kw = c % ksize_w;
int c_ = c / ksize_w;
const int kh = c_ % ksize_h;
c_ = c_ / ksize_h;
const int im_col = kw * dw - pw;
const int w_low = max(0, -im_col / sw + (-im_col % sw > 0));
const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0));
for (int h = 0; h < outh; ++h)
{
const int im_row = kh * dh + h * sh - ph;
float* out = data_col + (c * outh + h) * outw;
const float* end = out + w_high;
if (im_row >= 0 && im_row < inh)
{
float* in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw;
memset(out, 0, w_low * sizeof(float));
out += w_low;
while (out < end)
{
in += sw;
*(out++) = *in;
}
memset(out, 0, (outw - w_high) * sizeof(float));
}
else
{
memset(out, 0, outw * sizeof(float));
}
}
}
}
void im2col_uint8(uint8_t* data_img, float* data_col, struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, struct conv_param* param)
{
int ksize_h = param->kernel_h;
int ksize_w = param->kernel_w;
int inc = param->input_channel / param->group;
int sh = param->stride_h;
int sw = param->stride_w;
int ph = param->pad_h0;
int pw = param->pad_w0;
int dh = param->dilation_h;
int dw = param->dilation_w;
int inh = input_tensor->dims[2];
int inw = input_tensor->dims[3];
int outh = output_tensor->dims[2];
int outw = output_tensor->dims[3];
float scale = input_tensor->scale;
int zero_point = input_tensor->zero_point;
const int channels_col = ksize_h * ksize_w * inc;
for (int c = 0; c < channels_col; ++c)
{
const int kw = c % ksize_w;
int c_ = c / ksize_w;
const int kh = c_ % ksize_h;
c_ = c_ / ksize_h;
const int im_col = kw * dw - pw;
const int w_low = max(0, -im_col / sw + (-im_col % sw > 0));
const int w_high = min(outw, (inw - im_col) / sw + ((inw - im_col) % sw > 0));
for (int h = 0; h < outh; ++h)
{
const int im_row = kh * dh + h * sh - ph;
float* out = data_col + (c * outh + h) * outw;
const float* end = out + w_high;
if (im_row >= 0 && im_row < inh)
{
uint8_t * in = data_img + inw * (im_row + inh * c_) + im_col + (w_low - 1) * sw;
memset(out, 0, w_low * sizeof(float));
out += w_low;
while (out < end)
{
in += sw;
float in_fp32 = ((float)in[0] - (float)zero_point) * scale;
out[0] = in_fp32;
out++;
}
memset(out, 0, (outw - w_high) * sizeof(float));
}
else
{
memset(out, 0, outw * sizeof(float));
}
}
}
}
static void im2col_ir(struct ir_tensor* input, struct ir_tensor* output, struct conv_priv_info* priv_info,
struct conv_param* param, int n, int group)
{
int input_chan = param->input_channel / param->group;
int image_size = input->dims[1] * input->dims[2] * input->dims[3];
int group_size = input_chan * input->dims[2] * input->dims[3];
void* input_base = input->data + (n * image_size + group * group_size) * input->elem_size;
void* im2col_buf = priv_info->im2col_buffer;
if (input->data_type == TENGINE_DT_UINT8)
im2col_uint8(input_base, im2col_buf, input, output, param);
else
im2col_fp32(input_base, im2col_buf, input->dims[2], input->dims[3], input_chan, output->dims[2], output->dims[3],
param->kernel_h, param->kernel_w, param->stride_h, param->stride_w, param->pad_h0, param->pad_w0, param->dilation_h, param->dilation_w);
}
#if __AVX__
void input_pack4(int K, int N, float* pB, float* pB_t, int num_thread)
{
int nn_size = N >> 3;
int remian_size_start = nn_size << 3;
// [ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33 ....]
#pragma omp parallel for num_threads(num_thread)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 8;
const float* img = pB + i;
float* tmp = pB_t + (i / 8) * 8 * K;
for (int j = 0; j < K; j++)
{
#if __AVX__
_mm256_storeu_ps(tmp, _mm256_loadu_ps(img));
#else
tmp[0] = img[0];
tmp[1] = img[1];
tmp[2] = img[2];
tmp[3] = img[3];
tmp[4] = img[4];
tmp[5] = img[5];
tmp[6] = img[6];
tmp[7] = img[7];
#endif // __SSE__
tmp += 8;
img += N;
}
}
// [ch00, ch01, ch02, ch03 ....]
#pragma omp parallel for num_threads(num_thread)
for (int i = remian_size_start; i < N; i++)
{
const float* img = pB + i;
float* tmp = pB_t + (i / 8 + i % 8) * 8 * K;
for (int j = 0; j < K; j++)
{
tmp[0] = img[0];
tmp += 1;
img += N;
}
}
}
static void sgemm(int M, int N, int K, float* pA_t, float* pB_t, float* pC, int num_thread)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = M >> 3;
remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(num_thread)
for (int pp = 0; pp < nn_outch; pp++)
{
int i = pp * 8;
float* output0 = pC + ( i )*N;
float* output1 = pC + (i + 1) * N;
float* output2 = pC + (i + 2) * N;
float* output3 = pC + (i + 3) * N;
float* output4 = pC + (i + 4) * N;
float* output5 = pC + (i + 5) * N;
float* output6 = pC + (i + 6) * N;
float* output7 = pC + (i + 7) * N;
int j = 0;
for (; j + 7 < N; j += 8)
{
float* va = pA_t + (i / 8) * 8 * K;
float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
__m256 _sum0 = _mm256_set1_ps(0.0);
__m256 _sum1 = _mm256_set1_ps(0.0);
__m256 _sum2 = _mm256_set1_ps(0.0);
__m256 _sum3 = _mm256_set1_ps(0.0);
__m256 _sum4 = _mm256_set1_ps(0.0);
__m256 _sum5 = _mm256_set1_ps(0.0);
__m256 _sum6 = _mm256_set1_ps(0.0);
__m256 _sum7 = _mm256_set1_ps(0.0);
int k = 0;
for (; k + 3 < K; k = k + 4)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _va1 = _mm256_broadcast_ss(va + 1);
__m256 _va2 = _mm256_broadcast_ss(va + 2);
__m256 _va3 = _mm256_broadcast_ss(va + 3);
__m256 _vb0 = _mm256_loadu_ps(vb);
__m256 _vb1 = _mm256_loadu_ps(vb + 8);
__m256 _vb2 = _mm256_loadu_ps(vb + 16);
__m256 _vb3 = _mm256_loadu_ps(vb + 24);
_sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
_sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
_sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
_sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
_va0 = _mm256_broadcast_ss(va + 4);
_va1 = _mm256_broadcast_ss(va + 5);
_va2 = _mm256_broadcast_ss(va + 6);
_va3 = _mm256_broadcast_ss(va + 7);
_sum4 = _mm256_fmadd_ps(_vb0, _va0, _sum4); // sum4 = (a00-a07) * k40
_sum5 = _mm256_fmadd_ps(_vb0, _va1, _sum5); // sum5 = (a00-a07) * k50
_sum6 = _mm256_fmadd_ps(_vb0, _va2, _sum6); // sum6 = (a00-a07) * k60
_sum7 = _mm256_fmadd_ps(_vb0, _va3, _sum7); // sum7 = (a00-a07) * k70
va += 8;
// k1
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va + 1);
_va2 = _mm256_broadcast_ss(va + 2);
_va3 = _mm256_broadcast_ss(va + 3);
_sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01
_sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11
_sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21
_sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31
_va0 = _mm256_broadcast_ss(va + 4);
_va1 = _mm256_broadcast_ss(va + 5);
_va2 = _mm256_broadcast_ss(va + 6);
_va3 = _mm256_broadcast_ss(va + 7);
_sum4 = _mm256_fmadd_ps(_vb1, _va0, _sum4); // sum4 += (a10-a17) * k41
_sum5 = _mm256_fmadd_ps(_vb1, _va1, _sum5); // sum5 += (a10-a17) * k51
_sum6 = _mm256_fmadd_ps(_vb1, _va2, _sum6); // sum6 += (a10-a17) * k61
_sum7 = _mm256_fmadd_ps(_vb1, _va3, _sum7); // sum7 += (a10-a17) * k71
va += 8;
// k2
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va + 1);
_va2 = _mm256_broadcast_ss(va + 2);
_va3 = _mm256_broadcast_ss(va + 3);
_sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02
_sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12
_sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22
_sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32
_va0 = _mm256_broadcast_ss(va + 4);
_va1 = _mm256_broadcast_ss(va + 5);
_va2 = _mm256_broadcast_ss(va + 6);
_va3 = _mm256_broadcast_ss(va + 7);
_sum4 = _mm256_fmadd_ps(_vb2, _va0, _sum4); // sum4 += (a20-a27) * k42
_sum5 = _mm256_fmadd_ps(_vb2, _va1, _sum5); // sum5 += (a20-a27) * k52
_sum6 = _mm256_fmadd_ps(_vb2, _va2, _sum6); // sum6 += (a20-a27) * k62
_sum7 = _mm256_fmadd_ps(_vb2, _va3, _sum7); // sum7 += (a20-a27) * k72
va += 8;
// k3
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va + 1);
_va2 = _mm256_broadcast_ss(va + 2);
_va3 = _mm256_broadcast_ss(va + 3);
_sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03
_sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13
_sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23
_sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33
_va0 = _mm256_broadcast_ss(va + 4);
_va1 = _mm256_broadcast_ss(va + 5);
_va2 = _mm256_broadcast_ss(va + 6);
_va3 = _mm256_broadcast_ss(va + 7);
_sum4 = _mm256_fmadd_ps(_vb3, _va0, _sum4); // sum4 += (a30-a37) * k43
_sum5 = _mm256_fmadd_ps(_vb3, _va1, _sum5); // sum5 += (a30-a37) * k53
_sum6 = _mm256_fmadd_ps(_vb3, _va2, _sum6); // sum6 += (a30-a37) * k63
_sum7 = _mm256_fmadd_ps(_vb3, _va3, _sum7); // sum7 += (a30-a37) * k73
va += 8;
vb += 32;
}
for (; k < K; k++)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _va1 = _mm256_broadcast_ss(va + 1);
__m256 _va2 = _mm256_broadcast_ss(va + 2);
__m256 _va3 = _mm256_broadcast_ss(va + 3);
__m256 _va4 = _mm256_broadcast_ss(va + 4);
__m256 _va5 = _mm256_broadcast_ss(va + 5);
__m256 _va6 = _mm256_broadcast_ss(va + 6);
__m256 _va7 = _mm256_broadcast_ss(va + 7);
__m256 _vb0 = _mm256_loadu_ps(vb);
_sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
_sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
_sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
_sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
_sum4 = _mm256_fmadd_ps(_vb0, _va4, _sum4); // sum4 = (a00-a07) * k40
_sum5 = _mm256_fmadd_ps(_vb0, _va5, _sum5); // sum5 = (a00-a07) * k50
_sum6 = _mm256_fmadd_ps(_vb0, _va6, _sum6); // sum6 = (a00-a07) * k60
_sum7 = _mm256_fmadd_ps(_vb0, _va7, _sum7); // sum7 = (a00-a07) * k70
va += 8;
vb += 8;
}
_mm256_storeu_ps(output0, _sum0);
_mm256_storeu_ps(output1, _sum1);
_mm256_storeu_ps(output2, _sum2);
_mm256_storeu_ps(output3, _sum3);
_mm256_storeu_ps(output4, _sum4);
_mm256_storeu_ps(output5, _sum5);
_mm256_storeu_ps(output6, _sum6);
_mm256_storeu_ps(output7, _sum7);
#else
float sum0[8] = {0};
float sum1[8] = {0};
float sum2[8] = {0};
float sum3[8] = {0};
float sum4[8] = {0};
float sum5[8] = {0};
float sum6[8] = {0};
float sum7[8] = {0};
for (int k = 0; k < K; k++)
{
for (int n = 0; n < 8; n++)
{
sum0[n] += va[0] * vb[n];
sum1[n] += va[1] * vb[n];
sum2[n] += va[2] * vb[n];
sum3[n] += va[3] * vb[n];
sum4[n] += va[4] * vb[n];
sum5[n] += va[5] * vb[n];
sum6[n] += va[6] * vb[n];
sum7[n] += va[7] * vb[n];
}
va += 8;
vb += 8;
}
for (int n = 0; n < 8; n++)
{
output0[n] = sum0[n];
output1[n] = sum1[n];
output2[n] = sum2[n];
output3[n] = sum3[n];
output4[n] = sum4[n];
output5[n] = sum5[n];
output6[n] = sum6[n];
output7[n] = sum7[n];
}
#endif // __AVX__
output0 += 8;
output1 += 8;
output2 += 8;
output3 += 8;
output4 += 8;
output5 += 8;
output6 += 8;
output7 += 8;
}
for (; j < N; j++)
{
float* va = pA_t + (i / 8) * 8 * K;
float* vb = pB_t + (j / 8 + j % 8) * 8 * K;
#if __AVX__
__m256 _sum0_7 = _mm256_set1_ps(0.0);
__m256 _sum0 = _mm256_set1_ps(0.0);
__m256 _sum1 = _mm256_set1_ps(0.0);
__m256 _sum2 = _mm256_set1_ps(0.0);
__m256 _sum3 = _mm256_set1_ps(0.0);
int k = 0;
for (; k + 3 < K; k = k + 4)
{
__m256 _vb0 = _mm256_broadcast_ss(vb);
__m256 _vb1 = _mm256_broadcast_ss(vb + 1);
__m256 _vb2 = _mm256_broadcast_ss(vb + 2);
__m256 _vb3 = _mm256_broadcast_ss(vb + 3);
__m256 _va0 = _mm256_loadu_ps(va);
__m256 _va1 = _mm256_loadu_ps(va + 8);
__m256 _va2 = _mm256_loadu_ps(va + 16);
__m256 _va3 = _mm256_loadu_ps(va + 24);
_sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k70) * a00
_sum1 = _mm256_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k71) * a10
_sum2 = _mm256_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k72) * a20
_sum3 = _mm256_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k73) * a30
va += 32;
vb += 4;
}
_sum0 = _mm256_add_ps(_sum0, _sum1);
_sum2 = _mm256_add_ps(_sum2, _sum3);
_sum0_7 = _mm256_add_ps(_sum0_7, _sum0);
_sum0_7 = _mm256_add_ps(_sum0_7, _sum2);
for (; k < K; k++)
{
__m256 _vb0 = _mm256_broadcast_ss(vb);
__m256 _va = _mm256_loadu_ps(va);
_sum0_7 = _mm256_fmadd_ps(_va, _vb0, _sum0_7); // sum0 += (k00-k70) * a00
va += 8;
vb += 1;
}
float output_sum0_7[8] = {0.f};
_mm256_storeu_ps(output_sum0_7, _sum0_7);
output0[0] = output_sum0_7[0];
output1[0] = output_sum0_7[1];
output2[0] = output_sum0_7[2];
output3[0] = output_sum0_7[3];
output4[0] = output_sum0_7[4];
output5[0] = output_sum0_7[5];
output6[0] = output_sum0_7[6];
output7[0] = output_sum0_7[7];
#else
float sum0 = 0;
float sum1 = 0;
float sum2 = 0;
float sum3 = 0;
float sum4 = 0;
float sum5 = 0;
float sum6 = 0;
float sum7 = 0;
for (int k = 0; k < K; k++)
{
sum0 += va[0] * vb[0];
sum1 += va[1] * vb[0];
sum2 += va[2] * vb[0];
sum3 += va[3] * vb[0];
sum4 += va[4] * vb[0];
sum5 += va[5] * vb[0];
sum6 += va[6] * vb[0];
sum7 += va[7] * vb[0];
va += 8;
vb += 1;
}
output0[0] = sum0;
output1[0] = sum1;
output2[0] = sum2;
output3[0] = sum3;
output4[0] = sum4;
output5[0] = sum5;
output6[0] = sum6;
output7[0] = sum7;
#endif // __AVX__
output0++;
output1++;
output2++;
output3++;
output4++;
output5++;
output6++;
output7++;
}
}
nn_outch = (M - remain_outch_start) >> 2;
for (int pp = 0; pp < nn_outch; pp++)
{
int i = remain_outch_start + pp * 4;
float* output0 = pC + ( i )*N;
float* output1 = pC + (i + 1) * N;
float* output2 = pC + (i + 2) * N;
float* output3 = pC + (i + 3) * N;
int j = 0;
for (; j + 7 < N; j += 8)
{
float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K;
float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
__m256 _sum0 = _mm256_set1_ps(0.0);
__m256 _sum1 = _mm256_set1_ps(0.0);
__m256 _sum2 = _mm256_set1_ps(0.0);
__m256 _sum3 = _mm256_set1_ps(0.0);
int k = 0;
for (; k + 3 < K; k = k + 4)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _va1 = _mm256_broadcast_ss(va + 1);
__m256 _va2 = _mm256_broadcast_ss(va + 2);
__m256 _va3 = _mm256_broadcast_ss(va + 3);
__m256 _vb0 = _mm256_loadu_ps(vb);
__m256 _vb1 = _mm256_loadu_ps(vb + 8);
__m256 _vb2 = _mm256_loadu_ps(vb + 16);
__m256 _vb3 = _mm256_loadu_ps(vb + 24);
_sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
_sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
_sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
_sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
va += 4;
// k1
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va + 1);
_va2 = _mm256_broadcast_ss(va + 2);
_va3 = _mm256_broadcast_ss(va + 3);
_sum0 = _mm256_fmadd_ps(_vb1, _va0, _sum0); // sum0 += (a10-a17) * k01
_sum1 = _mm256_fmadd_ps(_vb1, _va1, _sum1); // sum1 += (a10-a17) * k11
_sum2 = _mm256_fmadd_ps(_vb1, _va2, _sum2); // sum2 += (a10-a17) * k21
_sum3 = _mm256_fmadd_ps(_vb1, _va3, _sum3); // sum3 += (a10-a17) * k31
va += 4;
// k2
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va + 1);
_va2 = _mm256_broadcast_ss(va + 2);
_va3 = _mm256_broadcast_ss(va + 3);
_sum0 = _mm256_fmadd_ps(_vb2, _va0, _sum0); // sum0 += (a20-a27) * k02
_sum1 = _mm256_fmadd_ps(_vb2, _va1, _sum1); // sum1 += (a20-a27) * k12
_sum2 = _mm256_fmadd_ps(_vb2, _va2, _sum2); // sum2 += (a20-a27) * k22
_sum3 = _mm256_fmadd_ps(_vb2, _va3, _sum3); // sum3 += (a20-a27) * k32
va += 4;
// k3
_va0 = _mm256_broadcast_ss(va);
_va1 = _mm256_broadcast_ss(va + 1);
_va2 = _mm256_broadcast_ss(va + 2);
_va3 = _mm256_broadcast_ss(va + 3);
_sum0 = _mm256_fmadd_ps(_vb3, _va0, _sum0); // sum0 += (a30-a37) * k03
_sum1 = _mm256_fmadd_ps(_vb3, _va1, _sum1); // sum1 += (a30-a37) * k13
_sum2 = _mm256_fmadd_ps(_vb3, _va2, _sum2); // sum2 += (a30-a37) * k23
_sum3 = _mm256_fmadd_ps(_vb3, _va3, _sum3); // sum3 += (a30-a37) * k33
va += 4;
vb += 32;
}
for (; k < K; k++)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _va1 = _mm256_broadcast_ss(va + 1);
__m256 _va2 = _mm256_broadcast_ss(va + 2);
__m256 _va3 = _mm256_broadcast_ss(va + 3);
__m256 _vb0 = _mm256_loadu_ps(vb);
_sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
_sum1 = _mm256_fmadd_ps(_vb0, _va1, _sum1); // sum1 = (a00-a07) * k10
_sum2 = _mm256_fmadd_ps(_vb0, _va2, _sum2); // sum2 = (a00-a07) * k20
_sum3 = _mm256_fmadd_ps(_vb0, _va3, _sum3); // sum3 = (a00-a07) * k30
va += 4;
vb += 8;
}
_mm256_storeu_ps(output0, _sum0);
_mm256_storeu_ps(output1, _sum1);
_mm256_storeu_ps(output2, _sum2);
_mm256_storeu_ps(output3, _sum3);
#else
float sum0[8] = {0};
float sum1[8] = {0};
float sum2[8] = {0};
float sum3[8] = {0};
for (int k = 0; k < K; k++)
{
for (int n = 0; n < 8; n++)
{
sum0[n] += va[0] * vb[n];
sum1[n] += va[1] * vb[n];
sum2[n] += va[2] * vb[n];
sum3[n] += va[3] * vb[n];
}
va += 4;
vb += 8;
}
for (int n = 0; n < 8; n++)
{
output0[n] = sum0[n];
output1[n] = sum1[n];
output2[n] = sum2[n];
output3[n] = sum3[n];
}
#endif // __AVX__
output0 += 8;
output1 += 8;
output2 += 8;
output3 += 8;
}
for (; j < N; j++)
{
float* va = pA_t + (i / 8 + (i % 8) / 4) * 8 * K;
float* vb = pB_t + (j / 8 + j % 8) * 8 * K;
#if __AVX__
__m128 _sum0_3 = _mm_set1_ps(0.0);
__m128 _sum0 = _mm_set1_ps(0.0);
__m128 _sum1 = _mm_set1_ps(0.0);
__m128 _sum2 = _mm_set1_ps(0.0);
__m128 _sum3 = _mm_set1_ps(0.0);
int k = 0;
for (; k + 3 < K; k = k + 4)
{
__m128 _vb0 = _mm_set1_ps(vb[0]);
__m128 _vb1 = _mm_set1_ps(vb[1]);
__m128 _vb2 = _mm_set1_ps(vb[2]);
__m128 _vb3 = _mm_set1_ps(vb[3]);
__m128 _va0 = _mm_loadu_ps(va);
__m128 _va1 = _mm_loadu_ps(va + 4);
__m128 _va2 = _mm_loadu_ps(va + 8);
__m128 _va3 = _mm_loadu_ps(va + 12);
_sum0 = _mm_fmadd_ps(_va0, _vb0, _sum0); // sum0 += (k00-k30) * a00
_sum1 = _mm_fmadd_ps(_va1, _vb1, _sum1); // sum1 += (k01-k31) * a10
_sum2 = _mm_fmadd_ps(_va2, _vb2, _sum2); // sum2 += (k02-k32) * a20
_sum3 = _mm_fmadd_ps(_va3, _vb3, _sum3); // sum3 += (k03-k33) * a30
va += 16;
vb += 4;
}
_sum0 = _mm_add_ps(_sum0, _sum1);
_sum2 = _mm_add_ps(_sum2, _sum3);
_sum0_3 = _mm_add_ps(_sum0_3, _sum0);
_sum0_3 = _mm_add_ps(_sum0_3, _sum2);
for (; k < K; k++)
{
__m128 _vb0 = _mm_set1_ps(vb[0]);
__m128 _va = _mm_loadu_ps(va);
_sum0_3 = _mm_fmadd_ps(_va, _vb0, _sum0_3); // sum0 += (k00-k30) * a00
va += 4;
vb += 1;
}
float output_sum0_3[4] = {0.f};
_mm_storeu_ps(output_sum0_3, _sum0_3);
output0[0] = output_sum0_3[0];
output1[0] = output_sum0_3[1];
output2[0] = output_sum0_3[2];
output3[0] = output_sum0_3[3];
#else
float sum0 = 0;
float sum1 = 0;
float sum2 = 0;
float sum3 = 0;
for (int k = 0; k < K; k++)
{
sum0 += va[0] * vb[0];
sum1 += va[1] * vb[0];
sum2 += va[2] * vb[0];
sum3 += va[3] * vb[0];
va += 4;
vb += 1;
}
output0[0] = sum0;
output1[0] = sum1;
output2[0] = sum2;
output3[0] = sum3;
#endif // __AVX__
output0++;
output1++;
output2++;
output3++;
}
}
remain_outch_start += nn_outch << 2;
// output ch0
for (int i = remain_outch_start; i < M; i++)
{
float* output = pC + i * N;
int j = 0;
for (; j + 7 < N; j += 8)
{
float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
float* vb = pB_t + (j / 8) * 8 * K;
#if __AVX__
__m256 _sum0 = _mm256_set1_ps(0.0);
int k = 0;
for (; k + 3 < K; k = k + 4)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _va1 = _mm256_broadcast_ss(va + 1);
__m256 _va2 = _mm256_broadcast_ss(va + 2);
__m256 _va3 = _mm256_broadcast_ss(va + 3);
__m256 _vb0 = _mm256_loadu_ps(vb);
__m256 _vb1 = _mm256_loadu_ps(vb + 8);
__m256 _vb2 = _mm256_loadu_ps(vb + 16);
__m256 _vb3 = _mm256_loadu_ps(vb + 24);
_sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
_sum0 = _mm256_fmadd_ps(_vb1, _va1, _sum0); // sum0 += (a10-a17) * k01
_sum0 = _mm256_fmadd_ps(_vb2, _va2, _sum0); // sum0 += (a20-a27) * k02
_sum0 = _mm256_fmadd_ps(_vb3, _va3, _sum0); // sum0 += (a30-a37) * k03
va += 4;
vb += 32;
}
for (; k < K; k++)
{
// k0
__m256 _va0 = _mm256_broadcast_ss(va);
__m256 _vb0 = _mm256_loadu_ps(vb);
_sum0 = _mm256_fmadd_ps(_vb0, _va0, _sum0); // sum0 = (a00-a07) * k00
va += 1;
vb += 8;
}
_mm256_storeu_ps(output, _sum0);
#else
float sum[8] = {0};
for (int k = 0; k < K; k++)
{
for (int n = 0; n < 8; n++)
{
sum[n] += va[0] * vb[n];
}
va += 1;
vb += 8;
}
for (int n = 0; n < 8; n++)
{
output[n] = sum[n];
}
#endif // __AVX__
output += 8;
}
for (; j < N; j++)
{
float* va = pA_t + (i / 8 + (i % 8) / 4 + i % 4) * 8 * K;
float* vb = pB_t + (j / 8 + j % 8) * 8 * K;
int k = 0;
#if __AVX__
__m128 _sum0 = _mm_set1_ps(0.f);
for (; k + 3 < K; k += 4)
{
__m128 _p0 = _mm_loadu_ps(vb);
__m128 _k0 = _mm_loadu_ps(va);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0));
va += 4;
vb += 4;
}
float sum0 = _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3];
#else
float sum0 = 0.f;
#endif // __AVX__
for (; k < K; k++)
{
sum0 += va[0] * vb[0];
va += 1;
vb += 1;
}
output[0] = sum0;
output++;
}
}
}
#else // SSE2
void input_pack4(int K, int N, float* pB, float* pB_t, int num_thread)
{
int nn_size = N >> 2;
int remian_size_start = nn_size << 2;
// [ch00, ch10, ch20, ch30, ch01, ch11, ch21, ch31, ch02, ch12, ch22, ch32, ch03, ch13, ch23, ch33 ....]
#pragma omp parallel for num_threads(num_thread)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 4;
const float* img = pB + i;
float* tmp = pB_t + (i / 4) * 4 * K;
for (int j = 0; j < K; j++)
{
#if __SSE__
_mm_storeu_ps(tmp, _mm_loadu_ps(img));
#else
tmp[0] = img[0];
tmp[1] = img[1];
tmp[2] = img[2];
tmp[3] = img[3];
#endif // __SSE__
tmp += 4;
img += N;
}
}
// [ch00, ch01, ch02, ch03 ....]
#pragma omp parallel for num_threads(num_thread)
for (int i = remian_size_start; i < N; i++)
{
const float* img = pB + i;
float* tmp = pB_t + (i / 4 + i % 4) * 4 * K;
for (int j = 0; j < K; j++)
{
tmp[0] = img[0];
tmp += 1;
img += N;
}
}
}
// unloop output M, unloop N, packet 4x4, using intrinsic
static void sgemm(int M, int N, int K, float* pA_t, float* pB_t, float* pC, int num_thread)
{
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = M >> 2;
remain_outch_start = nn_outch << 2;
// output ch0 - ch3
#pragma omp parallel for num_threads(num_thread)
for (int pp = 0; pp < nn_outch; pp++)
{
int i = pp * 4;
float* output0 = pC + ( i )*N;
float* output1 = pC + (i + 1) * N;
float* output2 = pC + (i + 2) * N;
float* output3 = pC + (i + 3) * N;
int j = 0;
for (; j + 3 < N; j += 4)
{
float* va = pA_t + (i / 4) * 4 * K;
float* vb = pB_t + (j / 4) * 4 * K;
#if __SSE__
__m128 _sum0 = _mm_set1_ps(0.f);
__m128 _sum1 = _mm_set1_ps(0.f);
__m128 _sum2 = _mm_set1_ps(0.f);
__m128 _sum3 = _mm_set1_ps(0.f);
int k = 0;
for (; k + 3 < K; k = k + 4)
{
// k0
__m128 _vb = _mm_loadu_ps(vb);
__m128 _va0 = _mm_set1_ps(va[0]);
__m128 _va1 = _mm_set1_ps(va[1]);
__m128 _va2 = _mm_set1_ps(va[2]);
__m128 _va3 = _mm_set1_ps(va[3]);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0)); // sum0 = (a00-a03) * k00
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1)); // sum1 = (a00-a03) * k10
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2)); // sum2 = (a00-a03) * k20
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3)); // sum3 = (a00-a03) * k30
// k1
_vb = _mm_loadu_ps(vb + 4);
_va0 = _mm_set1_ps(va[4]);
_va1 = _mm_set1_ps(va[5]);
_va2 = _mm_set1_ps(va[6]);
_va3 = _mm_set1_ps(va[7]);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0)); // sum0 = (a10-a13) * k01
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1)); // sum1 = (a10-a13) * k11
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2)); // sum2 = (a10-a13) * k21
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3)); // sum3 = (a10-a13) * k31
// k2
_vb = _mm_loadu_ps(vb + 8);
_va0 = _mm_set1_ps(va[8]);
_va1 = _mm_set1_ps(va[9]);
_va2 = _mm_set1_ps(va[10]);
_va3 = _mm_set1_ps(va[11]);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0)); // sum0 = (a20-a23) * k02
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1)); // sum1 = (a20-a23) * k12
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2)); // sum2 = (a20-a23) * k22
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3)); // sum3 = (a20-a23) * k32
// k3
_vb = _mm_loadu_ps(vb + 12);
_va0 = _mm_set1_ps(va[12]);
_va1 = _mm_set1_ps(va[13]);
_va2 = _mm_set1_ps(va[14]);
_va3 = _mm_set1_ps(va[15]);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0)); // sum0 = (a30-a33) * k03
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1)); // sum1 = (a30-a33) * k13
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2)); // sum2 = (a30-a33) * k23
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3)); // sum3 = (a30-a33) * k33
va += 16;
vb += 16;
}
for (; k < K; k++)
{
// k0
__m128 _vb = _mm_loadu_ps(vb);
__m128 _va0 = _mm_set1_ps(va[0]);
__m128 _va1 = _mm_set1_ps(va[1]);
__m128 _va2 = _mm_set1_ps(va[2]);
__m128 _va3 = _mm_set1_ps(va[3]);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0)); // sum0 = (a00-a03) * k00
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1)); // sum1 = (a00-a03) * k10
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2)); // sum2 = (a00-a03) * k20
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3)); // sum3 = (a00-a03) * k30
va += 4;
vb += 4;
}
_mm_storeu_ps(output0, _sum0);
_mm_storeu_ps(output1, _sum1);
_mm_storeu_ps(output2, _sum2);
_mm_storeu_ps(output3, _sum3);
#else
float sum0[4] = {0};
float sum1[4] = {0};
float sum2[4] = {0};
float sum3[4] = {0};
for (int k = 0; k < K; k++)
{
for (int n = 0; n < 4; n++)
{
sum0[n] += va[0] * vb[n];
sum1[n] += va[1] * vb[n];
sum2[n] += va[2] * vb[n];
sum3[n] += va[3] * vb[n];
}
va += 4;
vb += 4;
}
for (int n = 0; n < 4; n++)
{
output0[n] = sum0[n];
output1[n] = sum1[n];
output2[n] = sum2[n];
output3[n] = sum3[n];
}
#endif // __SSE__
output0 += 4;
output1 += 4;
output2 += 4;
output3 += 4;
}
for (; j < N; j++)
{
float* va = pA_t + (i / 4) * 4 * K;
float* vb = pB_t + (j / 4 + j % 4) * 4 * K;
#if __SSE__
__m128 _sum0_3 = _mm_set1_ps(0.f);
__m128 _sum0 = _mm_set1_ps(0.f);
__m128 _sum1 = _mm_set1_ps(0.f);
__m128 _sum2 = _mm_set1_ps(0.f);
__m128 _sum3 = _mm_set1_ps(0.f);
int k = 0;
for (; k + 3 < K; k = k + 4)
{
__m128 _vb0 = _mm_set1_ps(vb[0]);
__m128 _vb1 = _mm_set1_ps(vb[1]);
__m128 _vb2 = _mm_set1_ps(vb[2]);
__m128 _vb3 = _mm_set1_ps(vb[3]);
__m128 _va0 = _mm_loadu_ps(va);
__m128 _va1 = _mm_loadu_ps(va + 4);
__m128 _va2 = _mm_loadu_ps(va + 8);
__m128 _va3 = _mm_loadu_ps(va + 12);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); // sum0 += (k00-k30) * a00
_sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va1, _vb1)); // sum1 += (k01-k31) * a10
_sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va2, _vb2)); // sum2 += (k02-k32) * a20
_sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va3, _vb3)); // sum3 += (k03-k33) * a30
va += 16;
vb += 4;
}
_sum0 = _mm_add_ps(_sum0, _sum1);
_sum2 = _mm_add_ps(_sum2, _sum3);
_sum0_3 = _mm_add_ps(_sum0_3, _sum0);
_sum0_3 = _mm_add_ps(_sum0_3, _sum2);
for (; k < K; k++)
{
__m128 _vb0 = _mm_set1_ps(vb[0]);
__m128 _va = _mm_loadu_ps(va);
_sum0_3 = _mm_add_ps(_sum0_3, _mm_mul_ps(_va, _vb0)); // sum0 += (k00-k30) * a00
va += 4;
vb += 1;
}
output0[0] = _sum0_3[0];
output1[0] = _sum0_3[1];
output2[0] = _sum0_3[2];
output3[0] = _sum0_3[3];
#else
float sum0 = 0;
float sum1 = 0;
float sum2 = 0;
float sum3 = 0;
for (int k = 0; k < K; k++)
{
sum0 += va[0] * vb[0];
sum1 += va[1] * vb[0];
sum2 += va[2] * vb[0];
sum3 += va[3] * vb[0];
va += 4;
vb += 1;
}
output0[0] = sum0;
output1[0] = sum1;
output2[0] = sum2;
output3[0] = sum3;
#endif // __SSE__
output0++;
output1++;
output2++;
output3++;
}
}
// output ch0
#pragma omp parallel for num_threads(num_thread)
for (int i = remain_outch_start; i < M; i++)
{
float* output = pC + i * N;
int j = 0;
for (; j + 3 < N; j += 4)
{
float* va = pA_t + (i / 4 + i % 4) * 4 * K;
float* vb = pB_t + (j / 4) * 4 * K;
#if __SSE__
__m128 _sum0 = _mm_set1_ps(0.f);
int k = 0;
for (; k + 3 < K; k = k + 4)
{
// k0
__m128 _va0 = _mm_set1_ps(va[0]);
__m128 _va1 = _mm_set1_ps(va[1]);
__m128 _va2 = _mm_set1_ps(va[2]);
__m128 _va3 = _mm_set1_ps(va[3]);
__m128 _vb0 = _mm_loadu_ps(vb);
__m128 _vb1 = _mm_loadu_ps(vb + 4);
__m128 _vb2 = _mm_loadu_ps(vb + 8);
__m128 _vb3 = _mm_loadu_ps(vb + 12);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb0, _va0)); // sum0 = (a00-a03) * k00
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb1, _va1)); // sum0 += (a10-a13) * k01
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb2, _va2)); // sum0 += (a20-a23) * k02
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb3, _va3)); // sum0 += (a30-a33) * k03
va += 4;
vb += 16;
}
for (; k < K; k++)
{
// k0
__m128 _va0 = _mm_set1_ps(va[0]);
__m128 _vb0 = _mm_loadu_ps(vb);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb0, _va0)); // sum0 = (a00-a03) * k00
va += 1;
vb += 4;
}
_mm_storeu_ps(output, _sum0);
#else
float sum[4] = {0};
for (int k = 0; k < K; k++)
{
for (int n = 0; n < 4; n++)
{
sum[n] += va[0] * vb[n];
}
va += 1;
vb += 4;
}
for (int n = 0; n < 4; n++)
{
output[n] = sum[n];
}
#endif // __SSE__
output += 4;
}
for (; j < N; j++)
{
float* va = pA_t + (i / 4 + i % 4) * 4 * K;
float* vb = pB_t + (j / 4 + j % 4) * 4 * K;
int k = 0;
#if __SSE__
__m128 _sum0 = _mm_set1_ps(0.f);
for (; k + 3 < K; k += 4)
{
__m128 _p0 = _mm_loadu_ps(vb);
__m128 _k0 = _mm_loadu_ps(va);
_sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0));
va += 4;
vb += 4;
}
float sum0 = _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3];
#else
float sum0 = 0.f;
#endif // __SSE__
for (; k < K; k++)
{
sum0 += va[0] * vb[0];
va += 1;
vb += 1;
}
output[0] = sum0;
output++;
}
}
}
#endif // __AVX2__
/* fp32 GEMM for one (batch, group) slice of the convolution: runs the packed
 * sgemm over this group's interleaved filter and im2col columns, then applies
 * per-channel bias and the optional activation in place.
 * param->activation: 0 -> ReLU, >0 -> ReLU6, <0 -> none. */
static void sgemm_fp32(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias,
                       struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n,
                       int group, int num_thread)
{
    /* GEMM dimensions: M = output channels per group, K = kernel volume */
    int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    int outchan_g = param->output_channel / param->group;

    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int out_xy = out_h * out_w;
    int out_image_size = output->dims[1] * output->dims[2] * output->dims[3];

    /* locate this group's interleaved filter, packed input and output slice */
    float* filter_sgemm = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size;
    float* input_sgemm_pack4 = ( float* )priv_info->im2col_buffer_pack4;
    float* output_fp32 = ( float* )output->data + n * out_image_size + outchan_g * group * out_xy;
    float* bias_fp32 = bias ? ( float* )bias->data + outchan_g * group : NULL;

    sgemm(outchan_g, out_xy, kernel_size, filter_sgemm, input_sgemm_pack4, output_fp32, num_thread);

    /* add the per-channel bias to every spatial position of that channel */
    if (bias)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            float b = bias_fp32[i];
            float* row = output_fp32 + i * out_xy;
            for (int j = 0; j < out_xy; j++)
                row[j] += b;
        }
    }

    /* activation == 0: ReLU */
    if (param->activation == 0)
    {
        int total = outchan_g * out_xy;
        for (int k = 0; k < total; k++)
        {
            if (output_fp32[k] < 0)
                output_fp32[k] = 0;
        }
    }

    /* activation > 0: ReLU6 (clamp to [0, 6]) */
    if (param->activation > 0)
    {
        int total = outchan_g * out_xy;
        for (int k = 0; k < total; k++)
        {
            if (output_fp32[k] < 0)
                output_fp32[k] = 0;
            if (output_fp32[k] > 6)
                output_fp32[k] = 6;
        }
    }
}
/* uint8 GEMM for one (batch, group) slice: the convolution itself is computed
 * in fp32 scratch memory (weights/inputs were dequantized to fp32 elsewhere),
 * then bias, activation and the final fp32 -> uint8 requantization are applied. */
static void sgemm_uint8(struct ir_tensor* input, struct ir_tensor* filter, struct ir_tensor* bias,
                        struct ir_tensor* output, struct conv_priv_info* priv_info, struct conv_param* param, int n,
                        int group, int num_thread)
{
    /* GEMM dimensions: M = output channels per group, K = kernel volume */
    int kernel_size = param->kernel_h * param->kernel_w * param->input_channel / param->group;
    int outchan_g = param->output_channel / param->group;

    int out_h = output->dims[2];
    int out_w = output->dims[3];
    int out_image_size = output->dims[1] * output->dims[2] * output->dims[3];

    /* this group's interleaved (fp32) filter and packed im2col columns */
    float* interleave_fp32 = ( float* )priv_info->interleave_buffer_pack4 + outchan_g * group * kernel_size;
    float* im2col_pack4_fp32 = priv_info->im2col_buffer_pack4;
    /* destination slice inside the quantized output tensor */
    uint8_t * output_uint8 = ( uint8_t* )output->data + n * out_image_size + outchan_g * group * out_h * out_w;

    int* bias_int32 = NULL;
    float bias_scale = 0.f;
    if (bias)
    {
        bias_int32 = ( int* )bias->data + outchan_g * group;
        /* int32 bias is quantized with scale = input_scale * weight_scale */
        bias_scale = input->scale * filter->scale;
    }

    float* filter_sgemm = interleave_fp32;
    float* input_sgemm_pack4 = im2col_pack4_fp32;
    /* fp32 scratch for the GEMM result, requantized at the end.
       NOTE(review): sys_malloc result is not checked for NULL */
    float* output_sgemm = (float*)sys_malloc(outchan_g * out_h * out_w * sizeof(float));

    sgemm(outchan_g, out_h * out_w, kernel_size, filter_sgemm, input_sgemm_pack4, output_sgemm, num_thread);

    /* process bias: dequantize and add per output channel */
    if (bias)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                output_sgemm[output_off] += (float )bias_int32[i] * bias_scale;
            }
        }
    }

    /* process activation relu (activation == 0) */
    if (param->activation == 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_sgemm[output_off] < 0)
                    output_sgemm[output_off] = 0;
            }
        }
    }

    /* process activation relu6 (activation > 0): clamp to [0, 6] */
    if (param->activation > 0)
    {
        for (int i = 0; i < outchan_g; i++)
        {
            for (int j = 0; j < out_h * out_w; j++)
            {
                int output_off = i * (out_h * out_w) + j;
                if (output_sgemm[output_off] < 0)
                    output_sgemm[output_off] = 0;
                if (output_sgemm[output_off] > 6)
                    output_sgemm[output_off] = 6;
            }
        }
    }

    /* quant from fp32 to uint8: round, add zero point, saturate to [0, 255] */
    for (int i = 0; i < outchan_g; i++)
    {
        for (int j = 0; j < out_h * out_w; j++)
        {
            int output_off = i * (out_h * out_w) + j;

            int udata = ( int )(round(output_sgemm[output_off] / output->scale) + output->zero_point);
            if (udata > 255)
                udata = 255;
            else if (udata < 0)
                udata = 0;
            output_uint8[output_off] = udata;
        }
    }

    sys_free(output_sgemm);
}
/* check whether this conv should use the winograd implementation */
/* Return 1 when the convolution qualifies for the winograd kernel:
 * ungrouped 3x3, stride 1, dilation 1, with at least 16 input channels and an
 * output channel count that is a non-zero multiple of 16, on a feature map
 * larger than 10x10. */
static int winograd_support(struct conv_param* param, int in_h, int in_w)
{
    /* tiny feature maps are not worth the transform overhead */
    if (in_h <= 10 && in_w <= 10)
        return 0;

    int is_3x3s1d1 = (param->kernel_h == 3) && (param->kernel_w == 3) &&
                     (param->stride_h == 1) && (param->stride_w == 1) &&
                     (param->dilation_h == 1) && (param->dilation_w == 1);
    if (param->group != 1 || !is_3x3s1d1)
        return 0;

    /* need enough channels, and output channels divisible by 16 */
    if (param->input_channel < 16 || param->output_channel < 16 || param->output_channel % 16 != 0)
        return 0;

    return 1;
}
/* Size in bytes of the im2col scratch buffer for one (batch, group) slice:
 * one column of kernel_size values per output pixel. */
int conv_hcl_get_shared_mem_size(struct ir_tensor* input, struct ir_tensor* output, struct conv_param* param)
{
    int input_chan = param->input_channel / param->group;
    int kernel_size = input_chan * param->kernel_h * param->kernel_w;
    int output_xy = output->dims[2] * output->dims[3];

    /* uint8 inference is simulated in fp32, so reserve 4 bytes per element */
    int elem_size = (input->data_type == TENGINE_DT_UINT8) ? 4 : input->elem_size;

    return elem_size * output_xy * kernel_size;
}
#if __AVX__
/* AVX variant: size in bytes of the pack-8 im2col buffer.
 * Columns are packed in groups of 8 plus per-column remainders, each region
 * padded to 8*K elements. */
int conv_hcl_get_shared_pack4_mem_size(struct ir_tensor* filter, struct ir_tensor* output, struct conv_param* param)
{
    int K = filter->elem_num / filter->dims[0];
    int N = output->dims[2] * output->dims[3];

    /* uint8 inference is simulated in fp32 */
    int elem_size = (filter->data_type == TENGINE_DT_UINT8) ? 4 : filter->elem_size;

    return 8 * K * (N / 8 + N % 8) * elem_size;
}
/* AVX variant: size in bytes of the packed filter buffer.
 * Rows are packed in groups of 8, then 4, then singles; each group region is
 * padded to 8*K elements. */
int conv_hcl_get_interleave_pack4_size(int M, int K, struct ir_tensor* filter)
{
    /* uint8 inference is simulated in fp32 */
    int elem_size = (filter->data_type == TENGINE_DT_UINT8) ? 4 : filter->elem_size;

    return 8 * K * (M / 8 + (M % 8) / 4 + M % 4) * elem_size;
}
/* AVX variant: re-pack the interleaved M x K filter matrix into the pack-8
 * layout consumed by the sgemm kernel. Rows are grouped 8 at a time, then 4
 * at a time, then singly; within each group the values for one k index are
 * stored contiguously so the kernel can load a whole column of the group. */
void conv_hcl_interleave_pack4(int M, int K, struct conv_priv_info* priv_info)
{
    float* pA = ( float* )priv_info->interleave_buffer;
    float* pA_t = ( float* )priv_info->interleave_buffer_pack4;

    /* number of full 8-row groups and index of the first leftover row */
    int nn_outch = M >> 3;
    int remain_outch_start = nn_outch << 3;

    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 8;

        /* source pointers into the 8 consecutive filter rows */
        const float* k0 = pA + (p + 0) * K;
        const float* k1 = pA + (p + 1) * K;
        const float* k2 = pA + (p + 2) * K;
        const float* k3 = pA + (p + 3) * K;
        const float* k4 = pA + (p + 4) * K;
        const float* k5 = pA + (p + 5) * K;
        const float* k6 = pA + (p + 6) * K;
        const float* k7 = pA + (p + 7) * K;

        float* ktmp = pA_t + (p / 8) * 8 * K;

        /* for each k, write the 8 row values contiguously */
        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp[4] = k4[0];
            ktmp[5] = k5[0];
            ktmp[6] = k6[0];
            ktmp[7] = k7[0];
            ktmp += 8;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
            k4 += 1;
            k5 += 1;
            k6 += 1;
            k7 += 1;
        }
    }

    /* 4-row groups out of what remains after the 8-row groups */
    nn_outch = (M - remain_outch_start) >> 2;
    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = remain_outch_start + pp * 4;

        const float* k0 = pA + (p + 0) * K;
        const float* k1 = pA + (p + 1) * K;
        const float* k2 = pA + (p + 2) * K;
        const float* k3 = pA + (p + 3) * K;

        /* each earlier 8-group and each 4-group occupies an 8*K region */
        float* ktmp = pA_t + (p / 8 + (p % 8) / 4) * 8 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }

    /* remaining single rows are copied verbatim */
    remain_outch_start += nn_outch << 2;
    for (int p = remain_outch_start; p < M; p++)
    {
        const float* k0 = pA + (p + 0) * K;

        float* ktmp = pA_t + (p / 8 + (p % 8) / 4 + p % 4) * 8 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}
#else
/* SSE/generic variant: size in bytes of the pack-4 im2col buffer.
 * Columns are packed in groups of 4 plus per-column remainders, each region
 * padded to 4*K elements. */
int conv_hcl_get_shared_pack4_mem_size(struct ir_tensor* filter, struct ir_tensor* output, struct conv_param* param)
{
    int K = filter->elem_num / filter->dims[0];
    int N = output->dims[2] * output->dims[3];

    /* uint8 inference is simulated in fp32 */
    int elem_size = (filter->data_type == TENGINE_DT_UINT8) ? 4 : filter->elem_size;

    return 4 * K * (N / 4 + N % 4) * elem_size;
}
/* SSE/generic variant: size in bytes of the packed filter buffer.
 * Rows are packed in groups of 4 plus singles; each region is padded to
 * 4*K elements. */
int conv_hcl_get_interleave_pack4_size(int M, int K, struct ir_tensor* filter)
{
    /* uint8 inference is simulated in fp32 */
    int elem_size = (filter->data_type == TENGINE_DT_UINT8) ? 4 : filter->elem_size;

    return 4 * K * (M / 4 + M % 4) * elem_size;
}
/* SSE/generic variant: re-pack the interleaved M x K filter matrix into the
 * pack-4 layout consumed by the sgemm kernel. Rows are grouped 4 at a time,
 * then singly; within a group the values for one k index are contiguous. */
void conv_hcl_interleave_pack4(int M, int K, struct conv_priv_info* priv_info)
{
    float* pA = ( float* )priv_info->interleave_buffer;
    float* pA_t = ( float* )priv_info->interleave_buffer_pack4;

    /* number of full 4-row groups and index of the first leftover row */
    int nn_outch = M >> 2;
    int remain_outch_start = nn_outch << 2;

    for (int pp = 0; pp < nn_outch; pp++)
    {
        int p = pp * 4;

        /* source pointers into the 4 consecutive filter rows */
        const float* k0 = pA + (p + 0) * K;
        const float* k1 = pA + (p + 1) * K;
        const float* k2 = pA + (p + 2) * K;
        const float* k3 = pA + (p + 3) * K;

        float* ktmp = pA_t + (p / 4) * 4 * K;

        /* for each k, write the 4 row values contiguously */
        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp[1] = k1[0];
            ktmp[2] = k2[0];
            ktmp[3] = k3[0];
            ktmp += 4;

            k0 += 1;
            k1 += 1;
            k2 += 1;
            k3 += 1;
        }
    }

    /* remaining single rows are copied verbatim */
    for (int p = remain_outch_start; p < M; p++)
    {
        const float* k0 = pA + (p + 0) * K;

        float* ktmp = pA_t + (p / 4 + p % 4) * 4 * K;

        for (int q = 0; q < K; q++)
        {
            ktmp[0] = k0[0];
            ktmp++;
            k0++;
        }
    }
}
#endif
/* One-time setup for the conv kernel: selects winograd when applicable,
 * allocates the im2col / pack4 / interleave work buffers (unless the caller
 * registered external ones) and interleaves the filter weights. Returns 0
 * on success (or the winograd prerun's result). */
int conv_hcl_prerun(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* output_tensor,
                    struct conv_priv_info* priv_info, struct conv_param* param)
{
    int in_h = input_tensor->dims[2];
    int in_w = input_tensor->dims[3];

    /* check winograd implement, only for conv3x3s1 */
    if (input_tensor->data_type == TENGINE_DT_FP32)
    {
        priv_info->winograd = winograd_support(param, in_h, in_w);
        if (priv_info->winograd)
        {
            return wino_conv_hcl_prerun(input_tensor, filter_tensor, output_tensor, priv_info, param);
        }
    }

    /* allocate im2col scratch unless an external buffer was registered */
    if (!priv_info->external_im2col_mem)
    {
        int mem_size = conv_hcl_get_shared_mem_size(input_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->im2col_buffer = mem;
        priv_info->im2col_buffer_size = mem_size;
    }

    /* allocate the packed im2col buffer unless an external one was registered */
    if (!priv_info->external_im2col_pack4_mem)
    {
        int mem_size = conv_hcl_get_shared_pack4_mem_size(filter_tensor, output_tensor, param);
        void* mem = sys_malloc(mem_size);
        priv_info->im2col_buffer_pack4 = mem;
        priv_info->im2col_buffer_pack4_size = mem_size;
    }

    /* allocate the interleaved filter buffer unless externally provided */
    if (!priv_info->external_interleave_mem)
    {
        int mem_size = get_private_mem_size(filter_tensor);
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer = mem;
        priv_info->interleave_buffer_size = mem_size;
    }

    /* interleave the filter weights (uint8 weights take the uint8 path) */
    if (input_tensor->data_type == TENGINE_DT_UINT8)
        interleave_uint8(filter_tensor, priv_info);
    else
        interleave(filter_tensor, priv_info);

    /* NOTE(review): this allocates the pack4 interleave buffer when
     * external_interleave_pack4_mem is SET — the opposite polarity of the
     * external_* guards above. conv_hcl_postrun frees it under the same
     * polarity, so the pair is internally consistent, but confirm the
     * intended meaning of this flag against its setter. */
    if (priv_info->external_interleave_pack4_mem)
    {
        int M = filter_tensor->dims[0];
        int K = filter_tensor->elem_num / filter_tensor->dims[0];
        int mem_size = conv_hcl_get_interleave_pack4_size(M, K, filter_tensor);
        void* mem = sys_malloc(mem_size);
        priv_info->interleave_buffer_pack4 = mem;
        priv_info->interleave_buffer_pack4_size = mem_size;

        conv_hcl_interleave_pack4(M, K, priv_info);

        /* the flat interleave buffer is no longer needed once repacked */
        if (!priv_info->external_interleave_mem && priv_info->interleave_buffer)
        {
            sys_free(priv_info->interleave_buffer);
            priv_info->interleave_buffer = NULL;
        }
    }

    return 0;
}
/* Release all buffers that prerun allocated internally; buffers registered
 * via conv_hcl_set_shared_*_mem (external_* flags) are left to the caller.
 * Returns 0 on success (or the winograd postrun's result). */
int conv_hcl_postrun(struct conv_priv_info* priv_info)
{
    /* the winograd path owns and releases its own buffers */
    if (priv_info->winograd)
    {
        return wino_conv_hcl_postrun(priv_info);
    }

    /* BUGFIX: the guard here used to test interleave_buffer but freed
     * interleave_buffer_pack4, so an internally-allocated interleave_buffer
     * that survived prerun was never released (leak). Free the buffer the
     * guard actually names; the pack4 buffer keeps its own branch below. */
    if (!priv_info->external_interleave_mem && priv_info->interleave_buffer != NULL)
    {
        sys_free(priv_info->interleave_buffer);
        priv_info->interleave_buffer = NULL;
    }

    if (!priv_info->external_im2col_mem && priv_info->im2col_buffer != NULL)
    {
        sys_free(priv_info->im2col_buffer);
        priv_info->im2col_buffer = NULL;
    }

    if (!priv_info->external_im2col_pack4_mem && priv_info->im2col_buffer_pack4 != NULL)
    {
        sys_free(priv_info->im2col_buffer_pack4);
        priv_info->im2col_buffer_pack4 = NULL;
    }

    /* the pack4 interleave buffer is allocated by prerun when the
     * external_interleave_pack4_mem flag is set, so it is freed under the
     * same polarity here */
    if (priv_info->external_interleave_pack4_mem && priv_info->interleave_buffer_pack4 != NULL)
    {
        sys_free(priv_info->interleave_buffer_pack4);
        priv_info->interleave_buffer_pack4 = NULL;
    }

    return 0;
}
/* Forward pass: for each batch image and each group, run im2col, pack the
 * columns, then dispatch to the fp32 or (fp32-simulated) uint8 GEMM. */
int conv_hcl_run(struct ir_tensor* input_tensor, struct ir_tensor* filter_tensor, struct ir_tensor* bias_tensor,
                 struct ir_tensor* output_tensor, struct conv_priv_info* priv_info, struct conv_param* param,
                 int num_thread, int cpu_affinity)
{
    int group = param->group;
    int type = input_tensor->data_type;

    /* the winograd path performs the whole convolution itself */
    if (priv_info->winograd)
    {
        return wino_conv_hcl_run(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, num_thread,
                                 cpu_affinity);
    }

    for (int i = 0; i < input_tensor->dims[0]; i++)    // batch size
    {
        for (int j = 0; j < group; j++)
        {
            /* unfold this (batch, group) slice into im2col columns */
            im2col_ir(input_tensor, output_tensor, priv_info, param, i, j);

            /* K = kernel volume per output channel, N = output spatial size */
            int K = filter_tensor->elem_num / filter_tensor->dims[0];
            int N = output_tensor->dims[2] * output_tensor->dims[3];
            float* im2col_fp32 = priv_info->im2col_buffer;
            float* im2col_pack4_fp32 = priv_info->im2col_buffer_pack4;
            input_pack4(K, N, im2col_fp32, im2col_pack4_fp32, num_thread);

            if (type == TENGINE_DT_UINT8)
                sgemm_uint8(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread);
            else
                sgemm_fp32(input_tensor, filter_tensor, bias_tensor, output_tensor, priv_info, param, i, j, num_thread);
        }
    }

    return 0;
}
/* Register a caller-provided im2col buffer; prerun will then skip allocating
 * one and postrun will not free it. Always returns 0. */
int conv_hcl_set_shared_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->im2col_buffer = mem;
    priv_info->im2col_buffer_size = mem_size;
    priv_info->external_im2col_mem = 1;
    return 0;
}
/* Register a caller-provided packed im2col buffer; prerun will then skip
 * allocating one and postrun will not free it. Always returns 0. */
int conv_hcl_set_shared_pack4_mem(struct conv_priv_info* priv_info, void* mem, int mem_size)
{
    priv_info->im2col_buffer_pack4 = mem;
    priv_info->im2col_buffer_pack4_size = mem_size;
    priv_info->external_im2col_pack4_mem = 1;
    return 0;
}
|
GB_unop__asinh_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__asinh_fc64_fc64
// op(A') function: GB_unop_tran__asinh_fc64_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = casinh (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = casinh (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = casinh (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ASINH || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = casinh (Ax [p]) for all p in [0, anz), parallelized with a static
// OpenMP schedule. Returns GrB_NO_VALUE when this operator/type pair is
// compiled out (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB_unop_apply__asinh_fc64_fc64
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC64_t aij = Ax [p] ;
        GxB_FC64_t z = aij ;    // cast is the identity for FC64 -> FC64
        Cx [p] = casinh (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A while applying casinh elementwise; the actual transpose loop is
// the shared template GB_unop_transpose.c, specialized by the GB_* macros
// defined above. Returns GrB_NO_VALUE when compiled out (GB_DISABLE).
GrB_Info GB_unop_tran__asinh_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__abs_int32_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int32_int64
// op(A') function: GB_tran__abs_int32_int64
// C type: int32_t
// A type: int64_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
int64_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, aij) \
int32_t z = (int32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = GB_IABS ((int32_t) Ax [p]) for all p in [0, anz): cast the int64
// entry down to int32, then take the integer absolute value. Parallelized
// with a static OpenMP schedule; returns GrB_NO_VALUE when compiled out.
GrB_Info GB_unop__abs_int32_int64
(
    int32_t *Cx,                // Cx and Ax may be aliased
    int64_t *Ax,
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;     // expands to: get, cast to int32, abs, store
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A while casting int64 -> int32 and applying GB_IABS elementwise;
// the transpose loop is the shared template GB_unaryop_transpose.c,
// specialized by the GB_* macros above. Returns GrB_NO_VALUE when compiled out.
GrB_Info GB_tran__abs_int32_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
blackscholes.c | // Copyright (c) 2007 Intel Corp.
// Black-Scholes
// Analytical method for calculating European Options
//
//
// Reference Source: Options, Futures, and Other Derivatives, 3rd Edition, Prentice
// Hall, John C. Hull,
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
#ifdef ENABLE_PARSEC_HOOKS
#include <hooks.h>
#endif
// Multi-threaded pthreads header
#ifdef ENABLE_THREADS
// Add the following line so that icc 9.0 is compatible with pthread lib.
#define __thread __threadp
MAIN_ENV
#undef __thread
#endif
// Multi-threaded OpenMP header
#ifdef ENABLE_OPENMP
#include <omp.h>
#endif
#ifdef ENABLE_TBB
#include "tbb/blocked_range.h"
#include "tbb/parallel_for.h"
#include "tbb/task_scheduler_init.h"
#include "tbb/tick_count.h"
using namespace std;
using namespace tbb;
#endif //ENABLE_TBB
// Multi-threaded header for Windows
#ifdef WIN32
#pragma warning(disable : 4305)
#pragma warning(disable : 4244)
#include <windows.h>
#endif
//Precision to use for calculations
#define fptype float
#define NUM_RUNS 100
// One option contract in working precision (fptype); mirrors OptionData2
// below, which holds the double-precision values read from optionData.txt.
typedef struct OptionData_ {
        fptype s;          // spot price
        fptype strike;     // strike price
        fptype r;          // risk-free interest rate
        fptype divq;       // dividend rate
        fptype v;          // volatility
        fptype t;          // time to maturity or option expiration in years
                           // (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc)
        char OptionType;   // Option type. "P"=PUT, "C"=CALL
        fptype divs;       // dividend vals (not used in this test)
        fptype DGrefval;   // DerivaGem Reference Value
} OptionData;
// Global working set, filled in main(): array-of-structs inputs plus the
// parallel struct-of-arrays copies the pricing loops actually read.
OptionData *data;       // option descriptions (AoS form)
fptype *prices;         // computed option prices, one per option
int numOptions;         // number of options to price (from argv)

// SoA views of the same inputs, cache-aligned in main() for the hot loop
int    * otype;         // 1 = put, 0 = call
fptype * sptprice;
fptype * strike;
fptype * rate;
fptype * volatility;
fptype * otime;

int numError = 0;       // count of ERR_CHK mismatches vs. DGrefval
int nThreads;           // worker count (from argv)
// input data generator
//Precision to use
#define fptype2 double
// Double-precision option record matching one row of optionData.txt; copied
// (narrowed) into OptionData in main().
typedef struct OptionData2_ {
        fptype2 s;                // spot price
        fptype2 strike;           // strike price
        fptype2 r;                // risk-free interest rate
        fptype2 divq;             // dividend rate
        fptype2 v;                // volatility
        fptype2 t;                // time to maturity or option expiration in years
                                  // (1yr = 1.0, 6mos = 0.5, 3mos = 0.25, ..., etc)
        const char *OptionType;   // Option type. "P"=PUT, "C"=CALL
        fptype2 divs;             // dividend vals (not used in this test)
        fptype2 DGrefval;         // DerivaGem Reference Value
} OptionData2;
//Total number of options in optionData.txt
#define MAX_OPTIONS 1000
// Reference option table compiled directly into the binary; main() reuses
// entries cyclically (loopnum % MAX_OPTIONS) when numOptions > MAX_OPTIONS.
OptionData2 data_init[] = {
#include "optionData.txt"
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// Cumulative Normal Distribution Function
// See Hull, Section 11.8, P.243-244
#define inv_sqrt_2xPI 0.39894228040143270286
// Cumulative Normal Distribution Function N(x).
// Polynomial approximation (Abramowitz & Stegun 26.2.17); see Hull,
// Section 11.8, P.243-244.
// Note: intermediate results are stored in fptype variables step by step so
// the rounding sequence matches the reference implementation exactly.
fptype CNDF ( fptype InputX )
{
    // Exploit the symmetry N(-x) = 1 - N(x): evaluate at |x|, flip at the end.
    int negative = 0;
    if (InputX < 0.0) {
        InputX = -InputX;
        negative = 1;
    }

    // Standard normal density n(x) = exp(-x^2/2) / sqrt(2*pi).
    fptype expValue = exp(-0.5f * InputX * InputX);
    fptype density  = expValue * inv_sqrt_2xPI;

    // k = 1 / (1 + 0.2316419 * x) and its first five powers.
    fptype k1 = 0.2316419 * InputX;
    k1 = 1.0 + k1;
    k1 = 1.0 / k1;
    fptype k2 = k1 * k1;
    fptype k3 = k2 * k1;
    fptype k4 = k3 * k1;
    fptype k5 = k4 * k1;

    // Fifth-degree polynomial in k with the A&S coefficients.
    fptype acc1 = k1 * 0.319381530;
    fptype acc2 = k2 * (-0.356563782);
    fptype term = k3 * 1.781477937;
    acc2 = acc2 + term;
    term = k4 * (-1.821255978);
    acc2 = acc2 + term;
    term = k5 * 1.330274429;
    acc2 = acc2 + term;
    acc1 = acc2 + acc1;

    fptype scaled = acc1 * density;
    fptype result = 1.0 - scaled;

    if (negative) {
        result = 1.0 - result;
    }
    return result;
}
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
/* Price a European option (no dividends) with the closed-form Black-Scholes
 * formula:
 *   d1   = (ln(S/K) + (r + v^2/2) T) / (v sqrt(T))
 *   d2   = d1 - v sqrt(T)
 *   call = S N(d1) - K e^{-rT} N(d2)
 *   put  = K e^{-rT} N(-d2) - S N(-d1)
 * otype == 0 selects a call, anything else a put. timet is unused. */
fptype BlkSchlsEqEuroNoDiv( fptype sptprice,
                            fptype strike, fptype rate, fptype volatility,
                            fptype time, int otype, float timet )
{
    fptype OptionPrice;

    // local private working variables for the calculation
    fptype xStockPrice;
    fptype xStrikePrice;
    fptype xRiskFreeRate;
    fptype xVolatility;
    fptype xTime;
    fptype xSqrtTime;

    fptype logValues;
    fptype xLogTerm;
    fptype xD1;
    fptype xD2;
    fptype xPowerTerm;
    fptype xDen;
    fptype d1;
    fptype d2;
    fptype FutureValueX;
    fptype NofXd1;
    fptype NofXd2;
    fptype NegNofXd1;
    fptype NegNofXd2;

    xStockPrice = sptprice;
    xStrikePrice = strike;
    xRiskFreeRate = rate;
    xVolatility = volatility;

    xTime = time;
    xSqrtTime = sqrt(xTime);

    logValues = log( sptprice / strike );    // ln(S/K)

    xLogTerm = logValues;

    // (r + v^2/2) T + ln(S/K), then divide by v sqrt(T) to get d1
    xPowerTerm = xVolatility * xVolatility;
    xPowerTerm = xPowerTerm * 0.5;

    xD1 = xRiskFreeRate + xPowerTerm;
    xD1 = xD1 * xTime;
    xD1 = xD1 + xLogTerm;

    xDen = xVolatility * xSqrtTime;
    xD1 = xD1 / xDen;
    xD2 = xD1 - xDen;    // d2 = d1 - v sqrt(T)

    d1 = xD1;
    d2 = xD2;

    NofXd1 = CNDF( d1 );
    NofXd2 = CNDF( d2 );

    // discounted strike K e^{-rT}
    FutureValueX = strike * ( exp( -(rate)*(time) ) );
    if (otype == 0) {
        // call
        OptionPrice = (sptprice * NofXd1) - (FutureValueX * NofXd2);
    } else {
        // put, using N(-x) = 1 - N(x)
        NegNofXd1 = (1.0 - NofXd1);
        NegNofXd2 = (1.0 - NofXd2);
        OptionPrice = (FutureValueX * NegNofXd2) - (sptprice * NegNofXd1);
    }

    return OptionPrice;
}
#ifdef ENABLE_TBB
// TBB body object: prices every option index in the supplied blocked_range
// using the global input arrays and stores the results in prices[].
struct mainWork {
    mainWork() {}
    // splitting constructor required by tbb::parallel_for (no per-split state)
    mainWork(mainWork &w, tbb::split) {}

    void operator()(const tbb::blocked_range<int> &range) const {
        fptype price;
        int begin = range.begin();
        int end = range.end();

        for (int i=begin; i!=end; i++) {
            /* Calling main function to calculate option value based on
             * Black & Scholes's equation.
             */

            price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i],
                                         rate[i], volatility[i], otime[i],
                                         otype[i], 0);
            prices[i] = price;

#ifdef ERR_CHK
            // compare against the DerivaGem reference value
            fptype priceDelta = data[i].DGrefval - price;
            if( fabs(priceDelta) >= 1e-5 ){
                fprintf(stderr,"Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
                        i, price, data[i].DGrefval, priceDelta);
                numError ++;
            }
#endif
        }
    }
};
#endif // ENABLE_TBB
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////
#ifdef ENABLE_TBB
// TBB driver: prices the whole option set NUM_RUNS times, letting TBB
// partition the [0, numOptions) range; the affinity_partitioner keeps the
// same chunks on the same workers across runs. tid_ptr is unused here.
int bs_thread(void *tid_ptr) {
    int j;
    tbb::affinity_partitioner a;

    mainWork doall;
    for (j=0; j<NUM_RUNS; j++) {
        tbb::parallel_for(tbb::blocked_range<int>(0, numOptions), doall, a);
    }

    return 0;
}
#else // !ENABLE_TBB
#ifdef WIN32
// Windows thread entry point (signature required by CreateThread)
DWORD WINAPI bs_thread(LPVOID tid_ptr){
#else
// pthread/serial/OpenMP entry point; tid selects this worker's option slice
int bs_thread(void *tid_ptr) {
#endif
    int i, j;
    fptype price;
    fptype priceDelta;
    int tid = *(int *)tid_ptr;
    /* static partition of the option array across nThreads workers
       (unused in the OpenMP build, where the omp loop covers all options) */
    int start = tid * (numOptions / nThreads);
    int end = start + (numOptions / nThreads);

    clock_t time0 = clock();
    for (j=0; j<NUM_RUNS; j++) {
#ifdef ENABLE_OPENMP
#pragma omp parallel for private(i, price, priceDelta)
        for (i=0; i<numOptions; i++) {
#else //ENABLE_OPENMP
        for (i=start; i<end; i++) {
#endif //ENABLE_OPENMP
            /* Calling main function to calculate option value based on
             * Black & Scholes's equation.
             */
            price = BlkSchlsEqEuroNoDiv( sptprice[i], strike[i],
                                         rate[i], volatility[i], otime[i],
                                         otype[i], 0);
            prices[i] = price;

#ifdef ERR_CHK
            priceDelta = data[i].DGrefval - price;
            if( fabs(priceDelta) >= 1e-4 ){
                printf("Error on %d. Computed=%.5f, Ref=%.5f, Delta=%.5f\n",
                       i, price, data[i].DGrefval, priceDelta);
                numError ++;
            }
#endif
        }
    }
    clock_t time1 = clock();
    /* NOTE(review): dividing by 1000000 assumes CLOCKS_PER_SEC == 1000000
       (guaranteed by POSIX, not by ISO C) — confirm or use CLOCKS_PER_SEC */
    printf("{ \"status\": %d, \"options\": \"%d\", \"time\": %f }\n", 1, numOptions, (float) (time1-time0) / 1000000);

    return 0;
}
#endif //ENABLE_TBB
int main (int argc, char **argv)
{
FILE *file;
int i;
int loopnum;
fptype * buffer;
int * buffer2;
int rv;
#ifdef PARSEC_VERSION
#define __PARSEC_STRING(x) #x
#define __PARSEC_XSTRING(x) __PARSEC_STRING(x)
printf("PARSEC Benchmark Suite Version "__PARSEC_XSTRING(PARSEC_VERSION)"\n");
fflush(NULL);
#else
printf("PARSEC Benchmark Suite\n");
fflush(NULL);
#endif //PARSEC_VERSION
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_begin(__parsec_blackscholes);
#endif
if (argc != 4)
{
//printf("Usage:\n\t%s <nthreads> <inputFile> <outputFile>\n", argv[0]);
printf("Usage:\n\t%s <nthreads> <numOptions> <outputFile>\n", argv[0]);
exit(1);
}
nThreads = atoi(argv[1]);
numOptions = atoi(argv[2]);
//char *inputFile = argv[2];
char *outputFile = argv[3];
//Read input data from file
//file = fopen(inputFile, "r");
// if(file == NULL) {
// printf("ERROR: Unable to open file `%s'.\n", inputFile);
// exit(1);
// }
// rv = fscanf(file, "%i", &numOptions);
// if(rv != 1) {
// printf("ERROR: Unable to read from file `%s'.\n", inputFile);
// fclose(file);
// exit(1);
// }
if(nThreads > numOptions) {
printf("WARNING: Not enough work, reducing number of threads to match number of options.\n");
nThreads = numOptions;
}
#if !defined(ENABLE_THREADS) && !defined(ENABLE_OPENMP) && !defined(ENABLE_TBB)
if(nThreads != 1) {
printf("Error: <nthreads> must be 1 (serial version)\n");
exit(1);
}
#endif
// alloc spaces for the option data
data = (OptionData*)malloc(numOptions*sizeof(OptionData));
prices = (fptype*)malloc(numOptions*sizeof(fptype));
for ( loopnum = 0; loopnum < numOptions; ++ loopnum )
{
data[loopnum].s = data_init[loopnum % MAX_OPTIONS].s;
data[loopnum].strike = data_init[loopnum % MAX_OPTIONS].strike;
data[loopnum].r = data_init[loopnum % MAX_OPTIONS].r;
data[loopnum].divq = data_init[loopnum % MAX_OPTIONS].divq;
data[loopnum].v = data_init[loopnum % MAX_OPTIONS].v;
data[loopnum].t = data_init[loopnum % MAX_OPTIONS].t;
data[loopnum].OptionType = data_init[loopnum % MAX_OPTIONS].OptionType[0];
data[loopnum].divs = data_init[loopnum % MAX_OPTIONS].divs;
data[loopnum].DGrefval = data_init[loopnum % MAX_OPTIONS].DGrefval;
// rv = fscanf(file, "%f %f %f %f %f %f %c %f %f", &data[loopnum].s, &data[loopnum].strike, &data[loopnum].r, &data[loopnum].divq, &data[loopnum].v, &data[loopnum].t, &data[loopnum].OptionType, &data[loopnum].divs, &data[loopnum].DGrefval);
// if(rv != 9) {
// printf("ERROR: Unable to read from file `%s'.\n", inputFile);
// fclose(file);
// exit(1);
// }
}
// rv = fclose(file);
// if(rv != 0) {
// printf("ERROR: Unable to close file `%s'.\n", inputFile);
// exit(1);
// }
#ifdef ENABLE_THREADS
MAIN_INITENV(,8000000,nThreads);
#endif
printf("Num of Options: %d\n", numOptions);
printf("Num of Runs: %d\n", NUM_RUNS);
#define PAD 256
#define LINESIZE 64
buffer = (fptype *) malloc(5 * numOptions * sizeof(fptype) + PAD);
sptprice = (fptype *) (((unsigned long long)buffer + PAD) & ~(LINESIZE - 1));
strike = sptprice + numOptions;
rate = strike + numOptions;
volatility = rate + numOptions;
otime = volatility + numOptions;
buffer2 = (int *) malloc(numOptions * sizeof(fptype) + PAD);
otype = (int *) (((unsigned long long)buffer2 + PAD) & ~(LINESIZE - 1));
for (i=0; i<numOptions; i++) {
otype[i] = (data[i].OptionType == 'P') ? 1 : 0;
sptprice[i] = data[i].s;
strike[i] = data[i].strike;
rate[i] = data[i].r;
volatility[i] = data[i].v;
otime[i] = data[i].t;
}
printf("Size of data: %lu\n", numOptions * (sizeof(OptionData) + sizeof(int)));
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_begin();
#endif
#ifdef ENABLE_THREADS
#ifdef WIN32
HANDLE *threads;
int *nums;
threads = (HANDLE *) malloc (nThreads * sizeof(HANDLE));
nums = (int *) malloc (nThreads * sizeof(int));
for(i=0; i<nThreads; i++) {
nums[i] = i;
threads[i] = CreateThread(0, 0, bs_thread, &nums[i], 0, 0);
}
WaitForMultipleObjects(nThreads, threads, TRUE, INFINITE);
free(threads);
free(nums);
#else
int *tids;
tids = (int *) malloc (nThreads * sizeof(int));
for(i=0; i<nThreads; i++) {
tids[i]=i;
CREATE_WITH_ARG(bs_thread, &tids[i]);
}
WAIT_FOR_END(nThreads);
free(tids);
#endif //WIN32
#else //ENABLE_THREADS
#ifdef ENABLE_OPENMP
{
int tid=0;
omp_set_num_threads(nThreads);
bs_thread(&tid);
}
#else //ENABLE_OPENMP
#ifdef ENABLE_TBB
tbb::task_scheduler_init init(nThreads);
int tid=0;
bs_thread(&tid);
#else //ENABLE_TBB
//serial version
int tid=0;
bs_thread(&tid);
#endif //ENABLE_TBB
#endif //ENABLE_OPENMP
#endif //ENABLE_THREADS
#ifdef ENABLE_PARSEC_HOOKS
__parsec_roi_end();
#endif
//Write prices to output file
file = fopen(outputFile, "w");
if(file == NULL) {
printf("ERROR: Unable to open file `%s'.\n", outputFile);
exit(1);
}
rv = fprintf(file, "%i\n", numOptions);
if(rv < 0) {
printf("ERROR: Unable to write to file `%s'.\n", outputFile);
fclose(file);
exit(1);
}
for(i=0; i<numOptions; i++) {
rv = fprintf(file, "%.18f\n", prices[i]);
if(rv < 0) {
printf("ERROR: Unable to write to file `%s'.\n", outputFile);
fclose(file);
exit(1);
}
}
rv = fclose(file);
if(rv != 0) {
printf("ERROR: Unable to close file `%s'.\n", outputFile);
exit(1);
}
#ifdef ERR_CHK
printf("Num Errors: %d\n", numError);
#endif
free(data);
free(prices);
#ifdef ENABLE_PARSEC_HOOKS
__parsec_bench_end();
#endif
return 0;
}
|
gbdt.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_BOOSTING_GBDT_H_
#define LIGHTGBM_BOOSTING_GBDT_H_
#include <LightGBM/boosting.h>
#include <LightGBM/objective_function.h>
#include <LightGBM/prediction_early_stop.h>
#include <LightGBM/cuda/vector_cudahost.h>
#include <LightGBM/utils/json11.h>
#include <LightGBM/utils/threading.h>
#include <string>
#include <algorithm>
#include <cstdio>
#include <fstream>
#include <map>
#include <memory>
#include <mutex>
#include <unordered_map>
#include <utility>
#include <vector>
#include "score_updater.hpp"
namespace LightGBM {
using json11::Json;
/*!
* \brief GBDT algorithm implementation. including Training, prediction, bagging.
*/
class GBDT : public GBDTBase {
public:
/*!
* \brief Constructor
*/
GBDT();
/*!
* \brief Destructor
*/
~GBDT();
/*!
* \brief Initialization logic
* \param gbdt_config Config for boosting
* \param train_data Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void Init(const Config* gbdt_config, const Dataset* train_data,
const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
 * \brief Merge model from other boosting object. Will insert to the front of current boosting object
 * \param other The boosting object to merge from (callers must pass a GBDT)
 */
void MergeFrom(const Boosting* other) override {
  // static_cast is the well-defined downcast here (callers guarantee `other`
  // is a GBDT); reinterpret_cast would silently misbehave if the hierarchy
  // ever gained multiple inheritance or a virtual base.
  auto other_gbdt = static_cast<const GBDT*>(other);
  // tmp move to other vector
  auto original_models = std::move(models_);
  models_ = std::vector<std::unique_ptr<Tree>>();
  // deep-copy the other model's trees first so they end up at the front
  for (const auto& tree : other_gbdt->models_) {
    auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
    models_.push_back(std::move(new_tree));
  }
  num_init_iteration_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
  // then re-append deep copies of this object's original trees
  for (const auto& tree : original_models) {
    auto new_tree = std::unique_ptr<Tree>(new Tree(*(tree.get())));
    models_.push_back(std::move(new_tree));
  }
  num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
}
void ShuffleModels(int start_iter, int end_iter) override {
  // Randomly permutes whole iterations (groups of num_tree_per_iteration_
  // trees) within [start_iter, end_iter); iterations outside the window keep
  // their positions. end_iter <= 0 means "through the last iteration".
  int total_iter = static_cast<int>(models_.size()) / num_tree_per_iteration_;
  start_iter = std::max(0, start_iter);
  if (end_iter <= 0) {
    end_iter = total_iter;
  }
  end_iter = std::min(total_iter, end_iter);
  auto original_models = std::move(models_);
  std::vector<int> indices(total_iter);
  for (int i = 0; i < total_iter; ++i) {
    indices[i] = i;
  }
  // Fisher-Yates-style shuffle with a fixed seed (17) so the permutation is
  // reproducible. NOTE(review): assumes Random::NextShort(lo, hi) draws from
  // [lo, hi) -- confirm against the Random implementation; drawing from
  // [i + 1, ...) (rather than [i, ...)) means element i never stays put.
  Random tmp_rand(17);
  for (int i = start_iter; i < end_iter - 1; ++i) {
    int j = tmp_rand.NextShort(i + 1, end_iter);
    std::swap(indices[i], indices[j]);
  }
  // Rebuild the model list with deep copies of the trees in shuffled order.
  models_ = std::vector<std::unique_ptr<Tree>>();
  for (int i = 0; i < total_iter; ++i) {
    for (int j = 0; j < num_tree_per_iteration_; ++j) {
      int tree_idx = indices[i] * num_tree_per_iteration_ + j;
      auto new_tree = std::unique_ptr<Tree>(new Tree(*(original_models[tree_idx].get())));
      models_.push_back(std::move(new_tree));
    }
  }
}
/*!
* \brief Reset the training data
* \param train_data New Training data
* \param objective_function Training objective function
* \param training_metrics Training metrics
*/
void ResetTrainingData(const Dataset* train_data, const ObjectiveFunction* objective_function,
const std::vector<const Metric*>& training_metrics) override;
/*!
* \brief Reset Boosting Config
* \param gbdt_config Config for boosting
*/
void ResetConfig(const Config* gbdt_config) override;
/*!
* \brief Adding a validation dataset
* \param valid_data Validation dataset
* \param valid_metrics Metrics for validation dataset
*/
void AddValidDataset(const Dataset* valid_data,
const std::vector<const Metric*>& valid_metrics) override;
/*!
* \brief Perform a full training procedure
* \param snapshot_freq frequency of snapshot
* \param model_output_path path of model file
*/
void Train(int snapshot_freq, const std::string& model_output_path) override;
void RefitTree(const std::vector<std::vector<int>>& tree_leaf_prediction) override;
/*!
* \brief Training logic
* \param gradients nullptr for using default objective, otherwise use self-defined boosting
* \param hessians nullptr for using default objective, otherwise use self-defined boosting
* \return True if cannot train any more
*/
bool TrainOneIter(const score_t* gradients, const score_t* hessians) override;
/*!
* \brief Rollback one iteration
*/
void RollbackOneIter() override;
/*!
* \brief Get current iteration
*/
int GetCurrentIteration() const override { return static_cast<int>(models_.size()) / num_tree_per_iteration_; }
/*!
 * \brief Can use early stopping for prediction or not
 * \return True if cannot use early stopping for prediction
 */
bool NeedAccuratePrediction() const override {
  // With no objective attached, be conservative and require accurate
  // prediction; otherwise the objective function decides.
  return objective_function_ == nullptr
             ? true
             : objective_function_->NeedAccuratePrediction();
}
/*!
* \brief Get evaluation result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \return evaluation result
*/
std::vector<double> GetEvalAt(int data_idx) const override;
/*!
* \brief Get current training score
* \param out_len length of returned score
* \return training score
*/
const double* GetTrainingScore(int64_t* out_len) override;
/*!
 * \brief Get size of prediction at data_idx data
 * \param data_idx 0: training data, 1: 1st validation data
 * \return The size of prediction
 */
int64_t GetNumPredictAt(int data_idx) const override {
  CHECK(data_idx >= 0 && data_idx <= static_cast<int>(valid_score_updater_.size()));
  data_size_t num_data = train_data_->num_data();
  if (data_idx > 0) {
    // data_idx >= 1 selects the (data_idx - 1)-th validation set.
    num_data = valid_score_updater_[data_idx - 1]->num_data();
  }
  // Widen BEFORE multiplying: the product num_data * num_class_ would
  // otherwise be computed in data_size_t and could overflow before the
  // conversion to int64_t.
  return static_cast<int64_t>(num_data) * num_class_;
}
/*!
* \brief Get prediction result at data_idx data
* \param data_idx 0: training data, 1: 1st validation data
* \param result used to store prediction result, should allocate memory before call this function
* \param out_len length of returned score
*/
void GetPredictAt(int data_idx, double* out_result, int64_t* out_len) override;
/*!
 * \brief Get number of prediction for one data
 * \param start_iteration Start index of the iteration to predict
 * \param num_iteration number of used iterations
 * \param is_pred_leaf True if predicting leaf index
 * \param is_pred_contrib True if predicting feature contribution
 * \return number of prediction
 */
inline int NumPredictOneRow(int start_iteration, int num_iteration, bool is_pred_leaf, bool is_pred_contrib) const override {
  // Leaf-index prediction takes precedence over contribution prediction.
  if (is_pred_leaf) {
    const int max_iteration = GetCurrentIteration();
    const int first = std::min(std::max(start_iteration, 0), max_iteration);
    const int available = max_iteration - first;
    // A positive num_iteration caps how many iterations are used.
    const int used = (num_iteration > 0) ? std::min(available, num_iteration) : available;
    return num_class_ * used;
  }
  if (is_pred_contrib) {
    // +1 for 0-based indexing, +1 for baseline
    return num_tree_per_iteration_ * (max_feature_idx_ + 2);
  }
  return num_class_;
}
void PredictRaw(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictRawByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void Predict(const double* features, double* output,
const PredictionEarlyStopInstance* earlyStop) const override;
void PredictByMap(const std::unordered_map<int, double>& features, double* output,
const PredictionEarlyStopInstance* early_stop) const override;
void PredictLeafIndex(const double* features, double* output) const override;
void PredictLeafIndexByMap(const std::unordered_map<int, double>& features, double* output) const override;
void PredictContrib(const double* features, double* output) const override;
void PredictContribByMap(const std::unordered_map<int, double>& features,
std::vector<std::unordered_map<int, double>>* output) const override;
/*!
* \brief Dump model to json format string
* \param start_iteration The model will be saved start from
* \param num_iteration Number of iterations that want to dump, -1 means dump all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \return Json format string of model
*/
std::string DumpModel(int start_iteration, int num_iteration,
int feature_importance_type) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \return if-else format codes of model
*/
std::string ModelToIfElse(int num_iteration) const override;
/*!
* \brief Translate model to if-else statement
* \param num_iteration Number of iterations that want to translate, -1 means translate all
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
bool SaveModelToIfElse(int num_iteration, const char* filename) const override;
/*!
* \brief Save model to file
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \param filename Filename that want to save to
* \return is_finish Is training finished or not
*/
bool SaveModelToFile(int start_iteration, int num_iterations,
int feature_importance_type,
const char* filename) const override;
/*!
* \brief Save model to string
* \param start_iteration The model will be saved start from
* \param num_iterations Number of model that want to save, -1 means save all
* \param feature_importance_type Type of feature importance, 0: split, 1: gain
* \return Non-empty string if succeeded
*/
std::string SaveModelToString(int start_iteration, int num_iterations, int feature_importance_type) const override;
/*!
* \brief Restore from a serialized buffer
*/
bool LoadModelFromString(const char* buffer, size_t len) override;
/*!
* \brief Calculate feature importances
* \param num_iteration Number of model that want to use for feature importance, -1 means use all
* \param importance_type: 0 for split, 1 for gain
* \return vector of feature_importance
*/
std::vector<double> FeatureImportance(int num_iteration, int importance_type) const override;
/*!
* \brief Calculate upper bound value
* \return upper bound value
*/
double GetUpperBoundValue() const override;
/*!
* \brief Calculate lower bound value
* \return lower bound value
*/
double GetLowerBoundValue() const override;
/*!
* \brief Get max feature index of this model
* \return Max feature index of this model
*/
inline int MaxFeatureIdx() const override { return max_feature_idx_; }
/*!
* \brief Get feature names of this model
* \return Feature names of this model
*/
inline std::vector<std::string> FeatureNames() const override { return feature_names_; }
/*!
* \brief Get index of label column
* \return index of label column
*/
inline int LabelIdx() const override { return label_idx_; }
/*!
* \brief Get number of weak sub-models
* \return Number of weak sub-models
*/
inline int NumberOfTotalModel() const override { return static_cast<int>(models_.size()); }
/*!
* \brief Get number of tree per iteration
* \return number of tree per iteration
*/
inline int NumModelPerIteration() const override { return num_tree_per_iteration_; }
/*!
* \brief Get number of classes
* \return Number of classes
*/
inline int NumberOfClasses() const override { return num_class_; }
/*!
* \brief Get mapping info
* \return mapping info
*/
inline std::vector< std::string > GetMapping() const { return mapping; }
/*!
 * \brief Get number of mapping
 * \return number of mapping
 */
// Explicit cast: mapping.size() is size_t; returning it as int implicitly
// narrows and triggers conversion warnings.
inline int GetNumMapping() const { return static_cast<int>(mapping.size()); }
// Configure which slice of the trained model [start_iteration,
// start_iteration + num_iteration) subsequent Predict* calls will use.
// num_iteration <= 0 means "everything from start_iteration to the end".
inline void InitPredict(int start_iteration, int num_iteration, bool is_pred_contrib) override {
  num_iteration_for_pred_ = static_cast<int>(models_.size()) / num_tree_per_iteration_;
  // Clamp start_iteration into [0, total available iterations].
  start_iteration = std::max(start_iteration, 0);
  start_iteration = std::min(start_iteration, num_iteration_for_pred_);
  if (num_iteration > 0) {
    // Use at most num_iteration iterations, bounded by what remains.
    num_iteration_for_pred_ = std::min(num_iteration, num_iteration_for_pred_ - start_iteration);
  } else {
    num_iteration_for_pred_ = num_iteration_for_pred_ - start_iteration;
  }
  start_iteration_for_pred_ = start_iteration;
  if (is_pred_contrib) {
    // Contribution prediction needs each tree's max depth recomputed first.
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < static_cast<int>(models_.size()); ++i) {
      models_[i]->RecomputeMaxDepth();
    }
  }
}
// Read the output value of leaf `leaf_idx` in tree `tree_idx`.
inline double GetLeafValue(int tree_idx, int leaf_idx) const override {
  // Bounds-check both indices before dereferencing the tree.
  CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
  CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
  return models_[tree_idx]->LeafOutput(leaf_idx);
}
// Overwrite the output value of leaf `leaf_idx` in tree `tree_idx`.
inline void SetLeafValue(int tree_idx, int leaf_idx, double val) override {
  // Bounds-check both indices before mutating the tree.
  CHECK(tree_idx >= 0 && static_cast<size_t>(tree_idx) < models_.size());
  CHECK(leaf_idx >= 0 && leaf_idx < models_[tree_idx]->num_leaves());
  models_[tree_idx]->SetLeafOutput(leaf_idx, val);
}
/*!
* \brief Get Type name of this boosting object
*/
const char* SubModelName() const override { return "tree"; }
bool IsLinear() const override { return linear_tree_; }
inline std::string ParserConfigStr() const override {return parser_config_str_;}
protected:
// Whether the objective's Hessian is constant; without an objective we
// cannot claim that, so answer false.
virtual bool GetIsConstHessian(const ObjectiveFunction* objective_function) {
  return objective_function != nullptr && objective_function->IsConstantHessian();
}
/*!
* \brief Print eval result and check early stopping
*/
virtual bool EvalAndCheckEarlyStopping();
/*!
* \brief reset config for bagging
*/
void ResetBaggingConfig(const Config* config, bool is_change_dataset);
/*!
* \brief Implement bagging logic
* \param iter Current interation
*/
virtual void Bagging(int iter);
virtual data_size_t BaggingHelper(data_size_t start, data_size_t cnt,
data_size_t* buffer);
data_size_t BalancedBaggingHelper(data_size_t start, data_size_t cnt,
data_size_t* buffer);
/*!
* \brief calculate the object function
*/
virtual void Boosting();
/*!
* \brief updating score after tree was trained
* \param tree Trained tree of this iteration
* \param cur_tree_id Current tree for multiclass training
*/
virtual void UpdateScore(const Tree* tree, const int cur_tree_id);
/*!
* \brief eval results for one metric
*/
virtual std::vector<double> EvalOneMetric(const Metric* metric, const double* score) const;
/*!
* \brief Print metric result of current iteration
* \param iter Current iteration
* \return best_msg if met early_stopping
*/
std::string OutputMetric(int iter);
double BoostFromAverage(int class_id, bool update_scorer);
/*! \brief current iteration */
int iter_;
/*! \brief Pointer to training data */
const Dataset* train_data_;
/*! \brief Config of gbdt */
std::unique_ptr<Config> config_;
/*! \brief Tree learner, will use this class to learn trees */
std::unique_ptr<TreeLearner> tree_learner_;
/*! \brief Objective function */
const ObjectiveFunction* objective_function_;
/*! \brief Store and update training data's score */
std::unique_ptr<ScoreUpdater> train_score_updater_;
/*! \brief Metrics for training data */
std::vector<const Metric*> training_metrics_;
/*! \brief Store and update validation data's scores */
std::vector<std::unique_ptr<ScoreUpdater>> valid_score_updater_;
/*! \brief Metric for validation data */
std::vector<std::vector<const Metric*>> valid_metrics_;
/*! \brief Number of rounds for early stopping */
int early_stopping_round_;
/*! \brief Only use first metric for early stopping */
bool es_first_metric_only_;
/*! \brief Best iteration(s) for early stopping */
std::vector<std::vector<int>> best_iter_;
/*! \brief Best score(s) for early stopping */
std::vector<std::vector<double>> best_score_;
/*! \brief output message of best iteration */
std::vector<std::vector<std::string>> best_msg_;
/*! \brief Trained models(trees) */
std::vector<std::unique_ptr<Tree>> models_;
/*! \brief Max feature index of training data*/
int max_feature_idx_;
/*! \brief Parser config file content */
std::string parser_config_str_ = "";
/*! \brief mapping info */
std::vector< std::string > mapping;
#ifdef USE_CUDA
/*! \brief First order derivative of training data */
std::vector<score_t, CHAllocator<score_t>> gradients_;
/*! \brief Second order derivative of training data */
std::vector<score_t, CHAllocator<score_t>> hessians_;
#else
/*! \brief First order derivative of training data */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> gradients_;
/*! \brief Second order derivative of training data */
std::vector<score_t, Common::AlignmentAllocator<score_t, kAlignedSize>> hessians_;
#endif
/*! \brief Store the indices of in-bag data */
std::vector<data_size_t, Common::AlignmentAllocator<data_size_t, kAlignedSize>> bag_data_indices_;
/*! \brief Number of in-bag data */
data_size_t bag_data_cnt_;
/*! \brief Number of training data */
data_size_t num_data_;
/*! \brief Number of trees per iterations */
int num_tree_per_iteration_;
/*! \brief Number of class */
int num_class_;
/*! \brief Index of label column */
data_size_t label_idx_;
/*! \brief number of used model */
int num_iteration_for_pred_;
/*! \brief Start iteration of used model */
int start_iteration_for_pred_;
/*! \brief Shrinkage rate for one iteration */
double shrinkage_rate_;
/*! \brief Number of loaded initial models */
int num_init_iteration_;
/*! \brief Feature names */
std::vector<std::string> feature_names_;
std::vector<std::string> feature_infos_;
std::unique_ptr<Dataset> tmp_subset_;
bool is_use_subset_;
std::vector<bool> class_need_train_;
bool is_constant_hessian_;
std::unique_ptr<ObjectiveFunction> loaded_objective_;
bool average_output_;
bool need_re_bagging_;
bool balanced_bagging_;
std::string loaded_parameter_;
std::vector<int8_t> monotone_constraints_;
const int bagging_rand_block_ = 1024;
std::vector<Random> bagging_rands_;
ParallelPartitionRunner<data_size_t, false> bagging_runner_;
Json forced_splits_json_;
bool linear_tree_;
};
} // namespace LightGBM
#endif // LightGBM_BOOSTING_GBDT_H_
|
TomoP3DModel_core.c | /*
* Copyright 2017 Daniil Kazantsev
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "TomoP3DModel_core.h"
#define M_PI 3.14159265358979323846
#define MAXCHAR 1000
/* Function to read parameters from the file Phantom3DLibrary.dat to build 3D analytical models
*
* Input Parameters:
* - ModelNo - the model number from Phantom3DLibrary file
* - DIM volume dimensions [N1,N2,N3] in voxels (N1 x N2 x N3)
* - Object - Analytical Model selection
* - Z1,Z2 are upper and lower indeces of the vertical dim to extract a slab
* - C0 - intensity
* - x0 - x0 position
* - y0 - y0 position
* - z0 - z0 position
* - a - size object
* - b - size object
* - c - size object
* - psi_gr1 - rotation angle1
* - psi_gr2 - rotation angle2
* - psi_gr3 - rotation angle3
*
* Output:
* 1. The analytical phantom size of [N1 x N2 x N3] or temporal 4D phantom (N1 x N2 x N3 x time-frames)
* Note if Z1, Z2 indeces selected then the size can be [N1 x N2 x Z2-Z1]
*/
/* function to build a single (stationary) object.
 * Renders one analytical object (gaussian / paraboloid / ellipsoid / cone /
 * cuboid / elliptical_cylinder) and ADDS its intensity into the volume A,
 * indexed as A[tt*N1*N2*(Z2-Z1) + (k-Z1)*N1*N2 + j*N1 + i], i.e. only the
 * vertical slab [Z1, Z2) of time frame tt is written.
 * Returns A[0] (kept for backward compatibility with existing callers);
 * returns 0.0f and leaves A untouched if a work buffer cannot be allocated. */
float TomoP3DObject_core(float *A, long N1, long N2, long N3, long Z1, long Z2, char *Object,
	float C0, /* intensity */
	float x0, /* x0 position */
	float y0, /* y0 position */
	float z0, /* z0 position */
	float a, /* a - size object */
	float b, /* b - size object */
	float c, /* c - size object */
	float psi_gr1, /* rotation angle1 */
	float psi_gr2, /* rotation angle2 */
	float psi_gr3, /* rotation angle3 */
	long tt /*temporal index, 0 - for stationary */)
{
	long i, j, k, index, sub_vol_size;
	float Tomorange_min, Tomorange_max, H_x, H_y, H_z, C1, a2, b2, c2, phi_rot_radian, sin_phi, cos_phi, aa, bb, cc, psi1, psi2, psi3, T;
	float *Tomorange_X_Ar = NULL, *Tomorange_Y_Ar = NULL, *Tomorange_Z_Ar = NULL, *Xdel = NULL, *Ydel = NULL, *Zdel = NULL;
	/* Allocate all work buffers up front and bail out cleanly if any fails;
	 * the previous code only printed a message and then dereferenced the
	 * NULL pointer. free(NULL) is a no-op, so one cleanup path suffices. */
	Tomorange_X_Ar = malloc(N1 * sizeof(float));
	Tomorange_Y_Ar = malloc(N2 * sizeof(float));
	Tomorange_Z_Ar = malloc(N3 * sizeof(float));
	Xdel = malloc(N1 * sizeof(float));
	Ydel = malloc(N2 * sizeof(float));
	Zdel = malloc(N3 * sizeof(float));
	if ((Tomorange_X_Ar == NULL) || (Tomorange_Y_Ar == NULL) || (Tomorange_Z_Ar == NULL) ||
		(Xdel == NULL) || (Ydel == NULL) || (Zdel == NULL)) {
		printf("ERROR: memory allocation failed in TomoP3DObject_core\n");
		free(Xdel); free(Ydel); free(Zdel);
		free(Tomorange_X_Ar); free(Tomorange_Y_Ar); free(Tomorange_Z_Ar);
		return 0.0f;
	}
	sub_vol_size = Z2 - Z1; /* height (in voxels) of the extracted slab */
	/* regular voxel grid covering [-1, 1) in every dimension */
	Tomorange_min = -1.0f;
	Tomorange_max = 1.0f;
	H_x = (Tomorange_max - Tomorange_min) / (N1);
	H_y = (Tomorange_max - Tomorange_min) / (N2);
	H_z = (Tomorange_max - Tomorange_min) / (N3);
	for (i = 0; i<N1; i++) { Tomorange_X_Ar[i] = Tomorange_min + (float)i*H_x; }
	for (i = 0; i<N2; i++) { Tomorange_Y_Ar[i] = Tomorange_min + (float)i*H_y; }
	for (i = 0; i<N3; i++) { Tomorange_Z_Ar[i] = Tomorange_min + (float)i*H_z; }
	C1 = -4.0f*logf(2.0f); /* exponent scale for the gaussian object */
	/************************************************/
	phi_rot_radian = psi_gr1*((float)M_PI / 180.0f);
	sin_phi = sinf(phi_rot_radian); cos_phi = cosf(phi_rot_radian);
	/* voxel coordinates relative to the object centre */
	for (i = 0; i<N1; i++) Xdel[i] = Tomorange_X_Ar[i] - x0;
	for (i = 0; i<N2; i++) Ydel[i] = Tomorange_Y_Ar[i] - y0;
	for (i = 0; i<N3; i++) Zdel[i] = Tomorange_Z_Ar[i] - z0;
	/* all three rotation angles converted to radians */
	psi1 = psi_gr1*((float)M_PI / 180.0f);
	psi2 = psi_gr2*((float)M_PI / 180.0f);
	psi3 = psi_gr3*((float)M_PI / 180.0f);
	float bs[3][3] = {
		{0.0f,0.0f,0.0f},
		{0.0f,0.0f,0.0f},
		{0.0f,0.0f,0.0f} };
	float xh[3] = { 0.0f, 0.0f, 0.0f };
	float xh3[3] = { 0.0f, 0.0f, 0.0f };
	/* inverse squared semi-axes for the quadric test T = aa + bb + cc */
	a2 = 1.0f / (a*a);
	b2 = 1.0f / (b*b);
	c2 = 1.0f / (c*c);
	matrot3(bs, psi1, psi2, psi3); /* build the 3x3 rotation matrix */
	xh3[0] = x0; xh3[1] = y0; xh3[2] = z0;
	matvet3(bs, xh3, xh); /* rotated object centre */
	float xh1[3] = { 0.0f, 0.0f, 0.0f };
	float xh2[3] = { 0.0f, 0.0f, 0.0f };
	if ((strcmp("gaussian", Object) == 0) || (strcmp("paraboloid", Object) == 0) || (strcmp("ellipsoid", Object) == 0) || (strcmp("cone", Object) == 0))
	{
		/* these four shapes are all functions of the same normalized
		 * squared distance T */
#pragma omp parallel for shared(A,bs) private(k,i,j,index,aa,bb,cc,T,xh2,xh1)
		for (k = Z1; k<Z2; k++) {
			for (i = 0; i<N1; i++) {
				for (j = 0; j<N2; j++) {
					index = tt*N1*N2*sub_vol_size + (k - Z1)*N1*N2 + j*N1 + i;
					if ((psi1 != 0.0f) || (psi2 != 0.0f) || (psi3 != 0.0f)) {
						/* rotated object: rotate the voxel and compare
						 * against the rotated centre */
						xh1[0] = Tomorange_X_Ar[i];
						xh1[1] = Tomorange_Y_Ar[j];
						xh1[2] = Tomorange_Z_Ar[k];
						matvet3(bs, xh1, xh2);
						aa = a2*powf((xh2[0] - xh[0]), 2);
						bb = b2*powf((xh2[1] - xh[1]), 2);
						cc = c2*powf((xh2[2] - xh[2]), 2);
					}
					else {
						/* axis-aligned object: use the precomputed offsets */
						aa = a2*powf(Xdel[i], 2);
						bb = b2*powf(Ydel[j], 2);
						cc = c2*powf(Zdel[k], 2);
					}
					T = (aa + bb + cc);
					if (strcmp("gaussian", Object) == 0) {
						/* The object is a volumetric gaussian */
						T = C0*expf(C1*T);
					}
					if (strcmp("paraboloid", Object) == 0) {
						/* the object is a parabola Lambda = 1/2 */
						if (T <= 1.0f) T = C0*(1.0f - T);
						else T = 0.0f;
					}
					if (strcmp("ellipsoid", Object) == 0) {
						/* the object is en ellipsoid */
						if (T <= 1.0f) T = C0;
						else T = 0.0f;
					}
					if (strcmp("cone", Object) == 0) {
						/* the object is a cone */
						if (T <= 1.0f) T = C0*(1.0f - sqrtf(T));
						else T = 0.0f;
					}
					A[index] += T;
				}
			}
		}
	}
	if (strcmp("cuboid", Object) == 0) {
		/* the object is a cuboid; a, b, c are full widths, so compare
		 * against half-sizes */
		float x0r, y0r, HX, HY;
		a2 = 0.5f*a;
		b2 = 0.5f*b;
		c2 = 0.5f*c;
		/* NOTE(review): the original rotated (x0, y0) by a zero angle, which
		 * is the identity, so this is a plain copy. The later use of
		 * (Xdel[i] - x0r) therefore subtracts x0 twice -- preserved as-is to
		 * keep the output identical; confirm whether that is intended. */
		x0r = x0;
		y0r = y0;
		if (phi_rot_radian < 0.0f) {
			phi_rot_radian = (float)M_PI + phi_rot_radian;
			sin_phi = sinf(phi_rot_radian);
			cos_phi = cosf(phi_rot_radian);
		}
#pragma omp parallel for shared(A,Zdel) private(k,i,j,HX,HY,T)
		for (k = Z1; k<Z2; k++) {
			/* fabsf: operands are float, avoids a float->double round trip */
			if (fabsf(Zdel[k]) < c2) {
				for (i = 0; i<N1; i++) {
					for (j = 0; j<N2; j++) {
						HX = fabsf((Xdel[i] - x0r)*cos_phi + (Ydel[j] - y0r)*sin_phi);
						T = 0.0f;
						if (HX <= a2) {
							HY = fabsf((Ydel[j] - y0r)*cos_phi - (Xdel[i] - x0r)*sin_phi);
							if (HY <= b2) { T = C0; }
						}
						A[tt*N1*N2*sub_vol_size + (k - Z1)*N1*N2 + j*N1 + i] += T;
					}
				}
			}
		}
	}
	if (strcmp("elliptical_cylinder", Object) == 0) {
		/* the object is an elliptical cylinder: constant intensity inside
		 * the rotated ellipse, only on slices with |z - z0| < c */
#pragma omp parallel for shared(A) private(k,i,j,T)
		for (k = Z1; k<Z2; k++) {
			if (fabsf(Zdel[k]) < c) {
				for (i = 0; i<N1; i++) {
					for (j = 0; j<N2; j++) {
						T = a2*powf((Xdel[i] * cos_phi + Ydel[j] * sin_phi), 2) + b2*powf((-Xdel[i] * sin_phi + Ydel[j] * cos_phi), 2);
						if (T <= 1.0f) T = C0;
						else T = 0.0f;
						A[tt*N1*N2*sub_vol_size + (k - Z1)*N1*N2 + j*N1 + i] += T;
					}
				}
			}
		} /*k-loop*/
	}
	/****************************************************/
	free(Xdel); free(Ydel); free(Zdel);
	free(Tomorange_X_Ar); free(Tomorange_Y_Ar); free(Tomorange_Z_Ar);
	return *A;
}
/********************Core Function*****************************/
float TomoP3DModel_core(float *A, int ModelSelected, long N1, long N2, long N3, long Z1, long Z2, char *ModelParametersFilename)
{
int Model = 0, Components = 0, steps = 0, counter = 0, ii;
float C0 = 0.0f, x0 = 0.0f, y0 = 0.0f, z0 = 0.0f, a = 0.0f, b = 0.0f, c = 0.0f, psi_gr1 = 0.0f, psi_gr2 = 0.0f, psi_gr3 = 0.0f;
FILE *fp = fopen(ModelParametersFilename, "r"); // read parameters file
if (fp == NULL) {
printf("%s \n", "Cannot open the file");
}
else {
char str[MAXCHAR];
char tmpstr1[16];
char tmpstr2[22];
char tmpstr3[16];
char tmpstr4[16];
char tmpstr5[16];
char tmpstr6[16];
char tmpstr7[16];
char tmpstr8[16];
char tmpstr9[16];
char tmpstr10[16];
char tmpstr11[16];
char tmpstr12[16];
while (fgets(str, MAXCHAR, fp) != NULL)
{
/* work with non-# commented lines */
if (str[0] != '#') {
sscanf(str, "%15s : %21[^;];", tmpstr1, tmpstr2);
if (strcmp(tmpstr1, "Model") == 0)
{
Model = atoi(tmpstr2);
if ((ModelSelected == Model) && (counter == 0)) {
/* check if we have a right model */
if (fgets(str, MAXCHAR, fp) != NULL) sscanf(str, "%15s : %21[^;];", tmpstr1, tmpstr2);
else {
break;
}
if (strcmp(tmpstr1, "Components") == 0) Components = atoi(tmpstr2);
//printf("%s %i\n", "Components:", Components);
if (Components <= 0) {
printf("%s %i\n", "Components cannot be negative, the given value is", Components);
break;
}
if (fgets(str, MAXCHAR, fp) != NULL) sscanf(str, "%15s : %21[^;];", tmpstr1, tmpstr2);
else {
break;
}
if (strcmp(tmpstr1, "TimeSteps") == 0) steps = atoi(tmpstr2);
if (steps <= 0) {
printf("%s %i\n", "TimeSteps cannot be negative, the given value is", steps);
break;
}
//printf("%s %i\n", "TimeSteps:", steps);
if (steps == 1) {
/**************************************************/
//printf("\n %s %i %s \n", "Stationary 3D model", ModelSelected, " is selected");
/* loop over all components */
for (ii = 0; ii<Components; ii++) {
if (fgets(str, MAXCHAR, fp) != NULL) sscanf(str, "%15s : %21s %15s %15s %15s %15s %15s %15s %15s %15s %15s %15[^;];", tmpstr1, tmpstr2, tmpstr3, tmpstr4, tmpstr5, tmpstr6, tmpstr7, tmpstr8, tmpstr9, tmpstr10, tmpstr11, tmpstr12);
else {
break;
}
if (strcmp(tmpstr1, "Object") == 0) {
C0 = (float)atof(tmpstr3); /* intensity */
x0 = (float)atof(tmpstr4); /* x0 position */
y0 = (float)atof(tmpstr5); /* y0 position */
z0 = (float)atof(tmpstr6); /* z0 position */
a = (float)atof(tmpstr7); /* a - size object */
b = (float)atof(tmpstr8); /* b - size object */
c = (float)atof(tmpstr9); /* c - size object */
psi_gr1 = (float)atof(tmpstr10); /* rotation angle 1*/
psi_gr2 = (float)atof(tmpstr11); /* rotation angle 2*/
psi_gr3 = (float)atof(tmpstr12); /* rotation angle 3*/
}
else {
break;
}
// printf("\nObject : %s \nC0 : %f \nx0 : %f \ny0 : %f \nz0 : %f \na : %f \nb : %f \nc : %f \n", tmpstr2, C0, x0, y0, z0, a, b, c);
TomoP3DObject_core(A, N1, N2, N3, Z1, Z2, tmpstr2, C0, y0, x0, z0, a, b, c, psi_gr1, psi_gr2, psi_gr3, 0l); /* python */
}
}
else {
/**************************************************/
//printf("\n %s \n", "Temporal model is selected");
/* temporal phantom 3D + time (4D) */
float C1 = 0.0f, x1 = 0.0f, y1 = 0.0f, z1 = 0.0f, a1 = 0.0f, b1 = 0.0f, c1 = 0.0f, psi_gr1_1 = 0.0f, psi_gr2_1 = 0.0f, psi_gr3_1 = 0.0f;
/* loop over all components */
for (ii = 0; ii<Components; ii++) {
if (fgets(str, MAXCHAR, fp) != NULL) sscanf(str, "%15s : %21s %15s %15s %15s %15s %15s %15s %15s %15s %15s %15[^;];", tmpstr1, tmpstr2, tmpstr3, tmpstr4, tmpstr5, tmpstr6, tmpstr7, tmpstr8, tmpstr9, tmpstr10, tmpstr11, tmpstr12);
else {
break;
}
if (strcmp(tmpstr1, "Object") == 0) {
C0 = (float)atof(tmpstr3); /* intensity */
x0 = (float)atof(tmpstr4); /* x0 position */
y0 = (float)atof(tmpstr5); /* y0 position */
z0 = (float)atof(tmpstr6); /* y0 position */
a = (float)atof(tmpstr7); /* a - size object */
b = (float)atof(tmpstr8); /* b - size object */
c = (float)atof(tmpstr9); /* b - size object */
psi_gr1 = (float)atof(tmpstr10); /* rotation angle 1*/
psi_gr2 = (float)atof(tmpstr11); /* rotation angle 2*/
psi_gr3 = (float)atof(tmpstr12); /* rotation angle 3*/
}
else {
break;
}
// printf("\nObject : %s \nC0 : %f \nx0 : %f \ny0 : %f \nz0 : %f \na : %f \nb : %f \n", tmpstr2, C0, x0, y0, z0, a, b, c);
/* check Endvar relatedparameters */
if (fgets(str, MAXCHAR, fp) != NULL) sscanf(str, "%15s : %15s %15s %15s %15s %15s %15s %15s %15s %15s %15[^;];", tmpstr1, tmpstr3, tmpstr4, tmpstr5, tmpstr6, tmpstr7, tmpstr8, tmpstr9, tmpstr10, tmpstr11, tmpstr12);
else break;
if (strcmp(tmpstr1, "Endvar") == 0) {
C1 = (float)atof(tmpstr3); /* intensity */
x1 = (float)atof(tmpstr4); /* x0 position */
y1 = (float)atof(tmpstr5); /* y0 position */
z1 = (float)atof(tmpstr6); /* z0 position */
a1 = (float)atof(tmpstr7); /* a - size object */
b1 = (float)atof(tmpstr8); /* b - size object */
c1 = (float)atof(tmpstr9); /* c - size object */
psi_gr1_1 = (float)atof(tmpstr10); /* rotation angle 1*/
psi_gr2_1 = (float)atof(tmpstr11); /* rotation angle 2*/
psi_gr3_1 = (float)atof(tmpstr12); /* rotation angle 3*/
}
else {
printf("%s\n", "Cannot find 'Endvar' string in parameters file");
break;
}
//printf("\nObject : %s \nC0 : %f \nx0 : %f \ny0 : %f \nz0 : %f \na : %f \nb : %f \nc : %f \n", tmpstr2, C0, x0, y0, z0, a1, b1, c1);
/*now we know the initial parameters of the object and the final ones. We linearly extrapolate to establish steps and coordinates. */
/* calculating the full distance berween the start and the end points */
float distance = sqrtf(pow((x1 - x0), 2) + pow((y1 - y0), 2) + pow((z1 - z0), 2));
float d_dist = distance / (steps - 1); /*a step over line */
float C_step = (C1 - C0) / (steps - 1);
float a_step = (a1 - a) / (steps - 1);
float b_step = (b1 - b) / (steps - 1);
float c_step = (c1 - c) / (steps - 1);
float phi_rot_step1 = (psi_gr1_1 - psi_gr1) / (steps - 1);
float phi_rot_step2 = (psi_gr2_1 - psi_gr2) / (steps - 1);
float phi_rot_step3 = (psi_gr3_1 - psi_gr3) / (steps - 1);
long tt;
float x_t, y_t, z_t, a_t, b_t, c_t, C_t, phi1_t, phi2_t, phi3_t, d_step;
/* initialize */
x_t = x0; y_t = y0; z_t = z0; a_t = a; b_t = b; c_t = c; C_t = C0; phi1_t = psi_gr1; phi2_t = psi_gr2; phi3_t = psi_gr3; d_step = d_dist;
/*loop over time frames*/
for (tt = 0; tt < (long)steps; tt++) {
TomoP3DObject_core(A, N1, N2, N3, Z1, Z2, tmpstr2, C_t, y_t, x_t, z_t, a_t, b_t, c_t, phi1_t, phi2_t, phi3_t, tt); /* python */
/* calculating new coordinates of an object */
if (distance != 0.0f) {
float t = d_step / distance;
x_t = (1 - t)*x0 + t*x1;
y_t = (1 - t)*y0 + t*y1;
z_t = (1 - t)*z0 + t*z1;
}
else {
x_t = x0;
y_t = y0;
z_t = z0;
}
d_step += d_dist;
a_t += a_step;
b_t += b_step;
c_t += c_step;
C_t += C_step;
phi1_t += phi_rot_step1;
phi2_t += phi_rot_step2;
phi3_t += phi_rot_step3;
} /*time steps*/
} /*components loop*/
}
counter++;
}
}
}
}
}
fclose(fp);
return *A;
}
|
untied_task.c | // RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
#define TEST_NEED_PRINT_FRAME_FROM_OUTLINED_FN
#include "callback.h"
#include <omp.h>
// OMPT untied-task test driver: an untied task created by the master may be
// resumed by the other thread; the CHECK lines below pin the expected
// task_create/task_schedule events and task-frame addresses.
// NOTE: the "// CHECK" comments are consumed by FileCheck and are part of the
// test's behavior — they must not be reworded.
int main()
{
  int condition=0;
  omp_set_nested(0);
  print_frame(0);
  #pragma omp parallel num_threads(2)
  {
    print_frame_from_outlined_fn(1);
    print_ids(0);
    print_ids(1);
    print_frame(0);
    // master creates an untied task which the sibling thread will pick up
    #pragma omp master
    {
      print_ids(0);
      #pragma omp task untied shared(condition)
      {
        OMPT_SIGNAL(condition);
        print_frame(1);
        print_ids(0);
        print_ids(1);
        print_ids(2);
        // if(0): undeferred child task, executed immediately in place
        #pragma omp task if(0)
        {
          print_ids(0);
          print_ids(1);
          print_ids(2);
        }
        print_ids(0);
        print_ids(1);
        print_ids(2);
      }
      // hold the master until the untied task has started elsewhere
      OMPT_WAIT(condition,1);
      print_ids(0);
    }
    #pragma omp barrier
    print_ids(0);
  }

// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_create'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_task_schedule'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_implicit_task'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquire'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_acquired'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_mutex_released'

// CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]

// make sure initial data pointers are null
// CHECK-NOT: 0: new_task_data initially not null

// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: __builtin_frame_address(0)=[[MAIN_REENTER:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=0x{{[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=2, codeptr_ra=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:[0-9]+]]

// nested parallel masters
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address({{.}})=[[EXIT:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID:[0-9]+]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=0x{{[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
// <- ompt_event_task_create would be expected here
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_task_create: parent_task_id=[[IMPLICIT_TASK_ID]], parent_task_frame.exit=[[EXIT]], parent_task_frame.reenter=0x{{[0-f]+}}, new_task_id=[[TASK_ID:[0-9]+]], codeptr_ra=[[TASK_FUNCTION:0x[0-f]+]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
// explicit barrier after master
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=0x{{[0-f]+}}
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// implicit barrier parallel
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]

// CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address({{.}})=[[EXIT:0x[0-f]+]]
// CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=0x{{[0-f]+}}
// CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(0)=[[REENTER:0x[0-f]+]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=0x{{[0-f]+}}
// this is expected to come earlier and at MASTER:
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_task_schedule: first_task_id=[[IMPLICIT_TASK_ID]], second_task_id=[[TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: __builtin_frame_address(1)=[[TASK_EXIT:0x[0-f]+]]
// CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]], exit_frame=[[TASK_EXIT]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[THREAD_ID]]: task level 1: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[EXIT]], reenter_frame=0x{{[0-f]+}}
// CHECK: {{^}}[[THREAD_ID]]: task level 2: parallel_id=[[IMPLICIT_PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=0x{{[0-f]+}}
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_task_schedule: first_task_id=[[TASK_ID]], second_task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_task_end: task_id=[[TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]], exit_frame=[[NULL]], reenter_frame=[[NULL]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]

  return 0;
}
|
episerver_fmt_plug.c | /* *New* EPiServer cracker patch for JtR. Hacked together during Summer of
* 2012 by Dhiru Kholia <dhiru.kholia at gmail.com> for GSoC. Based on sample
* code by hashcat's atom.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* Obtaining hashes from EPiServer 6.x:
*
* sqlcmd -L
* sqlcmd -S <server> -U sa -P <password> *
* 1> SELECT name from sys.databases
* 2> go
* 1> use <database name>
* 2> select Email, PasswordFormat, PasswordSalt, Password from aspnet_Membership
* 3> go
*
* JtR Input Format:
*
* user:$episerver$*version*base64(salt)*base64(hash)
*
* Where,
*
* version == 0, for EPiServer 6.x standard config / .NET <= 3.5 SHA1 hash/salt format.
* hash = sha1(salt | utf16bytes(password)), PasswordFormat == 1 *
*
* version == 1, EPiServer 6.x + .NET >= 4.x SHA256 hash/salt format,
* PasswordFormat == ?
*
* Improved performance, JimF, July 2012.
* Full Unicode support, magnum, August 2012.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_episerver;
#elif FMT_REGISTERS_H
john_register_one(&fmt_episerver);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "sha.h"
#include "sha2.h"
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "base64.h"
#include "unicode.h"
#include "memdbg.h"
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // core i7 no HT
#endif
#endif
#define FORMAT_LABEL "EPiServer"
#define FORMAT_NAME ""
#define FORMAT_TAG "$episerver$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define BINARY_SIZE 32 /* larger of the two */
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct custom_salt)
#define EFFECTIVE_SALT_SIZE 16
#define SALT_ALIGN 4
#ifdef SIMD_COEF_32
#include "simd-intrinsics.h"
#include "johnswap.h"
#define NBKEYS_SHA1 (SIMD_COEF_32 * SIMD_PARA_SHA1)
#define NBKEYS_SHA256 (SIMD_COEF_32 * SIMD_PARA_SHA256)
#define NBKEYS (SIMD_COEF_32 * SIMD_PARA_SHA1 * SIMD_PARA_SHA256)
#define HASH_IDX_IN (((unsigned int)index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32)
#define HASH_IDX_SHA1 (((unsigned int)index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*5*SIMD_COEF_32)
#define HASH_IDX_SHA256 (((unsigned int)index&(SIMD_COEF_32-1))+(unsigned int)index/SIMD_COEF_32*8*SIMD_COEF_32)
#define HASH_IDX_OUT (cur_salt->version == 0 ? HASH_IDX_SHA1 : HASH_IDX_SHA256)
#define GETPOS(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*4*SIMD_COEF_32 ) //for endianness conversion
#define ALGORITHM_NAME "SHA1/SHA256 " SHA256_ALGORITHM_NAME
#define PLAINTEXT_LENGTH 19 // (64 - 9 - 16)/2
#define MIN_KEYS_PER_CRYPT NBKEYS
#define MAX_KEYS_PER_CRYPT NBKEYS
#else
#define ALGORITHM_NAME "SHA1/SHA256 32/" ARCH_BITS_STR " " SHA2_LIB
#define PLAINTEXT_LENGTH 32
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 16
#endif
static struct fmt_tests episerver_tests[] = {
{"$episerver$*0*fGJ2wn/5WlzqQoDeCA2kXA==*UQgnz/vPWap9UeD8Dhaw3h/fgFA=", "testPassword"},
{"$episerver$*0*fGJ2wn/5WlzqQoDeCA2kXA==*uiP1YrZlVcHESbfsRt/wljwNeYU=", "sss"},
{"$episerver$*0*fGJ2wn/5WlzqQoDeCA2kXA==*dxTlKqnxaVHs0210VcX+48QDonA=", "notused"},
// hashes from pass_gen.pl, including some V1 data
{"$episerver$*0*OHdOb002Z1J6ZFhlRHRzbw==*74l+VCC9xkGP27sNLPLZLRI/O5A", "test1"},
{"$episerver$*0*THk5ZHhYNFdQUDV1Y0hScg==*ik+FVrPkEs6LfJU88xl5oBRoZjY", ""},
{"$episerver$*1*aHIza2pUY0ZkR2dqQnJrNQ==*1KPAZriqakiNvE6ML6xkUzS11QPREziCvYkJc4UtjWs","test1"},
{"$episerver$*1*RUZzRmNja0c5NkN0aDlMVw==*nh46rc4vkFIL0qGUrKTPuPWO6wqoESSeAxUNccEOe28","thatsworking"},
{"$episerver$*1*cW9DdnVVUnFwM2FobFc4dg==*Zr/nekpDxU5gjt+fzTSqm0j/twZySBBW44Csoai2Fug","test3"},
{"$episerver$*0*b0lvUnlWbkVlSFJQTFBMeg==*K7NAoB/wZfZjsG4DuMkNqKYwfTs", "123456789"},
{NULL}
};
#ifdef SIMD_COEF_32
static uint32_t *saved_key;
static uint32_t *crypt_out;
#else
static char (*saved_key)[3 * PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
#endif
static struct custom_salt {
int version;
unsigned char esalt[18 + 1]; /* base64 decoding, 24 / 4 * 3 = 18 */
} *cur_salt;
#if defined(_OPENMP) || defined(SIMD_COEF_32)
static int omp_t = 1;
#endif
#ifdef SIMD_COEF_32
static void episerver_set_key_utf8(char *_key, int index);
static void episerver_set_key_CP(char *_key, int index);
#endif
/* Per-run setup: scales key batches for OpenMP, allocates the key/output
 * buffers (SIMD-aligned when vectorized) and swaps in an encoding-specific
 * set_key() implementation when needed. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	/* one OMP_SCALE-sized batch of keys per thread */
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
	/* vector buffers must be SIMD-aligned; sizes are in 32-bit words */
	saved_key = mem_calloc_align(self->params.max_keys_per_crypt*SHA_BUF_SIZ,
	                             sizeof(*saved_key), MEM_ALIGN_SIMD);
	crypt_out = mem_calloc_align(self->params.max_keys_per_crypt*BINARY_SIZE/4,
	                             sizeof(*crypt_out), MEM_ALIGN_SIMD);
#else
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_out));
#endif
#ifdef SIMD_COEF_32
	if (options.target_enc == UTF_8) {
		/* UTF-8 may need up to 3 bytes per UTF-16 code unit */
		self->methods.set_key = episerver_set_key_utf8;
		self->params.plaintext_length = PLAINTEXT_LENGTH * 3;
	}
	else if (options.target_enc != ISO_8859_1 &&
	         options.target_enc != ASCII)
		self->methods.set_key = episerver_set_key_CP;
#else
	if (options.target_enc == UTF_8)
		self->params.plaintext_length = PLAINTEXT_LENGTH * 3;
#endif
}
/* Tear-down counterpart of init(): release the per-run buffers. */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
}
/* Sanity-check one input line: "$episerver$*<version>*<b64 salt>*<b64 hash>".
 * Returns 1 when the string is well-formed, 0 otherwise.
 * Fix: the version token is now required to be exactly "0" or "1"; the
 * original test only looked at the first character, so malformed tokens
 * like "0x" or "12" slipped through to atoi() in get_salt(). */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ptr, *ctcopy, *keeptr;

	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	if (!(ctcopy = strdup(ciphertext)))
		return 0;
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LEN;	/* skip leading '$episerver$*' */
	/* get_salt() copies into a 256-byte buffer, so cap the total length */
	if (strlen(ciphertext) > 255)
		goto error;
	if (!(ptr = strtokm(ctcopy, "*")))
		goto error;
	/* version must be exactly '0' (SHA-1) or '1' (SHA-256) */
	if ((ptr[0] != '0' && ptr[0] != '1') || ptr[1])
		goto error;
	if (!(ptr = strtokm(NULL, "*"))) /* salt */
		goto error;
	if (strlen(ptr) > 24)	/* decodes to <= 18 bytes, fits esalt[19] */
		goto error;
	if (!(ptr = strtokm(NULL, "*"))) /* hash */
		goto error;
	if (strlen(ptr) > 44)	/* 32 bytes of SHA-256, base64-encoded */
		goto error;
	if ((ptr = strtokm(NULL, "*"))) /* no trailing fields allowed */
		goto error;
	MEM_FREE(keeptr);
	return 1;
error:
	MEM_FREE(keeptr);
	return 0;
}
/* Parse version and base64 salt out of an already-validated ciphertext.
 * Returns a pointer to a static struct, as the JtR salt API expects. */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char _ctcopy[256], *ctcopy=_ctcopy;
	char *p;
	memset(&cs, 0, sizeof(cs));
	/* work on a bounded copy; valid() guarantees length <= 255 */
	strncpy(ctcopy, ciphertext, 255);
	ctcopy[255] = 0;
	ctcopy += FORMAT_TAG_LEN;	/* skip over "$episerver$*" */
	p = strtokm(ctcopy, "*");
	cs.version = atoi(p);
	p = strtokm(NULL, "*");
	/* <= 24 base64 chars decode to <= 18 bytes, fitting cs.esalt[19] */
	base64_decode(p, strlen(p), (char*)cs.esalt);
	return (void *)&cs;
}
/* Decode the base64 hash (last '*'-separated field) into a static,
 * zero-padded buffer of BINARY_SIZE bytes; SIMD builds byte-swap it so
 * comparisons match the vectorized output layout. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE + 1];
		ARCH_WORD dummy;	/* forces alignment for word access */
	} buf;
	unsigned char *out = buf.c;
	char *p;
	memset(buf.c, 0, sizeof(buf.c));
	p = strrchr(ciphertext, '*') + 1;
	base64_decode(p, strlen(p), (char*)out);
#ifdef SIMD_COEF_32
	alter_endianity(out, BINARY_SIZE);
#endif
	return out;
}
/* Partial-hash accessors for JtR's lookup tables: expose the low bits of
 * each computed hash (first 32-bit word of the digest). */
#ifdef SIMD_COEF_32
static int get_hash_0 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_0; }
static int get_hash_1 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_1; }
static int get_hash_2 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_2; }
static int get_hash_3 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_3; }
static int get_hash_4 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_4; }
static int get_hash_5 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_5; }
static int get_hash_6 (int index) { return crypt_out[HASH_IDX_OUT] & PH_MASK_6; }
#else
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
#endif
/* Install the salt for the coming crypt_all() calls. In SIMD builds the
 * 16 effective salt bytes are copied into the head of every lane's key
 * buffer, since the hashed message is salt||password. */
static void set_salt(void *salt)
{
#ifdef SIMD_COEF_32
	int index, j;
	cur_salt = (struct custom_salt *)salt;
	for (index = 0; index < MAX_KEYS_PER_CRYPT*omp_t; ++index)
		for (j = 0; j < EFFECTIVE_SALT_SIZE; ++j) // copy the salt to vector buffer
			((unsigned char*)saved_key)[GETPOS(j, index)] = ((unsigned char*)cur_salt->esalt)[j];
#else
	cur_salt = (struct custom_salt *)salt;
#endif
}
/* Hash all queued keys: SHA-1 (version 0) or SHA-256 (version 1) over
 * salt || UTF-16LE(password). SIMD builds process a full vector batch
 * per iteration; the scalar path converts each key on the fly. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
#ifdef SIMD_COEF_32
	/* salt and key are already laid out in saved_key by set_salt()/set_key() */
	for (index = 0; index < count; index += (cur_salt->version == 0 ? NBKEYS_SHA1 : NBKEYS_SHA256))
	{
		uint32_t *in = &saved_key[HASH_IDX_IN];
		uint32_t *out = &crypt_out[HASH_IDX_OUT];
		if(cur_salt->version == 0)
			SIMDSHA1body(in, out, NULL, SSEi_MIXED_IN);
		else if(cur_salt->version == 1)
			SIMDSHA256body(in, out, NULL, SSEi_MIXED_IN);
	}
#else
	for (index = 0; index < count; index++)
	{
		unsigned char passwordBuf[PLAINTEXT_LENGTH*2+2];
		int len;
		/* convert the stored key to UTF-16; negative return signals
		 * truncation, in which case use the length actually converted */
		len = enc_to_utf16((UTF16*)passwordBuf, PLAINTEXT_LENGTH,
		        (UTF8*)saved_key[index], strlen(saved_key[index]));
		if (len < 0)
			len = strlen16((UTF16*)passwordBuf);
		len <<= 1;	/* code units -> bytes */
		if(cur_salt->version == 0) {
			SHA_CTX ctx;
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, cur_salt->esalt, EFFECTIVE_SALT_SIZE);
			SHA1_Update(&ctx, passwordBuf, len);
			SHA1_Final((unsigned char*)crypt_out[index], &ctx);
		}
		else if(cur_salt->version == 1) {
			SHA256_CTX ctx;
			SHA256_Init(&ctx);
			SHA256_Update(&ctx, cur_salt->esalt, EFFECTIVE_SALT_SIZE);
			SHA256_Update(&ctx, passwordBuf, len);
			SHA256_Final((unsigned char*)crypt_out[index], &ctx);
		}
	}
#endif
	return count;
}
/* Fast scan over all computed hashes: does any one match the first
 * 32 bits of the target binary? */
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++) {
#ifdef SIMD_COEF_32
		if (*((uint32_t*)binary) == crypt_out[HASH_IDX_OUT])
#else
		if (*((ARCH_WORD_32*)binary) == crypt_out[index][0])
#endif
			return 1;
	}
	return 0;
}
/* Compare the first 32 bits of one candidate's hash against the target. */
static int cmp_one(void *binary, int index)
{
#if SIMD_COEF_32
	return *((uint32_t*)binary) == crypt_out[HASH_IDX_OUT];
#else
	return (*((ARCH_WORD_32*)binary) == crypt_out[index][0]);
#endif
}
/* Full-digest comparison: 20 bytes for SHA-1 salts, 32 for SHA-256.
 * SIMD builds first gather the interleaved digest words into a flat
 * buffer before comparing. */
static int cmp_exact(char *source, int index)
{
	void *binary = get_binary(source);
#if SIMD_COEF_32
	uint32_t out[BINARY_SIZE/4];
	int i;
	for (i = 0; i < BINARY_SIZE/4; ++i)
		out[i] = crypt_out[HASH_IDX_OUT + i*SIMD_COEF_32];
	if(cur_salt->version == 0)
		return !memcmp(binary, out, 20);
	else
		return !memcmp(binary, out, BINARY_SIZE);
#else
	if(cur_salt->version == 0)
		return !memcmp(binary, crypt_out[index], 20);
	else
		return !memcmp(binary, crypt_out[index], BINARY_SIZE);
#endif
}
/* Default set_key for ISO-8859-1/ASCII input. SIMD build: widen each
 * byte to a UTF-16 code unit and store two units per 32-bit big-endian
 * word after the 16 salt bytes of this lane's buffer; the SHA message
 * length in bits goes into word 15. Scalar build just keeps the
 * plaintext for crypt_all() to convert. */
static void episerver_set_key(char *_key, int index)
{
#ifdef SIMD_COEF_32
	unsigned char *key = (unsigned char*)_key;
	uint32_t *keybuf = &saved_key[HASH_IDX_IN];
	uint32_t *keybuf_word = keybuf + 4*SIMD_COEF_32; // skip over the salt
	unsigned int len, temp2;
	/* length counts UTF-16 units, starting after the salt */
	len = EFFECTIVE_SALT_SIZE >> 1;
	while((temp2 = *key++)) {
		unsigned int temp;
		if ((temp = *key++))
		{
			*keybuf_word = JOHNSWAP((temp << 16) | temp2);
		}
		else
		{
			/* odd length: second half carries the 0x80 stop bit */
			*keybuf_word = JOHNSWAP((0x80 << 16) | temp2);
			len++;
			goto key_cleaning;
		}
		len += 2;
		keybuf_word += SIMD_COEF_32;
	}
	*keybuf_word = (0x80U << 24);
key_cleaning:
	/* zero out the remainder of a previous, longer key */
	keybuf_word += SIMD_COEF_32;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	/* message length in bits = UTF-16 units * 16 */
	keybuf[15*SIMD_COEF_32] = len << 4;
#else
	strcpy(saved_key[index], _key);
#endif
}
#ifdef SIMD_COEF_32
/* Codepage variant of episerver_set_key(): identical buffer layout, but
 * each input byte is first mapped through the CP_to_Unicode table for
 * the active single-byte codepage. */
static void episerver_set_key_CP(char *_key, int index)
{
	unsigned char *key = (unsigned char*)_key;
	uint32_t *keybuf = &saved_key[HASH_IDX_IN];
	uint32_t *keybuf_word = keybuf + 4*SIMD_COEF_32; // skip over the salt
	unsigned int len, temp2;
	/* length counts UTF-16 units, starting after the salt */
	len = EFFECTIVE_SALT_SIZE >> 1;
	while((temp2 = *key++)) {
		unsigned int temp;
		temp2 = CP_to_Unicode[temp2];
		if ((temp = *key++))
		{
			temp = CP_to_Unicode[temp];
			*keybuf_word = JOHNSWAP((temp << 16) | temp2);
		}
		else
		{
			/* odd length: second half carries the 0x80 stop bit */
			*keybuf_word = JOHNSWAP((0x80 << 16) | temp2);
			len++;
			goto key_cleaning;
		}
		len += 2;
		keybuf_word += SIMD_COEF_32;
	}
	*keybuf_word = (0x80U << 24);
key_cleaning:
	/* zero out the remainder of a previous, longer key */
	keybuf_word += SIMD_COEF_32;
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	/* message length in bits = UTF-16 units * 16 */
	keybuf[15*SIMD_COEF_32] = len << 4;
}
#endif
#ifdef SIMD_COEF_32
/* UTF-8 set_key for SIMD builds: decode the UTF-8 key on the fly and
 * store UTF-16 code units big-endian into this lane's buffer after the
 * 16 salt bytes, two units (chl = first, chh = second) per 32-bit word.
 * Non-BMP characters become surrogate pairs. Word 15 receives the SHA
 * message length in bits.
 *
 * Bug fix: in the decoder for the second code unit (chh), the 4-byte
 * sequence branch ("case 3") accumulated continuation bytes into chl
 * instead of chh, corrupting both units whenever a 4-byte UTF-8
 * character landed in the second half of a word. It now matches the
 * case 2/case 1 branches of the same switch. */
static void episerver_set_key_utf8(char *_key, int index)
{
	const UTF8 *source = (UTF8*)_key;
	uint32_t *keybuf = &saved_key[HASH_IDX_IN];
	uint32_t *keybuf_word = keybuf + 4*SIMD_COEF_32; // skip over the salt
	UTF32 chl, chh = 0x80;
	unsigned int len;
	/* length counts UTF-16 units, starting after the salt */
	len = EFFECTIVE_SALT_SIZE >> 1;
	while (*source) {
		chl = *source;
		if (chl >= 0xC0) {
			unsigned int extraBytesToRead = opt_trailingBytesUTF8[chl & 0x3f];
			switch (extraBytesToRead) {
			case 3:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fall through */
			case 2:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fall through */
			case 1:
				++source;
				if (*source) {
					chl <<= 6;
					chl += *source;
				} else
					goto bailout;
				/* fall through */
			case 0:
				break;
			default:
				goto bailout;
			}
			chl -= offsetsFromUTF8[extraBytesToRead];
		}
		source++;
		len++;
		if (chl > UNI_MAX_BMP) {
			/* needs a surrogate pair; truncate if the buffer is full */
			if (len == PLAINTEXT_LENGTH + (EFFECTIVE_SALT_SIZE>>1)) {
				chh = 0x80;
				*keybuf_word = JOHNSWAP((chh << 16) | chl);
				keybuf_word += SIMD_COEF_32;
				break;
			}
#define halfBase 0x0010000UL
#define halfShift 10
#define halfMask 0x3FFUL
#define UNI_SUR_HIGH_START  (UTF32)0xD800
#define UNI_SUR_LOW_START   (UTF32)0xDC00
			chl -= halfBase;
			chh = (UTF16)((chl & halfMask) + UNI_SUR_LOW_START);
			chl = (UTF16)((chl >> halfShift) + UNI_SUR_HIGH_START);
			len++;
		} else if (*source && len < PLAINTEXT_LENGTH + (EFFECTIVE_SALT_SIZE>>1)) {
			chh = *source;
			if (chh >= 0xC0) {
				unsigned int extraBytesToRead =
					opt_trailingBytesUTF8[chh & 0x3f];
				switch (extraBytesToRead) {
				case 3:
					++source;
					if (*source) {
						chh <<= 6;	/* fixed: was chl */
						chh += *source;	/* fixed: was chl */
					} else
						goto bailout;
					/* fall through */
				case 2:
					++source;
					if (*source) {
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
					/* fall through */
				case 1:
					++source;
					if (*source) {
						chh <<= 6;
						chh += *source;
					} else
						goto bailout;
					/* fall through */
				case 0:
					break;
				default:
					goto bailout;
				}
				chh -= offsetsFromUTF8[extraBytesToRead];
			}
			source++;
			len++;
		} else {
			/* trailing lone unit: second half carries the stop bit */
			chh = 0x80;
			*keybuf_word = JOHNSWAP((chh << 16) | chl);
			keybuf_word += SIMD_COEF_32;
			break;
		}
		*keybuf_word = JOHNSWAP((chh << 16) | chl);
		keybuf_word += SIMD_COEF_32;
	}
	if (chh != 0x80 || len == (EFFECTIVE_SALT_SIZE>>1)) {
		*keybuf_word = (0x80U << 24);
		keybuf_word += SIMD_COEF_32;
	}
bailout:
	/* zero out the remainder of a previous, longer key */
	while(*keybuf_word) {
		*keybuf_word = 0;
		keybuf_word += SIMD_COEF_32;
	}
	/* message length in bits = UTF-16 units * 16 */
	keybuf[15*SIMD_COEF_32] = len << 4;
}
#endif
/* Reconstruct the plaintext for reporting. SIMD build: recover the key
 * length from the bit count in word 15 (minus the 16 salt bytes), read
 * the UTF-16 units back out of the lane buffer and convert them to the
 * target encoding. */
static char *get_key(int index)
{
#ifdef SIMD_COEF_32
	static UTF16 out[PLAINTEXT_LENGTH + 1];
	unsigned int i,s;
	/* (bits >> 3) = total bytes; -16 salt bytes; >> 1 = UTF-16 units */
	s = ((saved_key[HASH_IDX_IN + 15*SIMD_COEF_32] >> 3) - 16) >> 1;
	for(i = 0; i < s; i++)
		out[i] = ((unsigned char*)saved_key)[GETPOS(16 + (i<<1), index)] | (((unsigned char*)saved_key)[GETPOS(16 + (i<<1) + 1, index)] << 8);
	out[i] = 0;
	return (char*)utf16_to_enc(out);
#else
	return saved_key[index];
#endif
}
/* report hash type: 1 SHA1, 2 SHA256 */
static unsigned int hash_type(void *salt)
{
struct custom_salt *my_salt = salt;
return (unsigned int) (1 + my_salt->version);
}
/* Format descriptor registered with John the Ripper: static parameters,
 * the self-test vectors, and the method table wiring up the functions
 * defined above. */
struct fmt_main fmt_episerver = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8,
		{
			"hash type [1: SHA1 2:SHA256]",
		},
		{ FORMAT_TAG },
		episerver_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			hash_type,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		episerver_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
GB_unaryop__abs_uint64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint64_uint32
// op(A') function: GB_tran__abs_uint64_uint32
// C type: uint64_t
// A type: uint32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = (uint64_t) Ax [p] for all p ("abs" is the identity for an
 * unsigned input); iterations split statically over nthreads.
 * NOTE: this file is auto-generated — regenerate rather than hand-edit. */
GrB_Info GB_unop__abs_uint64_uint32
(
    uint64_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose A while casting uint32_t -> uint64_t;
 * the loop body is supplied by the included template (phase 2 of 2).
 * NOTE: this file is auto-generated — regenerate rather than hand-edit. */
GrB_Info GB_tran__abs_uint64_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
#endif
}
#endif
|
evolve_turing.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <time.h>
#include <omp.h>
#include "turing.h"
#include "evolve_turing.h"
#include "pqueue.h"
#include "common.h"
tTransTableItem * Pregen_tuples;
int Pregen_tuples_cnt;
#pragma omp threadprivate(Pregen_tuples, Pregen_tuples_cnt)
inline int get_max_steps(int input_len) {
return input_len*input_len*input_len;
}
/* Fill *metrics for one tape: per-symbol frequency plus the number of
 * adjacent pairs already in non-decreasing order; the tape keeps a
 * back-pointer to its metrics.
 * NOTE(review): the scan starts at index 1, so content[0] is never
 * counted — presumably a leading BLANK/delimiter cell; confirm against
 * the tape layout in turing.h. */
void calc_tape_metrics(tTape * tape, tTapeMetrics *metrics) {
    signed char sym=0, prevSym;
    int i, first=1;
    int symbols=sizeof(metrics->symbol_count)/sizeof(*(metrics->symbol_count));
    // init of the symbol frequency counter
    for (i=0; i<symbols; i++) metrics->symbol_count[i]=0;
    metrics->correct_order=0;
    // calculate the symbol frequency and number of correctly ordered pairs
    for (i=1; i<tape->input_len; i++) {
        prevSym=sym;
        sym=tape->content[i];
        if (first) first=0;  /* first counted cell has no predecessor */
        else
        if (sym >= prevSym) metrics->correct_order++;
        metrics->symbol_count[sym]++;
    }
    tape->metrics=metrics;
}
/* Compute metrics for n tapes, pairing tapes[i] with metrics[i]. */
void calc_all_tapes_metrics(tTape * tapes, tTapeMetrics * metrics, int n) {
    int idx;
    for (idx = 0; idx < n; idx++)
        calc_tape_metrics(&tapes[idx], &metrics[idx]);
}
/* Prepare a scratch tape: copy the input symbols from orig_tape and pad
 * the remainder of the fixed-size tape with BLANK. */
inline void init_tape(tTape * orig_tape, tTape * work_tape) {
    int len=orig_tape->input_len;
    memcpy(work_tape->content, orig_tape->content, len);
    memset(work_tape->content+len, BLANK, TAPE_LEN-len);
    work_tape->input_len=orig_tape->input_len;
}
/* Pre-generate every possible transition tuple (state, symbol, shift) so
 * random individuals can be drawn by index in generate_population().
 * Pregen_tuples/Pregen_tuples_cnt are threadprivate, so each OpenMP
 * thread calling this gets its own table.
 * Fix: the malloc() result was previously unchecked — an allocation
 * failure would have crashed later on a NULL dereference. */
void init_evolution(int states, int symbols) {
    int symbol, state, shift, i=0;
    /**
     * (states+1), because final state with nr. "states" is in the table content,
     * but is not used as index!
     * (symbols+1), because symbol=-1 (Empty=no write) is in the table content
     * but is not used as index!
     * */
    Pregen_tuples_cnt=(states+1)*(symbols+1)*SHIFTS;
    Pregen_tuples=malloc(Pregen_tuples_cnt * sizeof(tTransTableItem));
    if (Pregen_tuples == NULL) {
        fprintf(stderr, "init_evolution: cannot allocate %d transition tuples\n",
                Pregen_tuples_cnt);
        exit(EXIT_FAILURE);
    }
    for (shift=0; shift<SHIFTS; shift++)
        for (symbol=-1; symbol<symbols; symbol++)
            for (state=0; state<=states; state++) {
                Pregen_tuples[i].state=state;
                Pregen_tuples[i].symbol=symbol;
                Pregen_tuples[i++].shift=shift;
            }
}
/* Run machine t on one work tape (at most max_steps steps) and score the
 * result in roughly [0..1]: 0.5*correctness + 0.25*time + 0.25*space.
 * Correctness averages "symbol counts preserved" with the gain in
 * correctly-ordered adjacent pairs relative to the original tape. */
double eval_sorting_fitness(tTransitions * t, tTape * tape, tTapeMetrics * orig_metrics) {
    /**
     * first, we measure the number of correctly ordered pairs
     * and compare the count of the distinct symbols with the original.
     * Then we calculate the fitness from these 2 numbers + nr. of steps and new_symbols written
     */
    tStatus status = { 0, 0, 0, 0, 0};
    tTapeMetrics new_metrics;
    int i, correct_count, orig_unordered_cnt, delta_ordered_cnt, max_steps;
    double fit_correct, fit_time, fit_space;
    // init of the symbol frequency counter
    max_steps=get_max_steps(tape->input_len);
    turing(tape, t, max_steps, &status);
    /* for completely wrong results, there is no need to calculate fitness...
    if (status.error<0) return -1;
    */
    calc_tape_metrics(tape, &new_metrics);
    /* how many symbol frequencies survived the run unchanged? */
    for (i=0, correct_count=0; i<t->symbols; i++)
        if (orig_metrics->symbol_count[i]==new_metrics.symbol_count[i]) correct_count++;
    orig_unordered_cnt=tape->input_len-orig_metrics->correct_order-2-1; // -2=two BLANKs, -1 = usual "magic 1"
    delta_ordered_cnt=new_metrics.correct_order - orig_metrics->correct_order;
    if (orig_unordered_cnt<1) {
        orig_unordered_cnt=1; //can't divide by 0
        if (delta_ordered_cnt>=0) delta_ordered_cnt=1;
    }
    /**
     * Correctness = 0.5*Correct_symbol_count + 0.5*Delta_of_correctly_ordered_pairs
     */
    fit_correct=((double)correct_count/t->symbols + (double)delta_ordered_cnt/orig_unordered_cnt)/2;
    /* fewer steps/writes and a smaller head excursion both score higher */
    fit_time=1-(double)(status.steps + status.writes)/(2*max_steps);
    fit_space=1-(double)(2+status.head_max-tape->input_len)/(2+TAPE_LEN-tape->input_len);
    if (log_level>=LOG_DEBUG_3) {
        printf("Fitness: correctness=%.2lf, time complexity=%.2lf, space complexity=%.2lf\n",
               fit_correct, fit_time, fit_space);
    }
    return 0.5*fit_correct + 0.25*fit_time + 0.25*fit_space;
}
/* Evaluate machine t on n training tapes and return the summed fitness
 * (-1 as soon as any single tape scores negative). Each resulting tape
 * is appended to tape_log as a comma-separated line.
 * NOTE(review): the per-symbol sprintf is bounds-guarded, but the
 * newline sprintf below is not — with many tapes this could creep past
 * the 10-byte slack at the end of TAPE_LOG_SIZE; verify the caller's
 * buffer sizing. */
double eval_sorting_fitness_n_tapes(tTransitions * t, tTape * orig_tapes, int n, char * tape_log) {
    tTape work_tapes[n],   /* VLA: one scratch tape per training tape */
        * work_tape=work_tapes, * orig_tape=orig_tapes;
    char * tape_log_start=tape_log;
    double fitness, result=0;
    int i, j;
    for (i=0; i<n; i++, orig_tape++, work_tape++) {
        init_tape(orig_tape, work_tape);
        fitness=eval_sorting_fitness(t, work_tape, orig_tape->metrics);
        if (fitness<0) return -1;
        else result+=fitness;
        for (j=0; j<work_tape->input_len; j++)
            if (tape_log < tape_log_start + TAPE_LOG_SIZE - 10)
                tape_log+=sprintf(tape_log, "%d,", work_tape->content[j]);
        tape_log+=sprintf(tape_log, "\n");
        if (log_level>=LOG_ALL_2)
            puts(tape_log_start);
    }
    if (log_level>=LOG_ALL_2) printf("Fitness sum=%.2lf\n", result);
    return result;
}
// Shared seed offset: bumped atomically by every caller before srand(),
// so concurrent threads do not all reseed rand() with the same value.
unsigned long seed;
/**
 * Fill `population` with params->population_size random transition tables
 * and point each population_fitness[i].table at its individual's table.
 *
 * Each transition is drawn uniformly from the pre-generated tuple pool
 * (Pregen_tuples). NOTE(review): rand()/srand() share hidden global
 * state, so draws from concurrent threads still interleave even with the
 * atomic seed bump - confirm this is acceptable for reproducibility.
 */
void generate_population(tTransTableItem * population, tIndividual * population_fitness,
		tParams * params) {
	int i, st, sy;
	tTransTableItem * individual, * transition;
	// NOTE(review): `trans` is initialized and its table field assigned
	// below, but it is never read in this function - looks vestigial.
	tTransitions trans={params->states, params->symbols};
#pragma omp atomic
	seed+=10000;
	srand (seed+time(NULL));
	individual=transition=population;
	for (i=0; i<params->population_size; i++) { // for all the individuals
		if (log_level>=LOG_DEBUG_3)
			printf("Generating individual %d\n", i);
		for (st=0; st<params->states; st++) // for all their states
			for (sy=0; sy<params->symbols; sy++) // for all their symbols
				*transition++=Pregen_tuples[rand()%Pregen_tuples_cnt];
		trans.table=population_fitness[i].table=individual;
		individual=transition; // next individual starts right after this table
	}
}
inline int nr_of_best(int generation, int population_size) {
int divisor;
if (population_size>100) divisor=4;//+generation/2;
else divisor=2;//+generation/4;
if (divisor*2 > population_size) return 2;
else return population_size/divisor;
}
inline int nr_of_kids(int generation, int i, int population_size) {
return 10;
if (population_size>10000) return population_size/100;
else if (population_size>1000) return population_size/10;
else if (population_size>100) return population_size/5;
else return population_size/3;
}
/**
 * Derive `kid` from `parent`: copy the whole transition table, then
 * overwrite a random number (0..table_size-1, possibly zero) of random
 * entries with tuples drawn from the pre-generated pool.
 */
inline void mutate(tIndividual * parent, tIndividual * kid, int states, int symbols) {
	const int table_size = states * symbols;
	int remaining = rand() % table_size; // 0 is possible: kid stays a clone
	int idx;
	// clone the parent's table first
	for (idx = 0; idx < table_size; idx++)
		kid->table[idx] = parent->table[idx];
	// then apply the random point mutations
	while (remaining-- > 0) {
		idx = rand() % table_size;
		kid->table[idx] = Pregen_tuples[rand() % Pregen_tuples_cnt];
	}
}
/**
 * Write `individual` to disk as a Graphviz state diagram plus a raw dump.
 *
 * Creates "<output>/<fitness9>-<thread>-<generation>-<restarts>.gv" with
 * the FSM graph, and the same name + ".txt" with the transition triples
 * and the tape log. Also prints a one-line summary at LOG_BEST_1.
 *
 * @param individual individual to dump (table + fitness)
 * @param generation generation at which it was found
 * @param params     run parameters used in the graph header and path
 * @param thread_id  OpenMP thread that found it (part of the file name)
 * @param tape_log   textual tape contents appended to the .txt file
 * @param restarts   number of population restarts so far
 */
void dump(tIndividual * individual, ulong generation, tParams * params, int thread_id, char * tape_log, ulong restarts) {
	tTransTableItem * t = individual->table;
	int st, sy;
	unsigned long ulong_fit;
	char fname [255];
	FILE * f, *ft;
	// scale fitness to a 9-digit integer for sortable file names,
	// clamping so the multiplication cannot overflow unsigned long
	if (individual->fitness < ULONG_MAX/1e9)
		ulong_fit=1e8*individual->fitness;
	else
		ulong_fit=ULONG_MAX;
	sprintf(fname, "%s/%.9lu-%d-%lu-%lu.gv",
		params->output, ulong_fit, thread_id, generation, restarts);
	// NOTE: strcat mutates fname, so the second file is "<name>.gv.txt".
	// If the second fopen fails, `f` is never closed - harmless only
	// because we exit immediately.
	if ( (f=fopen(fname, "w"))==NULL ||
		(ft=fopen(strcat(fname,".txt"), "w"))==NULL) {
		fprintf(stderr, "Error: Can't open file for new graph, exiting.\n");
		exit(EXIT_FAILURE);
	}
	// NOTE(review): the accepting/final state "S12" is hard-coded below -
	// presumably the halting state; verify it matches params->states.
	fprintf(f,
		"digraph \"Finite state machine, fitness=%.6lf, "
		"population_size=%d, states=%d, symbols=%d, "
		"best_cnt=%d, kids_cnt=%d\" {\n"
		" rankdir=LR;\n"
		" size=\"8,5\"\n"
		"S12 [shape=doublecircle];\n"
		" node [shape = circle];\n",
		individual->fitness,
		params->population_size, params->states, params->symbols,
		params->best_cnt, params->kids_cnt
		);
	if (log_level>=LOG_BEST_1)
		printf("Fitness=%.6lf, thread_id=%d, generation=%lu, restarts=%lu\n",
			individual->fitness, thread_id, generation, restarts);
	// emit one edge per (state, symbol) transition; `t` walks the table
	for (st=0; st<params->states; st++) // for all their states
		for (sy=0; sy<params->symbols; sy++, t++) {// for all their symbols
			fprintf(ft, "{ %d, %d, %d },\n", t->state, t->symbol, t->shift);
			fprintf(f, " S%d -> S%d [ label = \"%d / %d, %s\" ];\n",
				st, t->state, sy, t->symbol, shift2str(t->shift));
		}
	fprintf(f, "}\n");
	fprintf(ft, "Tape content:\n%s", tape_log);
	fclose(f);
	fclose(ft);
}
/**
 * Main evolutionary loop for one OpenMP thread.
 *
 * Generates a random population, scores every individual on the sample
 * tapes, then repeatedly mutates the best-ranked individuals, replacing
 * the worst-ranked ones. Whenever a kid reaches priority-queue position 1
 * (the best slot, see new_pos==1 below) it is dumped to disk. If no such
 * improvement happens for params->degeneration_cnt generations, the whole
 * population is regenerated from scratch.
 *
 * NOTE(review): the while(1) loop never terminates, so despite the `int`
 * return type this function never returns - it runs until the process is
 * killed.
 *
 * @param params       run parameters (population size, states, symbols, ...)
 * @param sample_tapes training tapes the fitness is evaluated on
 * @param nr_of_tapes  number of tapes in sample_tapes
 */
int evolve_turing(tParams * params, tTape * sample_tapes, int nr_of_tapes) {
	int thread_id, population_size=params->population_size,
		symbols=params->symbols,
		states=params->states;
	// NOTE(review): these are stack VLAs - they can never be NULL, so the
	// allocation check below only ever catches pqueue_init() failure; an
	// oversized population will overflow the stack before reaching it.
	tTransTableItem population[population_size*symbols*states];
	tIndividual population_fitness [population_size],
		* parent, * new_kid_place;
	char tape_log[TAPE_LOG_SIZE]; // this is a bit unsafe - I should better calculate how big the log should be...
	tTransitions trans={states, symbols};
	pqueue_t * pqueue = pqueue_init(population_size);
	ulong generation=0, i, kid, new_pos,
		last_success_generation=0, restarts=0;
	//ulong best_cnt, kids_cnt ;
	double old_fitness;
	thread_id=omp_get_thread_num();
	if (population==NULL || population_fitness==NULL || pqueue==NULL) {
		fprintf(stderr, "Can't allocate memory for such a population size!\n");
		exit(-1);
	}
	init_evolution(states, symbols);
	generate_population(population, population_fitness, params);
	// initial scoring: rank every individual in the priority queue
	for (i=0; i<population_size; i++) {
		trans.table=population_fitness[i].table;
		population_fitness[i].fitness=eval_sorting_fitness_n_tapes(&trans, sample_tapes, nr_of_tapes, tape_log);
		pqueue_insert(pqueue, &population_fitness[i]);
	}
	while (1) {
		// for each of the best individuals in population:
		//best_cnt=nr_of_best(generation, population_size);
		// NOTE(review): i starts at 1 - presumably the pqueue is 1-indexed
		// with position 1 = best (consistent with new_pos==1 below); confirm.
		for (i=1; i<params->best_cnt; i++) {
			parent=pqueue_get(pqueue, i); // get the i-th top ranking individuals:
			old_fitness=parent->fitness;
			//kids_cnt=nr_of_kids(generation, i, population_size);
			for (kid=0; kid<params->kids_cnt; kid++) { // generate new kids:
				/** create new mutation of the i-th parent
				 * and store it in place of one of the worst individual.
				 * Note: It can happen that the parent becomes the worst individual inside this loop,
				 * and thus will be replaced by one of its kids/mutations, but that's...life.
				 */
				// position population_size = currently worst-ranked individual
				new_kid_place=pqueue_get(pqueue, population_size);
				mutate(parent, new_kid_place, states, symbols);
				trans.table=new_kid_place->table;
				new_kid_place->fitness=eval_sorting_fitness_n_tapes(&trans, sample_tapes, nr_of_tapes, tape_log);
				new_pos=pqueue_priority_changed(pqueue, old_fitness, population_size);
				if (new_pos==1) { // the kid became the new global best: persist it
					dump(new_kid_place, generation, params, thread_id, tape_log, restarts);
					last_success_generation=generation;
				}
			}
		}
		if (log_level>=LOG_BEST_1)
			printf("Generation %lu finished\n", generation);
		generation++;
		// stagnation: no new best for degeneration_cnt generations -> restart
		if (generation-last_success_generation > params->degeneration_cnt) {
			printf("Thread %d: point of degeneration reached. Generating the whole new population\n", thread_id);
			restarts++;
			last_success_generation=generation;
			pqueue_reset(pqueue);
			generate_population(population, population_fitness, params);
			for (i=0; i<population_size; i++) {
				trans.table=population_fitness[i].table;
				population_fitness[i].fitness=eval_sorting_fitness_n_tapes(&trans, sample_tapes, nr_of_tapes, tape_log);
				pqueue_insert(pqueue, &population_fitness[i]);
			}
		}
	}
}
|
ast-dump-openmp-target-teams-distribute-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) { // NOTE: the CHECK lines below match exact <line:col> source locations - do not add or remove lines in these functions
#pragma omp target teams distribute simd
  for (int i = 0; i < x; i++)
    ;
}
void test_two(int x, int y) { // same directive over a nested loop, no collapse clause
#pragma omp target teams distribute simd
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_three(int x, int y) { // nested loop with an explicit collapse(1) clause
#pragma omp target teams distribute simd collapse(1)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_four(int x, int y) { // nested loop with collapse(2)
#pragma omp target teams distribute simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_five(int x, int y, int z) { // collapse(2) on a triply nested loop
#pragma omp target teams distribute simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      for (int i = 0; i < z; i++)
        ;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-teams-distribute-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeSimdDirective {{.*}} <line:4:1, col:41>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeSimdDirective {{.*}} <line:10:1, col:41>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeSimdDirective {{.*}} <line:17:1, col:53>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:42, col:52>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:51> 'int'
// CHECK-NEXT: | | |-value: Int 1
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:51> 'int' 1
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPTargetTeamsDistributeSimdDirective {{.*}} <line:24:1, col:53>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:42, col:52>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:51> 'int'
// CHECK-NEXT: | | |-value: Int 2
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:51> 'int' 2
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPTargetTeamsDistributeSimdDirective {{.*}} <line:31:1, col:53>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:42, col:52>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:51> 'int'
// CHECK-NEXT: | |-value: Int 2
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:51> 'int' 2
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit {{.*}}
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (unnamed at {{.*}}ast-dump-openmp-target-teams-distribute-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
timer.h | /*
* timer.h
*
* Common timer functionality
*
* Created by John Linford on 4/8/08.
* Copyright 2008 Transatlantic Giraffe. All rights reserved.
*
*/
#ifndef __TIMER_H__
#define __TIMER_H__
/**************************************************
* Includes *
**************************************************/
#include <stdint.h>
/**************************************************
* Macros *
**************************************************/
#define NUM_TIMERS 8
/**************************************************
* Data types *
**************************************************/
/* Stopwatch for gathering metrics */
typedef struct stopwatch
{
    float start;    /* wallclock reading captured by timer_start() */
    float elapsed;  /* time accumulated across timer_start()/timer_stop() pairs */
} stopwatch_t;
/* Thread metrics */
typedef struct metrics
{
    /* One stopwatch per instrumented phase (names suggest what each
     * phase covers -- see where timer_start/timer_stop are called). */
    stopwatch_t wallclock;
    stopwatch_t array_init;
    stopwatch_t array_copy;
    stopwatch_t file_io;
    stopwatch_t x_discret;
    stopwatch_t y_discret;
    stopwatch_t z_discret;
    stopwatch_t chem;
    /* Sized so that the NUM_TIMERS stopwatches plus this buffer total
     * exactly 128 bytes -- presumably to give each metrics_t a fixed,
     * cache-line-friendly footprint (TODO confirm intent). */
    char name[128-NUM_TIMERS*sizeof(stopwatch_t)];
} metrics_t;
/**************************************************
* Globals *
**************************************************/
extern char* timer_names[NUM_TIMERS];
/**************************************************
* Function Prototypes *
**************************************************/
float elapsed_time();
void metrics_init( metrics_t* m, char* name);
/**************************************************
 *                Inline functions                 *
**************************************************/
/* Record the current wallclock reading as the stopwatch's start time.
 * The critical section serializes calls from inside an OpenMP parallel
 * region (presumably because elapsed_time() or the shared stopwatch is
 * not safe to touch concurrently -- TODO confirm). */
static inline void timer_start( stopwatch_t* t)
{
#pragma omp critical
    {
        t->start = elapsed_time();
    }
}
/* Accumulate the time since the matching timer_start() into t->elapsed.
 * Must be paired with a prior timer_start() on the same stopwatch; the
 * critical section mirrors the one in timer_start(). */
static inline void timer_stop( stopwatch_t* t)
{
#pragma omp critical
    {
        t->elapsed += elapsed_time() - t->start;
    }
}
/* Convert years to seconds.
 * Widen to 64 bits BEFORE multiplying: the plain int32 product
 * overflows for years >= 69 (69 * 31556926 > INT32_MAX), which is
 * exactly why the return type is int64_t. */
static inline int64_t year2sec(int32_t years)
{
    return (int64_t)years * 31556926;
}
/* Convert whole days to seconds.
 * Caller is responsible for keeping the result within int32 range. */
static inline int32_t day2sec(int32_t days)
{
    const int32_t seconds_per_day = 24 * 60 * 60; /* 86400 */
    return days * seconds_per_day;
}
/* Convert whole hours to seconds. */
static inline int32_t hour2sec(int32_t hours)
{
    const int32_t seconds_per_hour = 60 * 60; /* 3600 */
    return hours * seconds_per_hour;
}
/* Convert whole minutes to seconds. */
static inline int32_t minute2sec(int32_t minutes)
{
    const int32_t seconds_per_minute = 60;
    return minutes * seconds_per_minute;
}
/* Convert seconds to whole years (truncating division). */
static inline int32_t sec2year(int64_t seconds)
{
    const int64_t seconds_per_year = 31556926;
    return (int32_t)(seconds / seconds_per_year);
}
/* Convert seconds to whole days (truncating division). */
static inline int32_t sec2day(int32_t seconds)
{
    const int32_t seconds_per_day = 24 * 60 * 60; /* 86400 */
    return seconds / seconds_per_day;
}
/* Convert seconds to whole hours (truncating division). */
static inline int32_t sec2hour(int32_t seconds)
{
    const int32_t seconds_per_hour = 60 * 60; /* 3600 */
    return seconds / seconds_per_hour;
}
/* Convert seconds to whole minutes (truncating division). */
static inline int32_t sec2minute(int32_t seconds)
{
    const int32_t seconds_per_minute = 60;
    return seconds / seconds_per_minute;
}
#endif
|
gm_dfs_template.h | #ifndef GM_DFS_TEMPLATE_H
#define GM_DFS_TEMPLATE_H
#include <omp.h>
#include <string.h>

#include <algorithm>
#include <set>
#include <vector>

#include "gm_graph.h"
//-----------------------------------------------
// template for DFS
// Note that recursion-based DFS will surely crash due to
// stack overflow when applied to small-world graphs.
// (It will visit O(N) nodes before ever popping back up.)
// Thus, here we implement DFS without recursion.
//-----------------------------------------------
struct _dfs_state
{
_dfs_state(node_t N, edge_t I, edge_t E) :
node(N), idx(I), end(E) {
}
node_t node; // node
edge_t idx; // edge idx
edge_t end; //
};
// Iterative (stack-based) DFS driver.  Subclasses implement the three
// hook methods; the template booleans compile the hooks in or out.
template<bool has_pre_visit, bool has_post_visit, bool has_navigator, bool use_reverse_edge>
class gm_dfs_template
{
protected:
    virtual void visit_pre(node_t t)=0;
    virtual void visit_post(node_t t)=0;
    virtual bool check_navigator(node_t t, edge_t idx)=0;

public:
    gm_dfs_template(gm_graph& _G) :
            G(_G) {
        visited_bitmap = NULL; // allocated lazily in prepare_large()
    }

    virtual ~gm_dfs_template() {
        // visited_bitmap is allocated with new[], so it must be released
        // with delete[]; plain `delete` on an array is undefined behavior
        // (prepare_large() already used delete[] correctly).
        delete[] visited_bitmap;
    }

    // Reset traversal state and remember the root.  Call before do_dfs().
    void prepare(node_t root_node) {
        root = root_node;
        cnt = 0;
        visited_small.clear();
        is_small = true;
        curr_node = INVALID_NODE;
        curr_idx = 0;
        curr_end = 0;
        // Switch to the bitmap representation once ~10% of the nodes
        // (but at least 4096) have been visited.
        THRESHOLD_LARGE = std::max((int)(G.num_nodes()*0.1), 4096);
    }

    void do_dfs() {
        enter_node(root);
        main_loop();
    }

private:
    // Migrate the visited set from std::set to a per-node bitmap.
    // Called once the visit count crosses THRESHOLD_LARGE, at which
    // point the traversal will probably touch most of the graph.
    void prepare_large() {
        delete[] visited_bitmap;
        visited_bitmap = new unsigned char[(G.num_nodes() + 7) / 8];
#pragma omp parallel for
        for (int i = 0; i < (G.num_nodes() + 7) / 8; i++)
            visited_bitmap[i] = 0;

        // Carry over every node already marked in the small set.
        std::set<node_t>::iterator I;
        for (I = visited_small.begin(); I != visited_small.end(); I++) {
            node_t u = *I;
            _gm_set_bit(visited_bitmap, u);
        }
        is_small = false;
        stack.reserve(G.num_nodes());
    }

    // Push the current position onto the stack and descend into n.
    void enter_node(node_t n) {
        _dfs_state S(curr_node, curr_idx, curr_end);
        stack.push_back(S);

        curr_node = n;
        curr_idx = (use_reverse_edge) ? G.r_begin[n] : G.begin[n];
        curr_end = (use_reverse_edge) ? G.r_begin[n + 1] : G.begin[n + 1];

        // mark visited
        add_visited(n);
        cnt++;
        if (cnt == THRESHOLD_LARGE) // over threshold: probably visiting all nodes
        {
            prepare_large();
        }

        if (has_pre_visit) visit_pre(n);
    }

    // Finish n and restore the parent's saved position from the stack.
    void exit_node(node_t n) {
        if (has_post_visit) visit_post(n);
        _dfs_state S = stack.back();
        stack.pop_back();
        curr_node = S.node;
        curr_idx = S.idx;
        curr_end = S.end;
    }

    // Main driver: repeat until the stack is empty (curr_node invalid).
    void main_loop() {
        while (curr_node != INVALID_NODE) {
            if (curr_idx == curr_end) {
                // Every neighbor has been examined: backtrack.
                exit_node(curr_node);
                continue;
            }
            else {
                // Examine the next neighbor, skipping visited/filtered ones.
                node_t z;
                if (use_reverse_edge) {
                    z = G.r_node_idx[curr_idx];
                } else {
                    z = G.node_idx[curr_idx];
                }
                if (has_visited(z)) {
                    curr_idx++;
                    continue;
                }
                if (has_navigator) {
                    if (check_navigator(z, curr_idx) == false) {
                        curr_idx++;
                        continue;
                    }
                }
                curr_idx++;
                enter_node(z);
                continue;
            }
        }
    }

    void add_visited(node_t n) {
        if (is_small)
            visited_small.insert(n);
        else
            _gm_set_bit(visited_bitmap, n);
    }

    bool has_visited(node_t n) {
        if (is_small) {
            return (visited_small.find(n) != visited_small.end());
        } else {
            return _gm_get_bit(visited_bitmap, n);
        }
    }

protected:
    node_t root;
    gm_graph& G;

    // stack implementation
    node_t stack_ptr; // NOTE(review): appears unused; kept for subclass compatibility
    std::vector<_dfs_state> stack;
    node_t curr_node;
    edge_t curr_idx;
    edge_t curr_end;

    // visited-set implementation: std::set while small, bitmap once large
    node_t cnt;
    unsigned char* visited_bitmap;
    std::set<node_t> visited_small;
    bool is_small;
    int THRESHOLD_LARGE;
    static const node_t INVALID_NODE = -1;
};
#endif
|
ensemble_res_comp.c | /*
* Copyright 2016 Ahnaf Siddiqui, Mohsen Botlani and Sameer Varma
*
* This program uses the GROMACS molecular simulation package API.
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
* Copyright (c) 2013,2014, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed at http://www.gromacs.org.
* g_ensemble_comp quantifies the difference between two conformational ensembles (two trajectory files)
* Quantification is in terms of a true metric, eta=1-Overlap
* Leighty and Varma, Quantifying Changes in Intrinsic Molecular Motion Using Support Vector Machines, J. Chem. Theory Comput. 2013, 9, 868-875.
*/
#include "ensemble_res_comp.h"
#include "gkut_io.h"
#include "gkut_log.h"
#ifdef _OPENMP
#include <omp.h>
#endif
static void free_svm_model(struct svm_model *model);
/* Read residue information from the .pdb file named in
 * eta_dat->fnames[eRES1].  Fills eta_dat->nres, res_IDs, res_names and
 * res_natoms; res_natoms counts only the atoms selected in
 * eta_dat->atom_IDs.  The caller-provided atoms struct receives the
 * allocated arrays (and currently leaks them -- see TODO below). */
static void res_pdb(eta_res_dat_t *eta_dat, t_atoms *atoms) {
    char title[256];
    rvec *x;

    /* Allocate for the full (unfiltered) atom count so read_pdb_conf
     * has room for every record in the file. */
    atoms->nr = eta_dat->natoms_all;
    snew(atoms->atom, eta_dat->natoms_all);
    snew(atoms->atomname, eta_dat->natoms_all);
    snew(atoms->atomtype, eta_dat->natoms_all);
    snew(atoms->atomtypeB, eta_dat->natoms_all);
    atoms->nres = eta_dat->natoms_all;
    snew(atoms->resinfo, eta_dat->natoms_all);
    snew(atoms->pdbinfo, eta_dat->natoms_all);
    snew(x, eta_dat->natoms_all);

    read_pdb_conf(eta_dat->fnames[eRES1], title, atoms, x, NULL, NULL, FALSE, NULL);
    sfree(x); /* only needed atoms/residue metadata, not coordinates */

    snew(eta_dat->res_IDs, atoms->nres);
    snew(eta_dat->res_names, atoms->nres);
    snew(eta_dat->res_natoms, atoms->nres);
    eta_dat->nres = atoms->nres;

    // Only sum the atoms that are in the indexes in atom_IDs
    int resid;
    for (int i = 0; i < eta_dat->natoms; ++i) {
        resid = atoms->atom[eta_dat->atom_IDs[i]].resind;
        eta_dat->res_natoms[resid] += 1;
    }
    for (int i = 0; i < atoms->nres; ++i) {
        eta_dat->res_IDs[i] = atoms->resinfo[i].nr;
        eta_dat->res_names[i] = *(atoms->resinfo[i].name);
    }

    // TODO: where can we free the atoms struct? MEMORY LEAK ALERT
    // (res_names above aliases atoms->resinfo[i].name, so freeing here
    //  would dangle those pointers -- must be freed after last use.)
    // sfree(atoms->atomname);
    // sfree(atoms->atomtype);
    // sfree(atoms->atomtypeB);
    // sfree(atoms->pdbinfo);
    // sfree(atoms->atom);
    // sfree(atoms->resinfo);
}
// Try res_tpx for gro and tpr instead of this.
/* Read residue information from a .gro (or other tps-readable)
 * structure file via the full topology, handing back top.atoms.
 * Frees the topology sub-structures that are safe to release; the
 * atoms themselves must be freed by the caller (see notes below). */
static void res_tps(eta_res_dat_t *eta_dat, t_atoms *atoms) {
    char title[256];
    t_topology top;
    rvec *x = NULL;
    matrix box;
    int ePBC;

    init_top(&top);
    read_tps_conf(eta_dat->fnames[eRES1], title, &top, &ePBC, &x, NULL, box, FALSE);
    /* Shallow copy: *atoms now aliases the arrays owned by top.atoms. */
    *atoms = top.atoms;

    // Cannot use done_top(), causes error- pointer being freed was not allocated. See implementation in typedefs.c
    // TODO: free atoms outside of here
    // done_atom(&(top.atoms));
    done_symtab(&(top.symtab));
    done_block(&(top.cgs));
    done_block(&(top.mols));
    done_blocka(&(top.excls));
    sfree(x); /* coordinates are not needed for residue info */
}
// TODO: Does this work for gro files generated by grompp etc?
/* Read residue information from a .tpr run-input file.
 * Loads the topology and hands back the atoms of the first molecule
 * type (assumes the molecule of interest is mtop.moltype[0] --
 * TODO confirm for multi-moleculetype systems). */
static void res_tpx(eta_res_dat_t *eta_dat, t_atoms *atoms) {
    t_inputrec ir;
    gmx_mtop_t mtop;
    matrix box;
    int natoms;
    read_tpx(eta_dat->fnames[eRES1], &ir, box, &natoms, NULL, NULL, NULL, &mtop);
    /* Shallow copy: *atoms aliases arrays owned by mtop. */
    *atoms = mtop.moltype->atoms;
    // TODO: free the rest of the mtop?
}
/* Initialize every field of eta_dat to its default so that later
 * cleanup (free_eta_dat) and lazy allocation behave predictably. */
void init_eta_dat(eta_res_dat_t *eta_dat) {
    /* SVM training parameters */
    eta_dat->gamma = GAMMA;
    eta_dat->c = COST;
    eta_dat->nthreads = -1;

    /* GROMACS output environment, filled in during trajectory reading */
    eta_dat->oenv = NULL;

    /* Per-residue results: allocated once residue info is known */
    eta_dat->nres = 0;
    eta_dat->res_IDs = NULL;
    eta_dat->res_names = NULL;
    eta_dat->res_natoms = NULL;
    eta_dat->eta = NULL;
    eta_dat->natoms_all = 0;
}
/* Release the per-residue output arrays owned by eta_dat.
 * Each pointer is reset to NULL after freeing so that a repeated call
 * (or a later re-init/free cycle) cannot double-free. */
void free_eta_dat(eta_res_dat_t *eta_dat) {
    if (eta_dat->res_IDs) {
        sfree(eta_dat->res_IDs);
        eta_dat->res_IDs = NULL;
    }
    if (eta_dat->res_names) {
        sfree(eta_dat->res_names);
        eta_dat->res_names = NULL;
    }
    if (eta_dat->res_natoms) {
        sfree(eta_dat->res_natoms);
        eta_dat->res_natoms = NULL;
    }
    if (eta_dat->eta) {
        sfree(eta_dat->eta);
        eta_dat->eta = NULL;
    }
}
/* Top-level driver: read the two input trajectories, apply optional
 * index groups, gather residue information, build one SVM problem per
 * residue, train, and compute eta per residue into eta_dat->eta.
 * Fatal-logs (and presumably aborts -- see notes below) on I/O or
 * consistency errors. */
void ensemble_res_comp(eta_res_dat_t *eta_dat) {
    const char *io_error = "Input trajectory files must be .xtc, .trr, or .pdb!\n";
    const char *fr_error = "Input trajectories have differing numbers of frames!\n";
    const char *ndx_error = "Given index groups have differing numbers of atoms!\n";
    const char *natom_error = "Input trajectories have differing numbers of atoms!\n";
    /* Trajectory data */
    rvec **x1, **x2; // Trajectory position vectors
    int nframes, nframes2, natoms2, i;
    /* Training data */
    struct svm_problem *probs; // svm problems for training
    struct svm_model **models; // pointers to models produced by training
    /* Read trajectory files */
    matrix *box = NULL;
    switch(fn2ftp(eta_dat->fnames[eTRAJ1])) {
        case efXTC:
        case efTRR:
        case efPDB:
            gk_read_traj(eta_dat->fnames[eTRAJ1], &x1, &box, &nframes, &eta_dat->natoms, &eta_dat->oenv);
            break;
        default:
            gk_log_fatal(FARGS, io_error);
    }
    sfree(box); // don't need box data
    box = NULL;
    switch(fn2ftp(eta_dat->fnames[eTRAJ2])) {
        case efXTC:
        case efTRR:
        case efPDB:
            gk_read_traj(eta_dat->fnames[eTRAJ2], &x2, &box, &nframes2, &natoms2, &eta_dat->oenv);
            break;
        default:
            gk_log_fatal(FARGS, io_error);
    }
    sfree(box); // don't need box data
    box = NULL;
    /* In case traj files have different numbers of frames */
    if (nframes != nframes2) {
        gk_log_fatal(FARGS, fr_error);
    }
    // Save total natoms before it is potentially changed by index data below.
    // Might be needed, for example, by residue reading functions.
    eta_dat->natoms_all = eta_dat->natoms;
    /* Index data */
    const int NUMGROUPS = 1;
    int *isize, *isize2;
    atom_id **indx1, **indx2; // Atom indices for the two trajectories
    char **grp_names;
    snew(isize, NUMGROUPS);
    snew(indx1, NUMGROUPS);
    snew(grp_names, NUMGROUPS);
    /* If an index file was given, get atom group with indices that will be trained */
    if (eta_dat->fnames[eNDX1] != NULL) {
        rd_index(eta_dat->fnames[eNDX1], NUMGROUPS, isize, indx1, grp_names);
        eta_dat->natoms = isize[0]; // natoms now counts selected atoms only
    }
    else { // If no index file, set default indices as 0 to natoms - 1
        snew(indx1[0], eta_dat->natoms);
        for (i = 0; i < eta_dat->natoms; ++i) {
            indx1[0][i] = i;
        }
    }
    if (eta_dat->fnames[eNDX2] != NULL) {
        snew(isize2, NUMGROUPS);
        snew(indx2, NUMGROUPS);
        rd_index(eta_dat->fnames[eNDX2], NUMGROUPS, isize2, indx2, grp_names);
        if (isize2[0] != eta_dat->natoms) {
            gk_log_fatal(FARGS, ndx_error);
        }
    }
    else {
        if (natoms2 != eta_dat->natoms) {
            gk_log_fatal(FARGS, natom_error);
        }
        /* No second index file: alias the first group.  The cleanup
         * below frees indx2 only when it was separately allocated. */
        indx2 = indx1;
    }
    eta_dat->atom_IDs = indx1[0]; // store atom IDs in output
    // Get residue information
    t_atoms atoms;
    gk_print_log("Reading residue info from %s...\n", eta_dat->fnames[eRES1]);
    switch(fn2ftp(eta_dat->fnames[eRES1])) {
        case efPDB:
            res_pdb(eta_dat, &atoms);
            break;
        case efGRO: // TODO: try using this for tpr as well, or vice versa?
            res_tps(eta_dat, &atoms);
            break;
        case efTPR:
            res_tpx(eta_dat, &atoms);
            break;
        default:
            // NOTE(review): gk_log_fatal presumably aborts, so the
            // "Skipping" wording in this message may be misleading -- confirm.
            gk_log_fatal(FARGS, "%s is not a supported filetype for residue information. Skipping eta residue calculation.\n",
                eta_dat->fnames[eRES1]);
            //flush_log();
    }
    /* Construct svm problems */
    traj_res2svm_probs(eta_dat, x1, x2, indx1[0], indx2[0], nframes, &atoms, &probs);
    /* No longer need original vectors */
    free_traj(x1, nframes);
    free_traj(x2, nframes);
    /* No longer need index junk (except for what we stored in atom_IDs) */
    sfree(isize);
    sfree(indx1);
    sfree(grp_names);
    if (eta_dat->fnames[eNDX2] != NULL) {
        sfree(isize2);
        sfree(indx2[0]);
        sfree(indx2);
    }
    /* Train SVM */
    snew(models, eta_dat->nres);
    train_svm_probs(probs, eta_dat->nres, eta_dat->gamma, eta_dat->c, eta_dat->nthreads, models);
    /* calculate eta per residue */
    snew(eta_dat->eta, eta_dat->nres);
    calc_eta(models, eta_dat->nres, nframes, eta_dat->eta);
    /* Clean up svm stuff */
    free_svm_probs(probs, eta_dat->nres, nframes * 2);
    free_svm_models(models, eta_dat->nres);
}
/*
 * Build one libsvm classification problem per residue from two trajectories.
 *
 * Every frame of each trajectory contributes one feature vector per residue:
 * atom1X, atom1Y, atom1Z, atom2X, ... over the atoms of that residue, each
 * coordinate scaled by 10. Vectors from x1 are labeled LABEL1 and vectors
 * from x2 LABEL2, so a later svm training step can try to separate the two
 * trajectories.
 *
 * eta_dat - run data; eta_dat->natoms sizes the shared node pool
 * x1, x2  - per-frame coordinates for trajectory 1 / 2 (x[frame][atom])
 * indx1, indx2 - atom index groups; NOTE(review): currently unused in this
 *                body (see the TODOs below) — every atom of every residue
 *                in `atoms` is used regardless of the index groups
 * nframes - frame count of each trajectory (caller verified they match)
 * atoms   - topology info; atoms->atom[a].resind maps atom -> residue
 * probs   - out: array of per-residue svm_problems. All problems share one
 *           targets array and one node pool; release with free_svm_probs().
 */
void traj_res2svm_probs(eta_res_dat_t *eta_dat,
                        rvec **x1,
                        rvec **x2,
                        atom_id *indx1,
                        atom_id *indx2,
                        int nframes,
                        t_atoms *atoms,
                        struct svm_problem **probs) {
    int nvecs = nframes * 2; // total training vectors per residue (both trajectories)
    int i;
    double *targets = NULL; // trajectory classification labels
    struct svm_node *nodepool = NULL; // allocated memory for storing svm nodes (feature vectors)
    gk_print_log("Constructing svm problems for %d residues in %d frames...\n",
                 atoms->nres, nframes);
    //flush_log();
    // Build targets array with classification labels:
    // first nframes entries get LABEL1, the rest LABEL2. This is one shared
    // array — every problem's .y points at it (freed once in free_svm_probs).
    snew(targets, nvecs);
    for (i = 0; i < nframes; ++i) {
        targets[i] = LABEL1; // trajectory 1
    }
    for (; i < nvecs; ++i) {
        targets[i] = LABEL2; // trajectory 2
    }
    // Build map from residue IDs to atom IDs
    // TODO: make this more efficient
    int **res_atoms = NULL;    // res_atoms[r] = array of atom ids in residue r
    int *res_atom_lens = NULL; // res_atom_lens[r] = number of atoms in residue r
    snew(res_atoms, atoms->nres);
    snew(res_atom_lens, atoms->nres);
    for (int atom = 0; atom < atoms->nr; ++atom) {
        int resind = atoms->atom[atom].resind;
        // allocate memory for another atom for this atom's residue
        if (res_atom_lens[resind] == 0) {
            snew(res_atoms[resind], 1);
        } else {
            srenew(res_atoms[resind], res_atom_lens[resind] + 1);
        }
        // add this atom to this atom's residue
        res_atoms[resind][res_atom_lens[resind]] = atom;
        ++res_atom_lens[resind];
    }
    // Allocate enough space for storing all svm nodes
    // 2 trajectories * natoms * nframes * (3 coordinates per atom + one node for the -1 end index)
    // NOTE(review): the fill loops below consume nodes for all atoms reached
    // through res_atoms (built from atoms->nr), while this budget uses
    // eta_dat->natoms; if an index file shrank eta_dat->natoms below
    // atoms->nr the pool could be overrun — confirm.
    snew(nodepool, 2 * eta_dat->natoms * nframes * 4);
    if (!nodepool)
        gk_log_fatal(FARGS, "Failed to allocate memory for svm training vectors!\n");
    /* Construct svm problems */
    // TODO: de-duplicate code pls
    // NOTE(review): allocation uses atoms->nres but the loop bound is
    // eta_dat->nres — assumes the two are equal (set by the res_* readers).
    snew(*probs, atoms->nres);
    int cur_res, cur_frame, cur_data;
    for (cur_res = 0; cur_res < eta_dat->nres; ++cur_res) {
        printf("Residue %d...\r", cur_res);
        fflush(stdout);
        (*probs)[cur_res].l = nvecs;
        (*probs)[cur_res].y = targets; // shared label array (see above)
        snew((*probs)[cur_res].x, nvecs);
        // Insert coordinates from traj1
        // For each frame, add all of the coordinates of all atoms in this residue
        for (cur_frame = 0, cur_data = 0; cur_frame < nframes; ++cur_frame, ++cur_data) {
            // nodepool is used as a bump pointer: this vector starts at the
            // current head, and the pool advances as nodes are filled below.
            (*probs)[cur_res].x[cur_data] = nodepool;
            // Coordinates are indexed starting at 1
            // All of the coordinates of an atom are added to the vector
            // before adding the coordinates of the next atom.
            // So the vector is as follows:
            // atom1X, atom1Y, atom1Z, atom2X, atom2Y, atom2Z, atom3X...
            int index = 0;
            // loop through the atoms in this residue
            for (i = 0; i < res_atom_lens[cur_res]; ++i) {
                // TODO: only add this atom to probs if this atom id
                // is present in the given indexes (indx1 or indx2)
                int atomid = res_atoms[cur_res][i];
                for (int coord = 0; coord < 3; ++coord) {
                    // there's three coordinate indexes per atom
                    index = i * 3 + coord;
                    // svm index starts at 1
                    (*probs)[cur_res].x[cur_data][index].index = index + 1;
                    // Scaling by 10 gives more accurate results (or so he says)
                    (*probs)[cur_res].x[cur_data][index].value = x1[cur_frame][atomid][coord] * 10.0;
                }
                nodepool += 3;
            }
            // -1 index marks end of a data vector
            // (index still holds the last coordinate slot written; assumes the
            // residue has at least one atom, otherwise this writes one slot
            // past the single node budgeted for the vector — TODO confirm)
            (*probs)[cur_res].x[cur_data][index + 1].index = -1;
            nodepool += 1;
        }
        // Insert coordinates from traj2 (same layout; cur_data keeps counting
        // from the loop above, so these land in slots nframes..nvecs-1)
        for (cur_frame = 0; cur_frame < nframes; ++cur_frame, ++cur_data) {
            (*probs)[cur_res].x[cur_data] = nodepool;
            int index = 0;
            for (i = 0; i < res_atom_lens[cur_res]; ++i) {
                // TODO: only add this atom to probs if this atom id
                // is present in the given indexes (indx1 or indx2)
                int atomid = res_atoms[cur_res][i];
                for (int coord = 0; coord < 3; ++coord) {
                    // there's three coordinate indexes per atom
                    index = i * 3 + coord;
                    // svm index starts at 1
                    (*probs)[cur_res].x[cur_data][index].index = index + 1;
                    // Scaling by 10 gives more accurate results (or so he says)
                    (*probs)[cur_res].x[cur_data][index].value = x2[cur_frame][atomid][coord] * 10.0;
                }
                nodepool += 3;
            }
            // -1 index marks end of a data vector
            (*probs)[cur_res].x[cur_data][index + 1].index = -1;
            nodepool += 1;
        }
    }
    printf("\n");
    fflush(stdout);
    // cleanup of the residue -> atoms map (the node pool and targets live on
    // inside the problems)
    for (i = 0; i < atoms->nres; ++i) {
        sfree(res_atoms[i]);
    }
    sfree(res_atoms);
    sfree(res_atom_lens);
}
/*
 * Release the svm problems built by traj_res2svm_probs().
 *
 * All problems share a single target array and a single contiguous node
 * pool, so those two allocations are released exactly once through the
 * first problem; only the per-problem vector-pointer arrays are freed
 * individually.
 *
 * probs  - problem array to destroy
 * nprobs - number of problems in the array
 * nvecs  - vectors per problem (used only to know whether a node pool exists)
 */
void free_svm_probs(struct svm_problem *probs,
                    int nprobs,
                    int nvecs) {
    if (nprobs > 0) {
        sfree(probs[0].y); // shared label array
        if (nvecs > 0) {
            sfree(probs[0].x[0]); // first vector points at the node pool head
        }
    }
    int p = 0;
    while (p < nprobs) {
        sfree(probs[p].x);
        ++p;
    }
    sfree(probs);
}
/*
 * Train one C-SVC/RBF libsvm model per residue problem.
 *
 * probs     - array of num_probs svm problems (one per residue)
 * num_probs - number of problems (and of output models)
 * gamma     - RBF kernel gamma
 * c         - C-SVC cost parameter
 * nthreads  - OpenMP thread count (<= 0 keeps the OpenMP default)
 * models    - out: models[i] receives the model trained on probs[i]
 */
void train_svm_probs(struct svm_problem *probs,
                     int num_probs,
                     real gamma,
                     real c,
                     int nthreads,
                     struct svm_model **models) {
    struct svm_parameter param; // Parameters used for training
    gk_print_log("svm-training trajectory atoms with gamma = %f and C = %f...\n", gamma, c);
    //flush_log();
    /* Set svm parameters */
    param.svm_type = C_SVC;
    param.kernel_type = RBF;
    param.degree = 3;         // unused by RBF; keep the struct fully initialized
    param.gamma = gamma;
    param.coef0 = 0.0;
    param.cache_size = 100.0; // kernel cache size in MB
    param.eps = 0.001;        // stopping tolerance
    param.C = c;
    param.nr_weight = 0;      // no per-class weighting
    // BUGFIX: weight_label/weight were left uninitialized. libsvm ignores
    // them when nr_weight == 0, but passing garbage pointers is fragile.
    param.weight_label = NULL;
    param.weight = NULL;
    param.nu = 0.5;           // unused by C_SVC
    param.p = 0.1;            // unused by C_SVC
    param.shrinking = 1;
    param.probability = 0;
#ifdef _OPENMP
    if (nthreads > 0)
        omp_set_num_threads(nthreads);
    if (nthreads > 1 || nthreads <= 0)
        gk_print_log("svm training will be parallelized.\n");
#endif
    /* Train svm */
    int i;
#pragma omp parallel for schedule(dynamic) private(i) shared(num_probs,models,probs,param)
    for (i = 0; i < num_probs; ++i) {
#if defined _OPENMP && defined EC_DEBUG
        gk_print_log("%d threads running svm-train.\n", omp_get_num_threads());
#endif
        // BUGFIX: this argument was the mojibake "¶m" (an HTML-escaped
        // "&param"), which is not valid C; restored to &param.
        models[i] = svm_train(&(probs[i]), &param);
    }
}
/*
 * Destroy a single trained libsvm model.
 *
 * svm_free_and_destroy_model() already calls svm_free_model_content()
 * internally before freeing the model struct itself, so the explicit
 * svm_free_model_content() call that used to precede it was redundant —
 * and a double free on libsvm versions that do not NULL the internal
 * pointers after releasing them.
 */
static void free_svm_model(struct svm_model *model) {
    svm_free_and_destroy_model(&model);
}
/*
 * Destroy an array of trained svm models and the array itself.
 *
 * models     - array of model pointers (each released via free_svm_model)
 * num_models - number of entries in models
 */
void free_svm_models(struct svm_model **models, int num_models) {
    int m = 0;
    while (m < num_models) {
        free_svm_model(models[m]);
        ++m;
    }
    sfree(models);
}
/*
 * Compute one eta value per trained model.
 *
 * eta[i] = 1 - nSV / (2 * num_frames): the fraction of the 2*num_frames
 * training vectors that did NOT become support vectors of model i.
 *
 * models     - trained svm models
 * num_models - number of models (and of eta entries)
 * num_frames - frames per trajectory used in training
 * eta        - out: caller-allocated array of num_models results
 */
void calc_eta(struct svm_model **models,
              int num_models,
              int num_frames,
              real *eta) {
    gk_print_log("Calculating eta values...\n");
    //flush_log();
    for (int m = 0; m < num_models; ++m) {
        eta[m] = 1.0 - svm_get_nr_sv(models[m]) / (2.0 * (real)num_frames);
    }
}
/*
 * Write the per-residue eta values to the eETA_RES output file.
 *
 * Does nothing when no eta values were computed; logs (but does not abort)
 * when the output file cannot be opened.
 */
void save_eta(eta_res_dat_t *eta_dat) {
    // residue etas
    if (!eta_dat->eta)
        return;
    FILE *f = fopen(eta_dat->fnames[eETA_RES], "w");
    if (!f) {
        gk_print_log("Failed to open file %s for saving residue eta values.\n",
                     eta_dat->fnames[eETA_RES]);
        return;
    }
    gk_print_log("Saving residue eta values to %s...\n",
                 eta_dat->fnames[eETA_RES]);
    fprintf(f, "# RES\tETA\n");
    for (int r = 0; r < eta_dat->nres; ++r) {
        fprintf(f, "%d%s\t%f\n", eta_dat->res_IDs[r],
                eta_dat->res_names[r],
                eta_dat->eta[r]);
    }
    fclose(f);
    //flush_log();
}
/*
 * Free a trajectory: each frame's coordinate array, then the frame-pointer
 * array itself.
 *
 * Robustness fix: a NULL trajectory is now a no-op instead of a crash on
 * the x[i] dereference (e.g. if a read step failed and left x unset).
 */
void free_traj(rvec **x, int nframes) {
    if (x == NULL)
        return;
    for (int i = 0; i < nframes; ++i) {
        sfree(x[i]);
    }
    sfree(x);
}
|
par_build_table.c | /* Generated by Cython 0.20.1 on Tue Apr 22 20:29:04 2014 */
#define PY_SSIZE_T_CLEAN
#ifndef CYTHON_USE_PYLONG_INTERNALS
#ifdef PYLONG_BITS_IN_DIGIT
#define CYTHON_USE_PYLONG_INTERNALS 0
#else
#include "pyconfig.h"
#ifdef PYLONG_BITS_IN_DIGIT
#define CYTHON_USE_PYLONG_INTERNALS 1
#else
#define CYTHON_USE_PYLONG_INTERNALS 0
#endif
#endif
#endif
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02040000
#error Cython requires Python 2.4+.
#else
#define CYTHON_ABI "0_20_1"
#include <stddef.h> /* For offsetof */
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#endif
#if CYTHON_COMPILING_IN_PYPY
#define Py_OptimizeFlag 0
#endif
#if PY_VERSION_HEX < 0x02050000
typedef int Py_ssize_t;
#define PY_SSIZE_T_MAX INT_MAX
#define PY_SSIZE_T_MIN INT_MIN
#define PY_FORMAT_SIZE_T ""
#define CYTHON_FORMAT_SSIZE_T ""
#define PyInt_FromSsize_t(z) PyInt_FromLong(z)
#define PyInt_AsSsize_t(o) __Pyx_PyInt_As_int(o)
#define PyNumber_Index(o) ((PyNumber_Check(o) && !PyFloat_Check(o)) ? PyNumber_Int(o) : \
(PyErr_Format(PyExc_TypeError, \
"expected index value, got %.200s", Py_TYPE(o)->tp_name), \
(PyObject*)0))
#define __Pyx_PyIndex_Check(o) (PyNumber_Check(o) && !PyFloat_Check(o) && \
!PyComplex_Check(o))
#define PyIndex_Check __Pyx_PyIndex_Check
#define PyErr_WarnEx(category, message, stacklevel) PyErr_Warn(category, message)
#define __PYX_BUILD_PY_SSIZE_T "i"
#else
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#define __Pyx_PyIndex_Check PyIndex_Check
#endif
#if PY_VERSION_HEX < 0x02060000
#define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt)
#define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
#define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size)
#define PyVarObject_HEAD_INIT(type, size) \
PyObject_HEAD_INIT(type) size,
#define PyType_Modified(t)
typedef struct {
void *buf;
PyObject *obj;
Py_ssize_t len;
Py_ssize_t itemsize;
int readonly;
int ndim;
char *format;
Py_ssize_t *shape;
Py_ssize_t *strides;
Py_ssize_t *suboffsets;
void *internal;
} Py_buffer;
#define PyBUF_SIMPLE 0
#define PyBUF_WRITABLE 0x0001
#define PyBUF_FORMAT 0x0004
#define PyBUF_ND 0x0008
#define PyBUF_STRIDES (0x0010 | PyBUF_ND)
#define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES)
#define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES)
#define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES)
#define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES)
#define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_FORMAT | PyBUF_WRITABLE)
#define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_FORMAT | PyBUF_WRITABLE)
typedef int (*getbufferproc)(PyObject *, Py_buffer *, int);
typedef void (*releasebufferproc)(PyObject *, Py_buffer *);
#endif
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyType_Type
#endif
#if PY_VERSION_HEX < 0x02060000
#define PyUnicode_FromString(s) PyUnicode_Decode(s, strlen(s), "UTF-8", "strict")
#endif
#if PY_MAJOR_VERSION >= 3
#define Py_TPFLAGS_CHECKTYPES 0
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3)
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#if PY_VERSION_HEX < 0x02060000
#define Py_TPFLAGS_HAVE_VERSION_TAG 0
#endif
#if PY_VERSION_HEX < 0x02060000 && !defined(Py_TPFLAGS_IS_ABSTRACT)
#define Py_TPFLAGS_IS_ABSTRACT 0
#endif
#if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE)
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#else
#define CYTHON_PEP393_ENABLED 0
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#endif
#if PY_VERSION_HEX < 0x02060000
#define PyBytesObject PyStringObject
#define PyBytes_Type PyString_Type
#define PyBytes_Check PyString_Check
#define PyBytes_CheckExact PyString_CheckExact
#define PyBytes_FromString PyString_FromString
#define PyBytes_FromStringAndSize PyString_FromStringAndSize
#define PyBytes_FromFormat PyString_FromFormat
#define PyBytes_DecodeEscape PyString_DecodeEscape
#define PyBytes_AsString PyString_AsString
#define PyBytes_AsStringAndSize PyString_AsStringAndSize
#define PyBytes_Size PyString_Size
#define PyBytes_AS_STRING PyString_AS_STRING
#define PyBytes_GET_SIZE PyString_GET_SIZE
#define PyBytes_Repr PyString_Repr
#define PyBytes_Concat PyString_Concat
#define PyBytes_ConcatAndDel PyString_ConcatAndDel
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj) || \
PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#if PY_VERSION_HEX < 0x02060000
#define PySet_Check(obj) PyObject_TypeCheck(obj, &PySet_Type)
#define PyFrozenSet_Check(obj) PyObject_TypeCheck(obj, &PyFrozenSet_Type)
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if (PY_MAJOR_VERSION < 3) || (PY_VERSION_HEX >= 0x03010300)
#define __Pyx_PySequence_GetSlice(obj, a, b) PySequence_GetSlice(obj, a, b)
#define __Pyx_PySequence_SetSlice(obj, a, b, value) PySequence_SetSlice(obj, a, b, value)
#define __Pyx_PySequence_DelSlice(obj, a, b) PySequence_DelSlice(obj, a, b)
#else
#define __Pyx_PySequence_GetSlice(obj, a, b) (unlikely(!(obj)) ? \
(PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), (PyObject*)0) : \
(likely((obj)->ob_type->tp_as_mapping) ? (PySequence_GetSlice(obj, a, b)) : \
(PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", (obj)->ob_type->tp_name), (PyObject*)0)))
#define __Pyx_PySequence_SetSlice(obj, a, b, value) (unlikely(!(obj)) ? \
(PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
(likely((obj)->ob_type->tp_as_mapping) ? (PySequence_SetSlice(obj, a, b, value)) : \
(PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice assignment", (obj)->ob_type->tp_name), -1)))
#define __Pyx_PySequence_DelSlice(obj, a, b) (unlikely(!(obj)) ? \
(PyErr_SetString(PyExc_SystemError, "null argument to internal routine"), -1) : \
(likely((obj)->ob_type->tp_as_mapping) ? (PySequence_DelSlice(obj, a, b)) : \
(PyErr_Format(PyExc_TypeError, "'%.200s' object doesn't support slice deletion", (obj)->ob_type->tp_name), -1)))
#endif
#if PY_MAJOR_VERSION >= 3
#define PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
#endif
#if PY_VERSION_HEX < 0x02050000
#define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n)))
#define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a))
#define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n)))
#else
#define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n))
#define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a))
#define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n))
#endif
#if PY_VERSION_HEX < 0x02050000
#define __Pyx_NAMESTR(n) ((char *)(n))
#define __Pyx_DOCSTR(n) ((char *)(n))
#else
#define __Pyx_NAMESTR(n) (n)
#define __Pyx_DOCSTR(n) (n)
#endif
#ifndef CYTHON_INLINE
#if defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
// Fallback used only when <math.h> does not define NAN: synthesize a float
// NaN by bit pattern. memset (rather than pointer casting) avoids
// strict-aliasing issues.
static CYTHON_INLINE float __PYX_NAN() {
  /* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and
  a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is
  a quiet NaN. */
  float value;
  memset(&value, 0xFF, sizeof(value));
  return value;
}
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#define __PYX_HAVE__radiotool__algorithms__par_build_table
#define __PYX_HAVE_API__radiotool__algorithms__par_build_table
#include "string.h"
#include "stdio.h"
#include "pythread.h"
#include "stdlib.h"
#include "pystate.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#ifdef PYREX_WITHOUT_ASSERTIONS
#define CYTHON_WITHOUT_ASSERTIONS
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) ( \
(sizeof(type) < sizeof(Py_ssize_t)) || \
(sizeof(type) > sizeof(Py_ssize_t) && \
likely(v < (type)PY_SSIZE_T_MAX || \
v == (type)PY_SSIZE_T_MAX) && \
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN || \
v == (type)PY_SSIZE_T_MIN))) || \
(sizeof(type) == sizeof(Py_ssize_t) && \
(is_signed || likely(v < (type)PY_SSIZE_T_MAX || \
v == (type)PY_SSIZE_T_MAX))) )
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromUString(s) __Pyx_PyObject_FromString((char*)s)
#define __Pyx_PyBytes_FromUString(s) __Pyx_PyBytes_FromString((char*)s)
#define __Pyx_PyByteArray_FromUString(s) __Pyx_PyByteArray_FromString((char*)s)
#define __Pyx_PyStr_FromUString(s) __Pyx_PyStr_FromString((char*)s)
#define __Pyx_PyUnicode_FromUString(s) __Pyx_PyUnicode_FromString((char*)s)
#if PY_MAJOR_VERSION < 3
/* Count the Py_UNICODE code units in a NUL-terminated buffer
   (Python 2 replacement for Py_UNICODE_strlen). */
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
{
    size_t n = 0;
    while (u[n] != 0)
        ++n;
    return n;
}
#else
#define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen
#endif
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None)
#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_COMPILING_IN_CPYTHON
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
/* Record whether sys.getdefaultencoding() is plain ASCII and, if not,
   verify it is at least an ASCII superset (this variant is compiled when
   the module was built with c_string_encoding=ascii).
   Returns 0 on success, -1 with a Python exception set on failure. */
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys = NULL;
    PyObject* default_encoding = NULL;
    PyObject* ascii_chars_u = NULL;
    PyObject* ascii_chars_b = NULL;
    sys = PyImport_ImportModule("sys");
    if (sys == NULL) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
    if (default_encoding == NULL) goto bad;
    if (strcmp(PyBytes_AsString(default_encoding), "ascii") == 0) {
        __Pyx_sys_getdefaultencoding_not_ascii = 0;
    } else {
        const char* default_encoding_c = PyBytes_AS_STRING(default_encoding);
        char ascii_chars[128];
        int c;
        // Round-trip all 128 ASCII code points through the default encoding;
        // if any byte comes back changed, the encoding is not a superset.
        for (c = 0; c < 128; c++) {
            ascii_chars[c] = c;
        }
        __Pyx_sys_getdefaultencoding_not_ascii = 1;
        ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
        if (ascii_chars_u == NULL) goto bad;
        ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
        if (ascii_chars_b == NULL || strncmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
            PyErr_Format(
                PyExc_ValueError,
                "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
                default_encoding_c);
            goto bad;
        }
    }
    // XDECREFs cover both the ascii fast path (chars objects still NULL)
    // and the full check path.
    Py_XDECREF(sys);
    Py_XDECREF(default_encoding);
    Py_XDECREF(ascii_chars_u);
    Py_XDECREF(ascii_chars_b);
    return 0;
bad:
    Py_XDECREF(sys);
    Py_XDECREF(default_encoding);
    Py_XDECREF(ascii_chars_u);
    Py_XDECREF(ascii_chars_b);
    return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
/* Cache a C copy of sys.getdefaultencoding() in __PYX_DEFAULT_STRING_ENCODING
   (this variant is compiled when c_string_encoding=default).
   Returns 0 on success, -1 with a Python exception set on failure. */
static int __Pyx_init_sys_getdefaultencoding_params(void) {
    PyObject* sys = NULL;
    PyObject* default_encoding = NULL;
    char* default_encoding_c;
    sys = PyImport_ImportModule("sys");
    if (sys == NULL) goto bad;
    default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
    if (default_encoding == NULL) goto bad;
    default_encoding_c = PyBytes_AS_STRING(default_encoding);
    /* BUGFIX: allocate strlen + 1 — strlen() excludes the NUL terminator but
       strcpy() writes it, so the original malloc(strlen(...)) overflowed the
       buffer by one byte. Also check the allocation before copying. */
    __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
    if (__PYX_DEFAULT_STRING_ENCODING == NULL) goto bad;
    strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
    Py_DECREF(sys);
    Py_DECREF(default_encoding);
    return 0;
bad:
    Py_XDECREF(sys);
    Py_XDECREF(default_encoding);
    return -1;
}
#endif
#endif
#ifdef __GNUC__
/* Test for GCC > 2.95 */
#if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* __GNUC__ > 2 ... */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ > 2 ... */
#else /* __GNUC__ */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static PyObject *__pyx_m;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
"par_build_table.pyx",
"array.pxd",
"stringsource",
};
struct __pyx_memoryview_obj;
typedef struct {
struct __pyx_memoryview_obj *memview;
char *data;
Py_ssize_t shape[8];
Py_ssize_t strides[8];
Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name; /* for error messages only */
struct __Pyx_StructField_* fields;
size_t size; /* sizeof(type) */
size_t arraysize[8]; /* length of array in each dimension */
int ndim;
char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject, c_H_ar */
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
#include <pythread.h>
#ifndef CYTHON_ATOMICS
#define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
/* Select an atomic increment/decrement implementation for memoryview
   acquisition counting. Two macro-name fixes (both corrected in later
   Cython releases):
   - __GNUC_PATCHLEVEL had no trailing underscores, so it was always 0
     inside #if and gcc 4.1.x never enabled atomics;
   - MSC_VER should be _MSC_VER, so the MSVC branch was never selected. */
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 || \
                    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)) && \
                    !defined(__i386__)
    #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
    #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using GNU atomics"
    #endif
#elif CYTHON_ATOMICS && _MSC_VER
    #include <Windows.h>
    #define __pyx_atomic_int_type LONG
    #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using MSVC atomics"
    #endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
    #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
    #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Using Intel atomics"
    #endif
#else
    /* No supported atomics: fall back to the lock-based helpers. */
    #undef CYTHON_ATOMICS
    #define CYTHON_ATOMICS 0
    #ifdef __PYX_DEBUG_ATOMICS
        #warning "Not using atomics"
    #endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview) \
__pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview) \
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview) \
__pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview) \
__pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
/*--- Type declarations ---*/
#ifndef _ARRAYARRAY_H
struct arrayobject;
typedef struct arrayobject arrayobject;
#endif
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params;
struct __pyx_opt_args_9radiotool_10algorithms_15par_build_table_build_table;
/* "radiotool/algorithms/par_build_table.pyx":11
 * from cython cimport parallel
 *
 * cdef struct Params: # <<<<<<<<<<<<<<
 *         double pen_val
 *         int p0
 */
/* C mirror of the `cdef struct Params` declared in par_build_table.pyx
   (generated by Cython; see the quoted source above). */
struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params {
  double pen_val;
  int p0;
  int p0_full;
  int n_beats;
  int n_pauses;
  int min_beats;
  int max_beats;
  int max_beats_with_padding;
  int all_full;
};
/* "radiotool/algorithms/par_build_table.pyx":481
 *
 *
 * cpdef int[:] build_table(double[:, :] trans_cost, double[:, :] penalty, # <<<<<<<<<<<<<<
 *         int min_beats=-1, int max_beats=-1, int first_pause=-1):
 *
 */
/* Holder for build_table()'s optional arguments; per Cython's generated-code
   convention, __pyx_n records how many optional arguments were supplied. */
struct __pyx_opt_args_9radiotool_10algorithms_15par_build_table_build_table {
  int __pyx_n;
  int min_beats;
  int max_beats;
  int first_pause;
};
/* "View.MemoryView":96
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
/* Instance layout of Cython's `array` class (View.MemoryView:96): an
 * owning, contiguous buffer that memoryviews can wrap.  Layout is part of
 * the generated module's ABI -- do not reorder fields. */
struct __pyx_array_obj {
  PyObject_HEAD
  char *data;                        /* owned payload buffer */
  Py_ssize_t len;                    /* total size of data in bytes */
  char *format;                      /* struct-style format string (C copy) */
  int ndim;
  Py_ssize_t *_shape;                /* _shape and _strides share one allocation */
  Py_ssize_t *_strides;
  Py_ssize_t itemsize;
  PyObject *mode;                    /* 'c' or 'fortran' ordering */
  PyObject *_format;                 /* Python-level owner of `format` */
  void (*callback_free_data)(void *); /* optional custom deallocator for data */
  int free_data;                     /* nonzero: free data on dealloc */
  int dtype_is_object;               /* nonzero: items are PyObject* needing refcounting */
};
/* "View.MemoryView":275
*
* @cname('__pyx_MemviewEnum')
* cdef class Enum(object): # <<<<<<<<<<<<<<
* cdef object name
* def __init__(self, name):
*/
/* Instance layout of the small Enum class (View.MemoryView:275) used for
 * the `generic` / `strided` / `indirect` / `contiguous` sentinel objects. */
struct __pyx_MemviewEnum_obj {
  PyObject_HEAD
  PyObject *name;   /* display name used by __repr__ */
};
/* "View.MemoryView":308
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
/* Instance layout of Cython's memoryview class (View.MemoryView:308).
 * Layout is ABI for this module; _memoryviewslice embeds it as a base. */
struct __pyx_memoryview_obj {
  PyObject_HEAD
  struct __pyx_vtabstruct_memoryview *__pyx_vtab; /* C-level method table */
  PyObject *obj;                 /* object exporting the buffer */
  PyObject *_size;               /* cached size in bytes (lazily computed) */
  PyObject *_array_interface;
  PyThread_type_lock lock;       /* guards acquisition counting when atomics are off */
  /* Two counters plus a pointer selecting the properly aligned one; updated
   * by the __pyx_add/sub_acquisition_count macros defined above (atomically
   * when CYTHON_ATOMICS, else under `lock`). */
  __pyx_atomic_int acquisition_count[2];
  __pyx_atomic_int *acquisition_count_aligned_p;
  Py_buffer view;                /* the acquired Py_buffer itself */
  int flags;                     /* buffer-acquisition flags */
  int dtype_is_object;           /* nonzero: items are PyObject* needing refcounting */
  __Pyx_TypeInfo *typeinfo;
};
/* "View.MemoryView":930
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
/* Instance layout of _memoryviewslice (View.MemoryView:930), the internal
 * subclass used to hand C-level memoryview slices back to Python. */
struct __pyx_memoryviewslice_obj {
  struct __pyx_memoryview_obj __pyx_base;  /* embedded base: memoryview */
  __Pyx_memviewslice from_slice;           /* the C slice being exposed */
  PyObject *from_object;                   /* originating Python object */
  /* dtype-specific converters for single items (element <-> PyObject*) */
  PyObject *(*to_object_func)(char *);
  int (*to_dtype_func)(char *, PyObject *);
};
/* "View.MemoryView":308
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
/* C-level virtual method table for the memoryview class; a populated
 * instance is reachable through __pyx_vtabptr_memoryview below and each
 * object's __pyx_vtab pointer. */
struct __pyx_vtabstruct_memoryview {
  char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
  PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
  PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
  PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
  PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
/* "View.MemoryView":930
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
/* Vtable for _memoryviewslice: inherits every slot from memoryview's
 * vtable unchanged (the embedded base is the whole table). */
struct __pyx_vtabstruct__memoryviewslice {
  struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil) \
if (acquire_gil) { \
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
PyGILState_Release(__pyx_gilstate_save); \
} else { \
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil) \
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext() \
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif /* CYTHON_REFNANNY */
#define __Pyx_XDECREF_SET(r, v) do { \
PyObject *tmp = (PyObject *) r; \
r = v; __Pyx_XDECREF(tmp); \
} while (0)
#define __Pyx_DECREF_SET(r, v) do { \
PyObject *tmp = (PyObject *) r; \
r = v; __Pyx_DECREF(tmp); \
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
#if CYTHON_COMPILING_IN_CPYTHON
/* Attribute lookup that bypasses PyObject_GetAttr's argument checks by
 * dispatching straight through the type's getattr slots.  Tries the
 * modern tp_getattro slot first, then (Python 2 only) the legacy char*
 * tp_getattr slot, and finally falls back to the generic API.
 * Returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
    PyTypeObject* tp = Py_TYPE(obj);
    if (likely(tp->tp_getattro))
        return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
    if (likely(tp->tp_getattr))
        /* legacy slot takes a C string; attr_name is a str here */
        return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
    return PyObject_GetAttr(obj, attr_name);
}
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
static PyObject *__Pyx_GetBuiltinName(PyObject *name); /*proto*/
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);
#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d
#define __Pyx_MEMVIEW_DIRECT 1
#define __Pyx_MEMVIEW_PTR 2
#define __Pyx_MEMVIEW_FULL 4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED 16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
static void __Pyx_WriteUnraisable(const char *name, int clineno,
int lineno, const char *filename,
int full_traceback); /*proto*/
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); /*proto*/
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \
const char* function_name); /*proto*/
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact); /*proto*/
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /*proto*/
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /*proto*/
#include <string.h>
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /*proto*/
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /*proto*/
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
#define UNARY_NEG_WOULD_OVERFLOW(x) (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *get_memview(PyObject *__pyx_v_self); /*proto*/
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/
static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) : \
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) : \
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self); /*proto*/
#if CYTHON_COMPILING_IN_CPYTHON
/* Append used inside list comprehensions: when the list still has spare
 * capacity, write the new item directly into the preallocated slot and
 * bump ob_size, skipping PyList_Append's growth logic entirely.
 * Takes its own reference to x in the fast path (PyList_SET_ITEM steals
 * it).  Returns 0 on success, -1 on failure (from PyList_Append). */
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
    PyListObject* L = (PyListObject*) list;
    Py_ssize_t len = Py_SIZE(list);
    if (likely(L->allocated > len)) {
        Py_INCREF(x);
        PyList_SET_ITEM(list, len, x);
        /* NOTE(review): assigning through Py_SIZE only compiles where the
         * macro expands to an lvalue (CPython < 3.9). */
        Py_SIZE(list) = len+1;
        return 0;
    }
    return PyList_Append(list, x);
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self); /*proto*/
/* Extend list L with the elements of iterable v.  On CPython this calls
 * the private _PyList_Extend fast path (which returns None on success);
 * elsewhere it appends via an end-of-list slice assignment.
 * Returns 0 on success, -1 with an exception set on failure. */
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject* result = _PyList_Extend((PyListObject*)L, v);
    if (likely(result != NULL)) {
        Py_DECREF(result);  /* discard the returned None */
        return 0;
    }
    return -1;
#else
    return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
#if CYTHON_COMPILING_IN_CPYTHON
/* General-purpose fast append: writes directly into the list's slot only
 * when there is spare capacity AND the list is more than half full --
 * presumably so CPython's own resize/shrink heuristics stay in charge for
 * sparsely-filled lists (confirm against Cython's ObjectHandling.c).
 * The bitwise `&` is deliberate: both cheap tests evaluate branchlessly.
 * Returns 0 on success, -1 on failure (from PyList_Append). */
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
    PyListObject* L = (PyListObject*) list;
    Py_ssize_t len = Py_SIZE(list);
    if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
        Py_INCREF(x);                  /* PyList_SET_ITEM steals this reference */
        PyList_SET_ITEM(list, len, x);
        /* NOTE(review): lvalue Py_SIZE assignment; CPython < 3.9 only. */
        Py_SIZE(list) = len+1;
        return 0;
    }
    return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self); /*proto*/
static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/
#ifndef _ARRAYARRAY_H
#define _ARRAYARRAY_H
/* Per-typecode descriptor for array.array elements; appears to mirror the
 * struct of the same name in CPython's Modules/arraymodule.c so generated
 * code can interpret array internals directly -- must stay layout-
 * compatible with the target interpreter. */
typedef struct arraydescr {
  int typecode;       /* e.g. 'i', 'd' */
  int itemsize;       /* element size in bytes */
  PyObject * (*getitem)(struct arrayobject *, Py_ssize_t);
  int (*setitem)(struct arrayobject *, Py_ssize_t, PyObject *);
#if PY_VERSION_HEX >= 0x03000000
  char *formats;      /* Python 3 adds a struct-format string */
#endif
} arraydescr;
/* Mirror of CPython's internal array.array object layout (arraymodule.c),
 * redeclared so the generated module can reach the raw element buffer.
 * NOTE(review): any layout drift between this and the running interpreter
 * is undefined behavior -- keep field order and conditionals intact. */
struct arrayobject {
  PyObject_HEAD
  Py_ssize_t ob_size;        /* number of elements currently stored */
  union {                    /* one buffer, viewed as the element type in use */
    char *ob_item;
    float *as_floats;
    double *as_doubles;
    int *as_ints;
    unsigned int *as_uints;
    unsigned char *as_uchars;
    signed char *as_schars;
    char *as_chars;
    unsigned long *as_ulongs;
    long *as_longs;
    short *as_shorts;
    unsigned short *as_ushorts;
    Py_UNICODE *as_pyunicodes;
    void *as_voidptr;
  } data;
  Py_ssize_t allocated;      /* capacity in elements (>= ob_size) */
  struct arraydescr *ob_descr; /* typecode descriptor, see arraydescr above */
  PyObject *weakreflist; /* List of weak references */
#if PY_VERSION_HEX >= 0x03000000
  int ob_exports; /* Number of exported buffers */
#endif
};
#ifndef NO_NEWARRAY_INLINE
/* Allocate a new arrayobject of `size` elements described by `descr`.
 * Returns a new reference, or NULL with an exception set (BadInternalCall
 * for negative size, MemoryError on overflow or allocation failure).
 * The element buffer is left uninitialized; size <= 0 gets a NULL buffer. */
static CYTHON_INLINE PyObject * newarrayobject(PyTypeObject *type, Py_ssize_t size,
    struct arraydescr *descr) {
    arrayobject *op;
    size_t nbytes;
    if (size < 0) {
        PyErr_BadInternalCall();
        return NULL;
    }
    nbytes = size * descr->itemsize;
    /* Overflow check: the division only reproduces `size` if the multiply
     * above did not wrap around. */
    if (nbytes / descr->itemsize != (size_t)size) {
        return PyErr_NoMemory();
    }
    op = (arrayobject *) type->tp_alloc(type, 0);
    if (op == NULL) {
        return NULL;
    }
    op->ob_descr = descr;
    op->allocated = size;
    op->weakreflist = NULL;
    op->ob_size = size;
    if (size <= 0) {
        op->data.ob_item = NULL;
    }
    else {
        op->data.ob_item = PyMem_NEW(char, nbytes);
        if (op->data.ob_item == NULL) {
            Py_DECREF(op);  /* releases the half-built object via tp_dealloc */
            return PyErr_NoMemory();
        }
    }
    return (PyObject *) op;
}
#else
PyObject* newarrayobject(PyTypeObject *type, Py_ssize_t size,
struct arraydescr *descr);
#endif /* ifndef NO_NEWARRAY_INLINE */
/* Resize the array's backing storage to hold exactly n items.
 *
 * On success both ob_size and allocated are set to n and 0 is returned.
 * On failure MemoryError is set and -1 is returned; the array's previous
 * buffer is left valid and untouched (PyMem_Resize only NULLs the local
 * pointer, the realloc target survives).
 *
 * Fix: guard the byte-count multiplication against size_t overflow using
 * the same divide-back check newarrayobject() already performs above;
 * previously a huge n could wrap and silently shrink the buffer. */
static CYTHON_INLINE int resize(arrayobject *self, Py_ssize_t n) {
    void *items = (void*) self->data.ob_item;
    size_t nbytes = (size_t)n * (size_t)self->ob_descr->itemsize;
    if (nbytes / (size_t)self->ob_descr->itemsize != (size_t)n) {
        PyErr_NoMemory();
        return -1;
    }
    PyMem_Resize(items, char, nbytes);
    if (items == NULL) {
        PyErr_NoMemory();
        return -1;
    }
    self->data.ob_item = (char*) items;
    self->ob_size = n;
    self->allocated = n;
    return 0;
}
/* Resize with amortized growth: if shrinking but still using more than a
 * quarter of the current capacity, just adjust ob_size without touching
 * the allocation; otherwise reallocate to n*3/2+1 elements (over-allocating
 * on growth, releasing memory on large shrinks).
 * Returns 0 on success, -1 with MemoryError set on allocation failure
 * (previous buffer stays valid).
 * NOTE(review): unlike newarrayobject(), the byte-count multiplication
 * `newsize * itemsize` is not overflow-checked -- upstream Cython
 * arrayarray.h issue, flagged here rather than hand-patched. */
static CYTHON_INLINE int resize_smart(arrayobject *self, Py_ssize_t n) {
    void *items = (void*) self->data.ob_item;
    Py_ssize_t newsize;
    if (n < self->allocated) {
        if (n*4 > self->allocated) {
            /* still > 25% utilized: keep the buffer, shrink logically */
            self->ob_size = n;
            return 0;
        }
    }
    newsize = n * 3 / 2 + 1;   /* growth factor 1.5, +1 so 0 still grows */
    PyMem_Resize(items, char, (size_t)(newsize * self->ob_descr->itemsize));
    if (items == NULL) {
        PyErr_NoMemory();
        return -1;
    }
    self->data.ob_item = (char*) items;
    self->ob_size = n;
    self->allocated = newsize;
    return 0;
}
#endif
/* Per-dimension metadata cached from a Py_buffer for fast indexing. */
typedef struct {
  Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
/* A Py_buffer plus a refcount so several local views can share one
 * acquisition. */
typedef struct {
  size_t refcount;
  Py_buffer pybuffer;
} __Pyx_Buffer;
/* Bookkeeping for one buffer-typed local variable: shared buffer, raw
 * data pointer, and up to 8 dimensions of cached dim info. */
typedef struct {
  __Pyx_Buffer *rcbuffer;
  char *data;
  __Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0};
static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj);
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *);
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
static PyObject *__pyx_memview_get_int(const char *itemp); /* proto */
static int __pyx_memview_set_int(const char *itemp, PyObject *obj); /* proto */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice *mvs,
char order, int ndim);
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize);
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object);
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /*proto*/
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *);
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_int(PyObject *);
static int __Pyx_check_binary_version(void);
#if !defined(__Pyx_PyIdentifier_FromString)
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s)
#else
#define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s)
#endif
#endif
static PyObject *__Pyx_ImportModule(const char *name); /*proto*/
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /*proto*/
/* One cache slot: maps a generated-C line number to the PyCodeObject
 * fabricated for it (used when building tracebacks -- see
 * __pyx_find_code_object / __Pyx_AddTraceback below). */
typedef struct {
  int code_line;
  PyCodeObject* code_object;
} __Pyx_CodeObjectCacheEntry;
/* Growable sorted cache of the entries above, searched by bisection. */
struct __Pyx_CodeObjectCache {
  int count;       /* entries in use */
  int max_count;   /* allocated capacity */
  __Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename); /*proto*/
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
/* Module declarations from 'cython.view' */
/* Module declarations from 'cython' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'cpython.exc' */
/* Module declarations from 'cpython.mem' */
/* Module declarations from 'array' */
/* Module declarations from 'cpython.array' */
static PyTypeObject *__pyx_ptype_7cpython_5array_array = 0;
static CYTHON_INLINE arrayobject *__pyx_f_7cpython_5array_clone(arrayobject *, Py_ssize_t, int); /*proto*/
static CYTHON_INLINE int __pyx_f_7cpython_5array_extend_buffer(arrayobject *, char *, Py_ssize_t); /*proto*/
/* Module declarations from 'radiotool.algorithms.par_build_table' */
static PyTypeObject *__pyx_array_type = 0;
static PyTypeObject *__pyx_MemviewEnum_type = 0;
static PyTypeObject *__pyx_memoryview_type = 0;
static PyTypeObject *__pyx_memoryviewslice_type = 0;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static void __pyx_f_9radiotool_10algorithms_15par_build_table_get_tc_column(__Pyx_memviewslice, int, __Pyx_memviewslice, int, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params); /*proto*/
static double __pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_value(__Pyx_memviewslice, int, int, int, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params); /*proto*/
static void __pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_column(__Pyx_memviewslice, int, __Pyx_memviewslice, int, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params); /*proto*/
static void __pyx_f_9radiotool_10algorithms_15par_build_table_space_efficient_cost_with_duration_constraint(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice); /*proto*/
static void __pyx_f_9radiotool_10algorithms_15par_build_table_backward_space_efficient_cost_with_duration_constraint(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice); /*proto*/
static void __pyx_f_9radiotool_10algorithms_15par_build_table_divide_and_conquer_cost_and_path(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int, __Pyx_memviewslice, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice); /*proto*/
static __Pyx_memviewslice __pyx_f_9radiotool_10algorithms_15par_build_table_build_table(__Pyx_memviewslice, __Pyx_memviewslice, int __pyx_skip_dispatch, struct __pyx_opt_args_9radiotool_10algorithms_15par_build_table_build_table *__pyx_optional_args); /*proto*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
static void *__pyx_align_pointer(void *, size_t); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/
static int __pyx_memoryview_err(PyObject *, char *); /*proto*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 };
static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, IS_UNSIGNED(int) ? 'U' : 'I', IS_UNSIGNED(int), 0 };
#define __Pyx_MODULE_NAME "radiotool.algorithms.par_build_table"
int __pyx_module_is_main_radiotool__algorithms__par_build_table = 0;
/* Implementation of 'radiotool.algorithms.par_build_table' */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_xrange;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin_Ellipsis;
static PyObject *__pyx_builtin_TypeError;
static PyObject *__pyx_builtin_id;
static PyObject *__pyx_builtin_IndexError;
static PyObject *__pyx_pf_9radiotool_10algorithms_15par_build_table_build_table(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_trans_cost, __Pyx_memviewslice __pyx_v_penalty, int __pyx_v_min_beats, int __pyx_v_max_beats, int __pyx_v_first_pause); /* proto */
static int __pyx_pf_7cpython_5array_5array___getbuffer__(arrayobject *__pyx_v_self, Py_buffer *__pyx_v_info, CYTHON_UNUSED int __pyx_v_flags); /* proto */
static void __pyx_pf_7cpython_5array_5array_2__releasebuffer__(CYTHON_UNUSED arrayobject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
static int __pyx_array_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
static int __pyx_array_getbuffer_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_array_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *get_memview_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_array_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
static PyObject *__pyx_array_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
static int __pyx_array_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
static int __pyx_MemviewEnum_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
static PyObject *__pyx_MemviewEnum_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static int __pyx_memoryview_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
static void __pyx_memoryview_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
static int __pyx_memoryview_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
static int __pyx_memoryview_getbuffer_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static PyObject *__pyx_memoryview_transpose_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview__get__base_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_get_shape_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_get_strides_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_get_suboffsets_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_get_ndim_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
/* NOTE(review): Cython-generated module boilerplate (generated from
   radiotool/algorithms/par_build_table.pyx).  Do not edit by hand --
   change the .pyx source and regenerate instead. */
/* Forward declarations of the generated memoryview/array helper
   functions (Cython's built-in MemoryView machinery). */
static PyObject *__pyx_memoryview_get_itemsize_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_get_nbytes_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_get_size_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_memoryview_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static void __pyx_memoryviewslice_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryviewslice__get__base_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
/* Raw character data for the module's string constants (__pyx_k_*).
   These back the interned PyObject* declarations further below; the
   interning itself presumably happens in the module-init code, which is
   outside this chunk. */
static char __pyx_k_O[] = "O";
static char __pyx_k_c[] = "c";
static char __pyx_k_d[] = "d";
static char __pyx_k_i[] = "i";
static char __pyx_k_id[] = "id";
static char __pyx_k_obj[] = "obj";
static char __pyx_k_base[] = "base";
static char __pyx_k_main[] = "__main__";
static char __pyx_k_mode[] = "mode";
static char __pyx_k_name[] = "name";
static char __pyx_k_ndim[] = "ndim";
static char __pyx_k_pack[] = "pack";
static char __pyx_k_size[] = "size";
static char __pyx_k_step[] = "step";
static char __pyx_k_stop[] = "stop";
static char __pyx_k_test[] = "__test__";
static char __pyx_k_ASCII[] = "ASCII";
static char __pyx_k_class[] = "__class__";
static char __pyx_k_error[] = "error";
static char __pyx_k_flags[] = "flags";
static char __pyx_k_range[] = "range";
static char __pyx_k_shape[] = "shape";
static char __pyx_k_start[] = "start";
static char __pyx_k_decode[] = "decode";
static char __pyx_k_encode[] = "encode";
static char __pyx_k_format[] = "format";
static char __pyx_k_import[] = "__import__";
static char __pyx_k_name_2[] = "__name__";
static char __pyx_k_struct[] = "struct";
static char __pyx_k_unpack[] = "unpack";
static char __pyx_k_xrange[] = "xrange";
static char __pyx_k_fortran[] = "fortran";
static char __pyx_k_memview[] = "memview";
static char __pyx_k_penalty[] = "penalty";
static char __pyx_k_Ellipsis[] = "Ellipsis";
static char __pyx_k_itemsize[] = "itemsize";
static char __pyx_k_TypeError[] = "TypeError";
static char __pyx_k_enumerate[] = "enumerate";
static char __pyx_k_max_beats[] = "max_beats";
static char __pyx_k_min_beats[] = "min_beats";
static char __pyx_k_IndexError[] = "IndexError";
static char __pyx_k_ValueError[] = "ValueError";
static char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
static char __pyx_k_trans_cost[] = "trans_cost";
static char __pyx_k_MemoryError[] = "MemoryError";
static char __pyx_k_first_pause[] = "first_pause";
static char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
static char __pyx_k_allocate_buffer[] = "allocate_buffer";
static char __pyx_k_dtype_is_object[] = "dtype_is_object";
static char __pyx_k_pyx_releasebuffer[] = "__pyx_releasebuffer";
static char __pyx_k_strided_and_direct[] = "<strided and direct>";
static char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
static char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
static char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
static char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
static char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
static char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
static char __pyx_k_getbuffer_obj_view_flags[] = "getbuffer(obj, view, flags)";
static char __pyx_k_Dimension_d_is_not_direct[] = "Dimension %d is not direct";
static char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
static char __pyx_k_Index_out_of_bounds_axis_d[] = "Index out of bounds (axis %d)";
static char __pyx_k_Step_may_not_be_zero_axis_d[] = "Step may not be zero (axis %d)";
static char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
static char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
static char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
static char __pyx_k_All_dimensions_preceding_dimensi[] = "All dimensions preceding dimension %d must be indexed and not sliced";
static char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
static char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
static char __pyx_k_Cannot_transpose_memoryview_with[] = "Cannot transpose memoryview with indirect dimensions";
static char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
static char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
static char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
static char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
static char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
static char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
static char __pyx_k_unable_to_allocate_shape_or_stri[] = "unable to allocate shape or strides.";
/* Interned Python objects built from the char data above.  Cython's
   naming convention: n_s = identifier string, kp_s = string constant,
   n_b = bytes, n_u = unicode.  Presumably populated during module init
   (not visible in this chunk). */
static PyObject *__pyx_n_s_ASCII;
static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
static PyObject *__pyx_n_s_Ellipsis;
static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
static PyObject *__pyx_n_s_IndexError;
static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
static PyObject *__pyx_kp_s_MemoryView_of_r_object;
static PyObject *__pyx_n_b_O;
static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_allocate_buffer;
static PyObject *__pyx_n_s_base;
static PyObject *__pyx_n_b_c;
static PyObject *__pyx_n_s_c;
static PyObject *__pyx_n_u_c;
static PyObject *__pyx_n_s_class;
static PyObject *__pyx_kp_s_contiguous_and_direct;
static PyObject *__pyx_kp_s_contiguous_and_indirect;
static PyObject *__pyx_n_s_d;
static PyObject *__pyx_n_s_decode;
static PyObject *__pyx_n_s_dtype_is_object;
static PyObject *__pyx_n_s_encode;
static PyObject *__pyx_n_s_enumerate;
static PyObject *__pyx_n_s_error;
static PyObject *__pyx_n_s_first_pause;
static PyObject *__pyx_n_s_flags;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_b_fortran;
static PyObject *__pyx_n_s_fortran;
static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi;
static PyObject *__pyx_n_s_i;
static PyObject *__pyx_n_s_id;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_itemsize;
static PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_max_beats;
static PyObject *__pyx_n_s_memview;
static PyObject *__pyx_n_s_min_beats;
static PyObject *__pyx_n_s_mode;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_name_2;
static PyObject *__pyx_n_s_ndim;
static PyObject *__pyx_n_s_obj;
static PyObject *__pyx_n_s_pack;
static PyObject *__pyx_n_s_penalty;
static PyObject *__pyx_n_s_pyx_getbuffer;
static PyObject *__pyx_n_s_pyx_releasebuffer;
static PyObject *__pyx_n_s_pyx_vtable;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_size;
static PyObject *__pyx_n_s_start;
static PyObject *__pyx_n_s_step;
static PyObject *__pyx_n_s_stop;
static PyObject *__pyx_kp_s_strided_and_direct;
static PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
static PyObject *__pyx_kp_s_strided_and_indirect;
static PyObject *__pyx_n_s_struct;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_n_s_trans_cost;
static PyObject *__pyx_kp_s_unable_to_allocate_array_data;
static PyObject *__pyx_kp_s_unable_to_allocate_shape_or_stri;
static PyObject *__pyx_n_s_unpack;
static PyObject *__pyx_n_s_xrange;
/* Cached small-integer objects. */
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_neg_1;
/* Cached tuple constants (contents assembled elsewhere in the
   generated module). */
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__15;
static PyObject *__pyx_tuple__16;
static PyObject *__pyx_tuple__17;
static PyObject *__pyx_tuple__18;
static PyObject *__pyx_tuple__19;
static PyObject *__pyx_tuple__20;
/* NOTE(review): Cython-generated C (from par_build_table.pyx, cdef
   `get_tc_column`, declared nogil).  Do not edit this C by hand; fix
   the .pyx source and regenerate.

   What the visible code does: copies one `column` of the transition
   cost matrix `tc` into the expanded vector `tc_column` -- the first
   p.p0 rows of `tc` are replicated p.max_beats times (stride p.n_beats),
   then rows p.p0.. are appended at indices p.p0_full..p.all_full.
   When `backward` is nonzero the `tc` lookup is transposed
   (tc[tc_index, j] instead of tc[j, tc_index]).  Afterwards p.pen_val
   is added/subtracted over index ranges to enforce the constraints
   described by the inline .pyx comments below (minimum/maximum music
   segment length around pauses, and only moving between beats whose
   segment index follows). */
/* "radiotool/algorithms/par_build_table.pyx":23
 *
 *
 * cdef void get_tc_column(double[:, :] tc, int column, double[:] tc_column, int backward, Params p) nogil: # <<<<<<<<<<<<<<
 * cdef int tc_index = 0
 * cdef int beat_seg_i = 0
 */
static void __pyx_f_9radiotool_10algorithms_15par_build_table_get_tc_column(__Pyx_memviewslice __pyx_v_tc, int __pyx_v_column, __Pyx_memviewslice __pyx_v_tc_column, int __pyx_v_backward, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params __pyx_v_p) {
int __pyx_v_tc_index;
int __pyx_v_beat_seg_i;
int __pyx_v_i;
int __pyx_v_j;
/* Compiler-generated temporaries (loop bounds, indices, booleans). */
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
int __pyx_t_12;
int __pyx_t_13;
int __pyx_t_14;
int __pyx_t_15;
int __pyx_t_16;
int __pyx_t_17;
int __pyx_t_18;
int __pyx_t_19;
int __pyx_t_20;
int __pyx_t_21;
int __pyx_t_22;
int __pyx_t_23;
int __pyx_t_24;
int __pyx_t_25;
long __pyx_t_26;
int __pyx_t_27;
/* "radiotool/algorithms/par_build_table.pyx":24
 *
 * cdef void get_tc_column(double[:, :] tc, int column, double[:] tc_column, int backward, Params p) nogil:
 * cdef int tc_index = 0 # <<<<<<<<<<<<<<
 * cdef int beat_seg_i = 0
 * cdef int i, j
 */
__pyx_v_tc_index = 0;
/* "radiotool/algorithms/par_build_table.pyx":25
 * cdef void get_tc_column(double[:, :] tc, int column, double[:] tc_column, int backward, Params p) nogil:
 * cdef int tc_index = 0
 * cdef int beat_seg_i = 0 # <<<<<<<<<<<<<<
 * cdef int i, j
 *
 */
__pyx_v_beat_seg_i = 0;
/* Map the expanded column index back into a row/column of `tc`:
   columns >= p.p0_full address the tail rows directly; otherwise the
   replicated region wraps modulo p.n_beats. */
/* "radiotool/algorithms/par_build_table.pyx":28
 * cdef int i, j
 *
 * if column >= p.p0_full: # <<<<<<<<<<<<<<
 * tc_index = p.p0 + (column - p.p0_full)
 * else:
 */
__pyx_t_1 = ((__pyx_v_column >= __pyx_v_p.p0_full) != 0);
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":29
 *
 * if column >= p.p0_full:
 * tc_index = p.p0 + (column - p.p0_full) # <<<<<<<<<<<<<<
 * else:
 * tc_index = column % p.n_beats
 */
__pyx_v_tc_index = (__pyx_v_p.p0 + (__pyx_v_column - __pyx_v_p.p0_full));
goto __pyx_L3;
}
/*else*/ {
/* "radiotool/algorithms/par_build_table.pyx":31
 * tc_index = p.p0 + (column - p.p0_full)
 * else:
 * tc_index = column % p.n_beats # <<<<<<<<<<<<<<
 *
 * if not backward:
 */
__pyx_v_tc_index = (__pyx_v_column % __pyx_v_p.n_beats);
}
__pyx_L3:;
/* "radiotool/algorithms/par_build_table.pyx":33
 * tc_index = column % p.n_beats
 *
 * if not backward: # <<<<<<<<<<<<<<
 * for i in range(p.max_beats):
 * for j in range(p.p0):
 */
__pyx_t_1 = ((!(__pyx_v_backward != 0)) != 0);
if (__pyx_t_1) {
/* Forward pass: read column `tc_index` of `tc`. */
/* "radiotool/algorithms/par_build_table.pyx":34
 *
 * if not backward:
 * for i in range(p.max_beats): # <<<<<<<<<<<<<<
 * for j in range(p.p0):
 * tc_column[i * p.n_beats + j] = tc[j, tc_index]
 */
__pyx_t_2 = __pyx_v_p.max_beats;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "radiotool/algorithms/par_build_table.pyx":35
 * if not backward:
 * for i in range(p.max_beats):
 * for j in range(p.p0): # <<<<<<<<<<<<<<
 * tc_column[i * p.n_beats + j] = tc[j, tc_index]
 *
 */
__pyx_t_4 = __pyx_v_p.p0;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_j = __pyx_t_5;
/* "radiotool/algorithms/par_build_table.pyx":36
 * for i in range(p.max_beats):
 * for j in range(p.p0):
 * tc_column[i * p.n_beats + j] = tc[j, tc_index] # <<<<<<<<<<<<<<
 *
 * for i in range(p.p0_full, p.all_full):
 */
__pyx_t_6 = __pyx_v_j;
__pyx_t_7 = __pyx_v_tc_index;
__pyx_t_8 = ((__pyx_v_i * __pyx_v_p.n_beats) + __pyx_v_j);
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_8 * __pyx_v_tc_column.strides[0]) )) = (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_6 * __pyx_v_tc.strides[0]) ) + __pyx_t_7 * __pyx_v_tc.strides[1]) )));
}
}
/* "radiotool/algorithms/par_build_table.pyx":38
 * tc_column[i * p.n_beats + j] = tc[j, tc_index]
 *
 * for i in range(p.p0_full, p.all_full): # <<<<<<<<<<<<<<
 * tc_column[i] = tc[p.p0 + i - p.p0_full, tc_index]
 * else:
 */
__pyx_t_2 = __pyx_v_p.all_full;
for (__pyx_t_3 = __pyx_v_p.p0_full; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "radiotool/algorithms/par_build_table.pyx":39
 *
 * for i in range(p.p0_full, p.all_full):
 * tc_column[i] = tc[p.p0 + i - p.p0_full, tc_index] # <<<<<<<<<<<<<<
 * else:
 * for i in range(p.max_beats):
 */
__pyx_t_4 = ((__pyx_v_p.p0 + __pyx_v_i) - __pyx_v_p.p0_full);
__pyx_t_5 = __pyx_v_tc_index;
__pyx_t_9 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_9 * __pyx_v_tc_column.strides[0]) )) = (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_4 * __pyx_v_tc.strides[0]) ) + __pyx_t_5 * __pyx_v_tc.strides[1]) )));
}
goto __pyx_L4;
}
/*else*/ {
/* Backward pass: identical layout, but read row `tc_index` of `tc`
   (indices transposed). */
/* "radiotool/algorithms/par_build_table.pyx":41
 * tc_column[i] = tc[p.p0 + i - p.p0_full, tc_index]
 * else:
 * for i in range(p.max_beats): # <<<<<<<<<<<<<<
 * for j in range(p.p0):
 * tc_column[i * p.n_beats + j] = tc[tc_index, j]
 */
__pyx_t_2 = __pyx_v_p.max_beats;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "radiotool/algorithms/par_build_table.pyx":42
 * else:
 * for i in range(p.max_beats):
 * for j in range(p.p0): # <<<<<<<<<<<<<<
 * tc_column[i * p.n_beats + j] = tc[tc_index, j]
 *
 */
__pyx_t_10 = __pyx_v_p.p0;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_10; __pyx_t_11+=1) {
__pyx_v_j = __pyx_t_11;
/* "radiotool/algorithms/par_build_table.pyx":43
 * for i in range(p.max_beats):
 * for j in range(p.p0):
 * tc_column[i * p.n_beats + j] = tc[tc_index, j] # <<<<<<<<<<<<<<
 *
 * for i in range(p.p0_full, p.all_full):
 */
__pyx_t_12 = __pyx_v_tc_index;
__pyx_t_13 = __pyx_v_j;
__pyx_t_14 = ((__pyx_v_i * __pyx_v_p.n_beats) + __pyx_v_j);
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_14 * __pyx_v_tc_column.strides[0]) )) = (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_12 * __pyx_v_tc.strides[0]) ) + __pyx_t_13 * __pyx_v_tc.strides[1]) )));
}
}
/* "radiotool/algorithms/par_build_table.pyx":45
 * tc_column[i * p.n_beats + j] = tc[tc_index, j]
 *
 * for i in range(p.p0_full, p.all_full): # <<<<<<<<<<<<<<
 * tc_column[i] = tc[tc_index, p.p0 + i - p.p0_full]
 *
 */
__pyx_t_2 = __pyx_v_p.all_full;
for (__pyx_t_3 = __pyx_v_p.p0_full; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "radiotool/algorithms/par_build_table.pyx":46
 *
 * for i in range(p.p0_full, p.all_full):
 * tc_column[i] = tc[tc_index, p.p0 + i - p.p0_full] # <<<<<<<<<<<<<<
 *
 * #--- CONSTRAINTS ---#
 */
__pyx_t_10 = __pyx_v_tc_index;
__pyx_t_11 = ((__pyx_v_p.p0 + __pyx_v_i) - __pyx_v_p.p0_full);
__pyx_t_15 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_15 * __pyx_v_tc_column.strides[0]) )) = (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_10 * __pyx_v_tc.strides[0]) ) + __pyx_t_11 * __pyx_v_tc.strides[1]) )));
}
}
__pyx_L4:;
/* Constraint penalties start here; each branch pair handles the
   forward and backward formulations of the same rule. */
/* "radiotool/algorithms/par_build_table.pyx":50
 * #--- CONSTRAINTS ---#
 * # * don't go to pause before minimum length music segment
 * if (column == p.p0_full) and (not backward): # <<<<<<<<<<<<<<
 * for i in range(p.n_beats * p.min_beats):
 * tc_column[i] += p.pen_val
 */
__pyx_t_1 = ((__pyx_v_column == __pyx_v_p.p0_full) != 0);
if (__pyx_t_1) {
__pyx_t_16 = ((!(__pyx_v_backward != 0)) != 0);
__pyx_t_17 = __pyx_t_16;
} else {
__pyx_t_17 = __pyx_t_1;
}
if (__pyx_t_17) {
/* "radiotool/algorithms/par_build_table.pyx":51
 * # * don't go to pause before minimum length music segment
 * if (column == p.p0_full) and (not backward):
 * for i in range(p.n_beats * p.min_beats): # <<<<<<<<<<<<<<
 * tc_column[i] += p.pen_val
 * elif (column < p.n_beats * p.min_beats) and backward:
 */
__pyx_t_2 = (__pyx_v_p.n_beats * __pyx_v_p.min_beats);
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "radiotool/algorithms/par_build_table.pyx":52
 * if (column == p.p0_full) and (not backward):
 * for i in range(p.n_beats * p.min_beats):
 * tc_column[i] += p.pen_val # <<<<<<<<<<<<<<
 * elif (column < p.n_beats * p.min_beats) and backward:
 * tc_column[p.p0_full] += p.pen_val
 */
__pyx_t_18 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_18 * __pyx_v_tc_column.strides[0]) )) += __pyx_v_p.pen_val;
}
goto __pyx_L17;
}
/* "radiotool/algorithms/par_build_table.pyx":53
 * for i in range(p.n_beats * p.min_beats):
 * tc_column[i] += p.pen_val
 * elif (column < p.n_beats * p.min_beats) and backward: # <<<<<<<<<<<<<<
 * tc_column[p.p0_full] += p.pen_val
 *
 */
__pyx_t_17 = (__pyx_v_column < (__pyx_v_p.n_beats * __pyx_v_p.min_beats));
if (__pyx_t_17) {
__pyx_t_1 = (__pyx_v_backward != 0);
} else {
__pyx_t_1 = __pyx_t_17;
}
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":54
 * tc_column[i] += p.pen_val
 * elif (column < p.n_beats * p.min_beats) and backward:
 * tc_column[p.p0_full] += p.pen_val # <<<<<<<<<<<<<<
 *
 * # * don't go to pause after maximum length music segment
 */
__pyx_t_2 = __pyx_v_p.p0_full;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_2 * __pyx_v_tc_column.strides[0]) )) += __pyx_v_p.pen_val;
goto __pyx_L17;
}
__pyx_L17:;
/* "radiotool/algorithms/par_build_table.pyx":57
 *
 * # * don't go to pause after maximum length music segment
 * if (column == p.p0_full) and (not backward): # <<<<<<<<<<<<<<
 * for i in range(p.n_beats * p.max_beats, p.p0_full):
 * tc_column[i] += p.pen_val
 */
__pyx_t_1 = ((__pyx_v_column == __pyx_v_p.p0_full) != 0);
if (__pyx_t_1) {
__pyx_t_17 = ((!(__pyx_v_backward != 0)) != 0);
__pyx_t_16 = __pyx_t_17;
} else {
__pyx_t_16 = __pyx_t_1;
}
if (__pyx_t_16) {
/* "radiotool/algorithms/par_build_table.pyx":58
 * # * don't go to pause after maximum length music segment
 * if (column == p.p0_full) and (not backward):
 * for i in range(p.n_beats * p.max_beats, p.p0_full): # <<<<<<<<<<<<<<
 * tc_column[i] += p.pen_val
 * elif (p.p0_full > column >= p.n_beats * p.max_beats) and backward:
 */
__pyx_t_3 = __pyx_v_p.p0_full;
for (__pyx_t_19 = (__pyx_v_p.n_beats * __pyx_v_p.max_beats); __pyx_t_19 < __pyx_t_3; __pyx_t_19+=1) {
__pyx_v_i = __pyx_t_19;
/* "radiotool/algorithms/par_build_table.pyx":59
 * if (column == p.p0_full) and (not backward):
 * for i in range(p.n_beats * p.max_beats, p.p0_full):
 * tc_column[i] += p.pen_val # <<<<<<<<<<<<<<
 * elif (p.p0_full > column >= p.n_beats * p.max_beats) and backward:
 * tc_column[p.p0_full] += p.pen_val
 */
__pyx_t_20 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_20 * __pyx_v_tc_column.strides[0]) )) += __pyx_v_p.pen_val;
}
goto __pyx_L20;
}
/* "radiotool/algorithms/par_build_table.pyx":60
 * for i in range(p.n_beats * p.max_beats, p.p0_full):
 * tc_column[i] += p.pen_val
 * elif (p.p0_full > column >= p.n_beats * p.max_beats) and backward: # <<<<<<<<<<<<<<
 * tc_column[p.p0_full] += p.pen_val
 *
 */
__pyx_t_16 = (__pyx_v_p.p0_full > __pyx_v_column);
if (__pyx_t_16) {
__pyx_t_16 = (__pyx_v_column >= (__pyx_v_p.n_beats * __pyx_v_p.max_beats));
}
if (__pyx_t_16) {
__pyx_t_1 = (__pyx_v_backward != 0);
} else {
__pyx_t_1 = __pyx_t_16;
}
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":61
 * tc_column[i] += p.pen_val
 * elif (p.p0_full > column >= p.n_beats * p.max_beats) and backward:
 * tc_column[p.p0_full] += p.pen_val # <<<<<<<<<<<<<<
 *
 * # * after pause, don't go to non-first segment beat
 */
__pyx_t_3 = __pyx_v_p.p0_full;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_3 * __pyx_v_tc_column.strides[0]) )) += __pyx_v_p.pen_val;
goto __pyx_L20;
}
__pyx_L20:;
/* "radiotool/algorithms/par_build_table.pyx":64
 *
 * # * after pause, don't go to non-first segment beat
 * if (p.n_beats <= column < p.p0_full) and (not backward): # <<<<<<<<<<<<<<
 * for i in range(p.p0_full, p.all_full):
 * tc_column[i] += p.pen_val
 */
__pyx_t_1 = (__pyx_v_p.n_beats <= __pyx_v_column);
if (__pyx_t_1) {
__pyx_t_1 = (__pyx_v_column < __pyx_v_p.p0_full);
}
if ((__pyx_t_1 != 0)) {
__pyx_t_16 = ((!(__pyx_v_backward != 0)) != 0);
__pyx_t_17 = __pyx_t_16;
} else {
__pyx_t_17 = (__pyx_t_1 != 0);
}
if (__pyx_t_17) {
/* "radiotool/algorithms/par_build_table.pyx":65
 * # * after pause, don't go to non-first segment beat
 * if (p.n_beats <= column < p.p0_full) and (not backward):
 * for i in range(p.p0_full, p.all_full): # <<<<<<<<<<<<<<
 * tc_column[i] += p.pen_val
 * elif (column >= p.p0_full) and backward:
 */
__pyx_t_19 = __pyx_v_p.all_full;
for (__pyx_t_21 = __pyx_v_p.p0_full; __pyx_t_21 < __pyx_t_19; __pyx_t_21+=1) {
__pyx_v_i = __pyx_t_21;
/* "radiotool/algorithms/par_build_table.pyx":66
 * if (p.n_beats <= column < p.p0_full) and (not backward):
 * for i in range(p.p0_full, p.all_full):
 * tc_column[i] += p.pen_val # <<<<<<<<<<<<<<
 * elif (column >= p.p0_full) and backward:
 * for i in range(p.n_beats, p.p0_full):
 */
__pyx_t_22 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_22 * __pyx_v_tc_column.strides[0]) )) += __pyx_v_p.pen_val;
}
goto __pyx_L23;
}
/* "radiotool/algorithms/par_build_table.pyx":67
 * for i in range(p.p0_full, p.all_full):
 * tc_column[i] += p.pen_val
 * elif (column >= p.p0_full) and backward: # <<<<<<<<<<<<<<
 * for i in range(p.n_beats, p.p0_full):
 * tc_column[i] += p.pen_val
 */
__pyx_t_17 = (__pyx_v_column >= __pyx_v_p.p0_full);
if (__pyx_t_17) {
__pyx_t_1 = (__pyx_v_backward != 0);
} else {
__pyx_t_1 = __pyx_t_17;
}
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":68
 * tc_column[i] += p.pen_val
 * elif (column >= p.p0_full) and backward:
 * for i in range(p.n_beats, p.p0_full): # <<<<<<<<<<<<<<
 * tc_column[i] += p.pen_val
 *
 */
__pyx_t_19 = __pyx_v_p.p0_full;
for (__pyx_t_21 = __pyx_v_p.n_beats; __pyx_t_21 < __pyx_t_19; __pyx_t_21+=1) {
__pyx_v_i = __pyx_t_21;
/* "radiotool/algorithms/par_build_table.pyx":69
 * elif (column >= p.p0_full) and backward:
 * for i in range(p.n_beats, p.p0_full):
 * tc_column[i] += p.pen_val # <<<<<<<<<<<<<<
 *
 * # * don't move between beats the don't follow segment index
 */
__pyx_t_23 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_23 * __pyx_v_tc_column.strides[0]) )) += __pyx_v_p.pen_val;
}
goto __pyx_L23;
}
__pyx_L23:;
/* "radiotool/algorithms/par_build_table.pyx":72
 *
 * # * don't move between beats the don't follow segment index
 * if column < p.p0_full: # <<<<<<<<<<<<<<
 * for i in range(p.p0_full):
 * tc_column[i] += p.pen_val
 */
__pyx_t_1 = ((__pyx_v_column < __pyx_v_p.p0_full) != 0);
if (__pyx_t_1) {
/* Penalize every replicated beat, then subtract the penalty back off
   the one adjacent segment-index band computed below. */
/* "radiotool/algorithms/par_build_table.pyx":73
 * # * don't move between beats the don't follow segment index
 * if column < p.p0_full:
 * for i in range(p.p0_full): # <<<<<<<<<<<<<<
 * tc_column[i] += p.pen_val
 *
 */
__pyx_t_19 = __pyx_v_p.p0_full;
for (__pyx_t_21 = 0; __pyx_t_21 < __pyx_t_19; __pyx_t_21+=1) {
__pyx_v_i = __pyx_t_21;
/* "radiotool/algorithms/par_build_table.pyx":74
 * if column < p.p0_full:
 * for i in range(p.p0_full):
 * tc_column[i] += p.pen_val # <<<<<<<<<<<<<<
 *
 * beat_seg_i = column / p.n_beats
 */
__pyx_t_24 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_24 * __pyx_v_tc_column.strides[0]) )) += __pyx_v_p.pen_val;
}
/* "radiotool/algorithms/par_build_table.pyx":76
 * tc_column[i] += p.pen_val
 *
 * beat_seg_i = column / p.n_beats # <<<<<<<<<<<<<<
 *
 * if (beat_seg_i > 0) and (not backward):
 */
__pyx_v_beat_seg_i = (__pyx_v_column / __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":78
 * beat_seg_i = column / p.n_beats
 *
 * if (beat_seg_i > 0) and (not backward): # <<<<<<<<<<<<<<
 * for i in range((beat_seg_i - 1) * p.n_beats, beat_seg_i * p.n_beats):
 * tc_column[i] -= p.pen_val
 */
__pyx_t_1 = ((__pyx_v_beat_seg_i > 0) != 0);
if (__pyx_t_1) {
__pyx_t_17 = ((!(__pyx_v_backward != 0)) != 0);
__pyx_t_16 = __pyx_t_17;
} else {
__pyx_t_16 = __pyx_t_1;
}
if (__pyx_t_16) {
/* "radiotool/algorithms/par_build_table.pyx":79
 *
 * if (beat_seg_i > 0) and (not backward):
 * for i in range((beat_seg_i - 1) * p.n_beats, beat_seg_i * p.n_beats): # <<<<<<<<<<<<<<
 * tc_column[i] -= p.pen_val
 *
 */
__pyx_t_19 = (__pyx_v_beat_seg_i * __pyx_v_p.n_beats);
for (__pyx_t_21 = ((__pyx_v_beat_seg_i - 1) * __pyx_v_p.n_beats); __pyx_t_21 < __pyx_t_19; __pyx_t_21+=1) {
__pyx_v_i = __pyx_t_21;
/* "radiotool/algorithms/par_build_table.pyx":80
 * if (beat_seg_i > 0) and (not backward):
 * for i in range((beat_seg_i - 1) * p.n_beats, beat_seg_i * p.n_beats):
 * tc_column[i] -= p.pen_val # <<<<<<<<<<<<<<
 *
 * elif (beat_seg_i < p.max_beats - 1) and backward:
 */
__pyx_t_25 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_25 * __pyx_v_tc_column.strides[0]) )) -= __pyx_v_p.pen_val;
}
goto __pyx_L31;
}
/* "radiotool/algorithms/par_build_table.pyx":82
 * tc_column[i] -= p.pen_val
 *
 * elif (beat_seg_i < p.max_beats - 1) and backward: # <<<<<<<<<<<<<<
 * for i in range((beat_seg_i + 1) * p.n_beats, (beat_seg_i + 2) * p.n_beats):
 * tc_column[i] -= p.pen_val
 */
__pyx_t_16 = (__pyx_v_beat_seg_i < (__pyx_v_p.max_beats - 1));
if (__pyx_t_16) {
__pyx_t_1 = (__pyx_v_backward != 0);
} else {
__pyx_t_1 = __pyx_t_16;
}
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":83
 *
 * elif (beat_seg_i < p.max_beats - 1) and backward:
 * for i in range((beat_seg_i + 1) * p.n_beats, (beat_seg_i + 2) * p.n_beats): # <<<<<<<<<<<<<<
 * tc_column[i] -= p.pen_val
 *
 */
__pyx_t_26 = ((__pyx_v_beat_seg_i + 2) * __pyx_v_p.n_beats);
for (__pyx_t_19 = ((__pyx_v_beat_seg_i + 1) * __pyx_v_p.n_beats); __pyx_t_19 < __pyx_t_26; __pyx_t_19+=1) {
__pyx_v_i = __pyx_t_19;
/* "radiotool/algorithms/par_build_table.pyx":84
 * elif (beat_seg_i < p.max_beats - 1) and backward:
 * for i in range((beat_seg_i + 1) * p.n_beats, (beat_seg_i + 2) * p.n_beats):
 * tc_column[i] -= p.pen_val # <<<<<<<<<<<<<<
 *
 * # you're also allowed to move infinitely among the
 */
__pyx_t_21 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_21 * __pyx_v_tc_column.strides[0]) )) -= __pyx_v_p.pen_val;
}
goto __pyx_L31;
}
__pyx_L31:;
/* "radiotool/algorithms/par_build_table.pyx":88
 * # you're also allowed to move infinitely among the
 * # last beat if max_beats is not set (== -1)
 * if p.max_beats == -1 and (beat_seg_i == p.min_beats): # <<<<<<<<<<<<<<
 * for i in range(beat_seg_i * p.n_beats, (beat_seg_i + 1) * p.n_beats):
 * tc_column[i] -= p.pen_val
 */
__pyx_t_1 = ((__pyx_v_p.max_beats == -1) != 0);
if (__pyx_t_1) {
__pyx_t_16 = ((__pyx_v_beat_seg_i == __pyx_v_p.min_beats) != 0);
__pyx_t_17 = __pyx_t_16;
} else {
__pyx_t_17 = __pyx_t_1;
}
if (__pyx_t_17) {
/* "radiotool/algorithms/par_build_table.pyx":89
 * # last beat if max_beats is not set (== -1)
 * if p.max_beats == -1 and (beat_seg_i == p.min_beats):
 * for i in range(beat_seg_i * p.n_beats, (beat_seg_i + 1) * p.n_beats): # <<<<<<<<<<<<<<
 * tc_column[i] -= p.pen_val
 *
 */
__pyx_t_26 = ((__pyx_v_beat_seg_i + 1) * __pyx_v_p.n_beats);
for (__pyx_t_19 = (__pyx_v_beat_seg_i * __pyx_v_p.n_beats); __pyx_t_19 < __pyx_t_26; __pyx_t_19+=1) {
__pyx_v_i = __pyx_t_19;
/* "radiotool/algorithms/par_build_table.pyx":90
 * if p.max_beats == -1 and (beat_seg_i == p.min_beats):
 * for i in range(beat_seg_i * p.n_beats, (beat_seg_i + 1) * p.n_beats):
 * tc_column[i] -= p.pen_val # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_27 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_27 * __pyx_v_tc_column.strides[0]) )) -= __pyx_v_p.pen_val;
}
goto __pyx_L36;
}
__pyx_L36:;
goto __pyx_L28;
}
__pyx_L28:;
/* "radiotool/algorithms/par_build_table.pyx":23
 *
 *
 * cdef void get_tc_column(double[:, :] tc, int column, double[:] tc_column, int backward, Params p) nogil: # <<<<<<<<<<<<<<
 * cdef int tc_index = 0
 * cdef int beat_seg_i = 0
 */
/* function exit code */
}
/* NOTE(review): Cython-generated C (from par_build_table.pyx, cdef
   `get_pen_value`, declared nogil).  Do not edit this C by hand; fix
   the .pyx source and regenerate.

   What the visible code does: maps the expanded row index `i` back to
   a row of the penalty matrix `pen` (tail rows when i >= p.p0_full,
   otherwise i % p.n_beats), reads pen[pen_index, l], and adds
   p.pen_val when global_start_l == 0 and p.n_beats <= i < p.p0_full --
   i.e. don't start the song on a non-first segment beat.  Returns the
   adjusted penalty value. */
/* "radiotool/algorithms/par_build_table.pyx":93
 *
 *
 * cdef double get_pen_value(double[:, :] pen, int i, int l, int global_start_l, Params p) nogil: # <<<<<<<<<<<<<<
 * cdef int pen_index = 0
 * if i >= p.p0_full:
 */
static double __pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_value(__Pyx_memviewslice __pyx_v_pen, int __pyx_v_i, int __pyx_v_l, int __pyx_v_global_start_l, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params __pyx_v_p) {
int __pyx_v_pen_index;
double __pyx_v_new_pen;
double __pyx_r;
/* Compiler-generated temporaries (booleans and indices). */
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
/* "radiotool/algorithms/par_build_table.pyx":94
 *
 * cdef double get_pen_value(double[:, :] pen, int i, int l, int global_start_l, Params p) nogil:
 * cdef int pen_index = 0 # <<<<<<<<<<<<<<
 * if i >= p.p0_full:
 * pen_index = p.n_beats + (i - p.p0_full)
 */
__pyx_v_pen_index = 0;
/* "radiotool/algorithms/par_build_table.pyx":95
 * cdef double get_pen_value(double[:, :] pen, int i, int l, int global_start_l, Params p) nogil:
 * cdef int pen_index = 0
 * if i >= p.p0_full: # <<<<<<<<<<<<<<
 * pen_index = p.n_beats + (i - p.p0_full)
 * else:
 */
__pyx_t_1 = ((__pyx_v_i >= __pyx_v_p.p0_full) != 0);
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":96
 * cdef int pen_index = 0
 * if i >= p.p0_full:
 * pen_index = p.n_beats + (i - p.p0_full) # <<<<<<<<<<<<<<
 * else:
 * pen_index = i % p.n_beats
 */
__pyx_v_pen_index = (__pyx_v_p.n_beats + (__pyx_v_i - __pyx_v_p.p0_full));
goto __pyx_L3;
}
/*else*/ {
/* "radiotool/algorithms/par_build_table.pyx":98
 * pen_index = p.n_beats + (i - p.p0_full)
 * else:
 * pen_index = i % p.n_beats # <<<<<<<<<<<<<<
 * cdef double new_pen = pen[pen_index, l]
 *
 */
__pyx_v_pen_index = (__pyx_v_i % __pyx_v_p.n_beats);
}
__pyx_L3:;
/* "radiotool/algorithms/par_build_table.pyx":99
 * else:
 * pen_index = i % p.n_beats
 * cdef double new_pen = pen[pen_index, l] # <<<<<<<<<<<<<<
 *
 * #--- CONSTRAINTS ---#
 */
__pyx_t_2 = __pyx_v_pen_index;
__pyx_t_3 = __pyx_v_l;
__pyx_v_new_pen = (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_pen.data + __pyx_t_2 * __pyx_v_pen.strides[0]) ) + __pyx_t_3 * __pyx_v_pen.strides[1]) )));
/* "radiotool/algorithms/par_build_table.pyx":103
 * #--- CONSTRAINTS ---#
 * # * don't start song in segment beat other than first
 * if global_start_l == 0 and (p.n_beats <= i < p.p0_full): # <<<<<<<<<<<<<<
 * new_pen += p.pen_val
 *
 */
__pyx_t_1 = ((__pyx_v_global_start_l == 0) != 0);
if (__pyx_t_1) {
__pyx_t_4 = (__pyx_v_p.n_beats <= __pyx_v_i);
if (__pyx_t_4) {
__pyx_t_4 = (__pyx_v_i < __pyx_v_p.p0_full);
}
__pyx_t_5 = (__pyx_t_4 != 0);
} else {
__pyx_t_5 = __pyx_t_1;
}
if (__pyx_t_5) {
/* "radiotool/algorithms/par_build_table.pyx":104
 * # * don't start song in segment beat other than first
 * if global_start_l == 0 and (p.n_beats <= i < p.p0_full):
 * new_pen += p.pen_val # <<<<<<<<<<<<<<
 *
 * return new_pen
 */
__pyx_v_new_pen = (__pyx_v_new_pen + __pyx_v_p.pen_val);
goto __pyx_L4;
}
__pyx_L4:;
/* "radiotool/algorithms/par_build_table.pyx":106
 * new_pen += p.pen_val
 *
 * return new_pen # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_r = __pyx_v_new_pen;
goto __pyx_L0;
/* "radiotool/algorithms/par_build_table.pyx":93
 *
 *
 * cdef double get_pen_value(double[:, :] pen, int i, int l, int global_start_l, Params p) nogil: # <<<<<<<<<<<<<<
 * cdef int pen_index = 0
 * if i >= p.p0_full:
 */
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "radiotool/algorithms/par_build_table.pyx":109
*
*
* cdef void get_pen_column(double[:, :] pen, int column, double[:] new_pen, int global_start_l, Params p) nogil: # <<<<<<<<<<<<<<
* cdef int i, j
*
*/
static void __pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_column(__Pyx_memviewslice __pyx_v_pen, int __pyx_v_column, __Pyx_memviewslice __pyx_v_new_pen, int __pyx_v_global_start_l, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params __pyx_v_p) {
/* Cython-generated translation of `cdef void get_pen_column(...) nogil` from
 * radiotool/algorithms/par_build_table.pyx (do not hand-edit; regenerate from
 * the .pyx instead).
 *
 * Expands one column of the 2-D penalty table `pen` into the 1-D vector
 * `new_pen`:
 *   - rows [0, p.p0) of pen[:, column] are replicated across p.max_beats
 *     beat segments, at offsets i * p.n_beats + j;
 *   - rows [p.p0, ...) (pause beats) map to new_pen[p.p0_full .. p.all_full);
 *   - if global_start_l == 0, p.pen_val is added to new_pen[p.n_beats ..
 *     p.p0_full) so the song cannot start in a segment beat other than the
 *     first (constraint echoed in the .pyx comment below).
 *
 * nogil/void: no Python objects are touched; all access is raw
 * stride-arithmetic on the memoryview slices. Bounds are assumed valid
 * (boundscheck disabled by the generator) -- callers must size `new_pen`
 * to at least p.all_full elements. TODO confirm against .pyx directives. */
int __pyx_v_i;
int __pyx_v_j;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
/* "radiotool/algorithms/par_build_table.pyx":112
 * cdef int i, j
 *
 * for i in range(p.max_beats): # <<<<<<<<<<<<<<
 * for j in range(p.p0):
 * new_pen[i * p.n_beats + j] = pen[j, column]
 */
/* Replicate the first p.p0 penalty rows across every beat segment. */
__pyx_t_1 = __pyx_v_p.max_beats;
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
__pyx_v_i = __pyx_t_2;
/* "radiotool/algorithms/par_build_table.pyx":113
 *
 * for i in range(p.max_beats):
 * for j in range(p.p0): # <<<<<<<<<<<<<<
 * new_pen[i * p.n_beats + j] = pen[j, column]
 *
 */
__pyx_t_3 = __pyx_v_p.p0;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_j = __pyx_t_4;
/* "radiotool/algorithms/par_build_table.pyx":114
 * for i in range(p.max_beats):
 * for j in range(p.p0):
 * new_pen[i * p.n_beats + j] = pen[j, column] # <<<<<<<<<<<<<<
 *
 * for i in range(p.p0_full, p.all_full):
 */
__pyx_t_5 = __pyx_v_j;
__pyx_t_6 = __pyx_v_column;
__pyx_t_7 = ((__pyx_v_i * __pyx_v_p.n_beats) + __pyx_v_j);
/* new_pen[i * n_beats + j] = pen[j, column] via raw stride arithmetic. */
*((double *) ( /* dim=0 */ (__pyx_v_new_pen.data + __pyx_t_7 * __pyx_v_new_pen.strides[0]) )) = (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_pen.data + __pyx_t_5 * __pyx_v_pen.strides[0]) ) + __pyx_t_6 * __pyx_v_pen.strides[1]) )));
}
}
/* "radiotool/algorithms/par_build_table.pyx":116
 * new_pen[i * p.n_beats + j] = pen[j, column]
 *
 * for i in range(p.p0_full, p.all_full): # <<<<<<<<<<<<<<
 * new_pen[i] = pen[p.p0 + i - p.p0_full, column]
 *
 */
/* Copy the pause-beat penalty rows into the tail of new_pen. */
__pyx_t_1 = __pyx_v_p.all_full;
for (__pyx_t_2 = __pyx_v_p.p0_full; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
__pyx_v_i = __pyx_t_2;
/* "radiotool/algorithms/par_build_table.pyx":117
 *
 * for i in range(p.p0_full, p.all_full):
 * new_pen[i] = pen[p.p0 + i - p.p0_full, column] # <<<<<<<<<<<<<<
 *
 * #--- CONSTRAINTS ---#
 */
__pyx_t_3 = ((__pyx_v_p.p0 + __pyx_v_i) - __pyx_v_p.p0_full);
__pyx_t_4 = __pyx_v_column;
__pyx_t_8 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_new_pen.data + __pyx_t_8 * __pyx_v_new_pen.strides[0]) )) = (*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_pen.data + __pyx_t_3 * __pyx_v_pen.strides[0]) ) + __pyx_t_4 * __pyx_v_pen.strides[1]) )));
}
/* "radiotool/algorithms/par_build_table.pyx":121
 * #--- CONSTRAINTS ---#
 * # * don't start song in segment beat other than first
 * if global_start_l == 0: # <<<<<<<<<<<<<<
 * for i in range(p.n_beats, p.p0_full):
 * new_pen[i] += p.pen_val
 */
/* Constraint: at the global start, penalize every non-first segment beat. */
__pyx_t_9 = ((__pyx_v_global_start_l == 0) != 0);
if (__pyx_t_9) {
/* "radiotool/algorithms/par_build_table.pyx":122
 * # * don't start song in segment beat other than first
 * if global_start_l == 0:
 * for i in range(p.n_beats, p.p0_full): # <<<<<<<<<<<<<<
 * new_pen[i] += p.pen_val
 *
 */
__pyx_t_1 = __pyx_v_p.p0_full;
for (__pyx_t_2 = __pyx_v_p.n_beats; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
__pyx_v_i = __pyx_t_2;
/* "radiotool/algorithms/par_build_table.pyx":123
 * if global_start_l == 0:
 * for i in range(p.n_beats, p.p0_full):
 * new_pen[i] += p.pen_val # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_10 = __pyx_v_i;
*((double *) ( /* dim=0 */ (__pyx_v_new_pen.data + __pyx_t_10 * __pyx_v_new_pen.strides[0]) )) += __pyx_v_p.pen_val;
}
goto __pyx_L9;
}
__pyx_L9:;
/* "radiotool/algorithms/par_build_table.pyx":109
 *
 *
 * cdef void get_pen_column(double[:, :] pen, int column, double[:] new_pen, int global_start_l, Params p) nogil: # <<<<<<<<<<<<<<
 * cdef int i, j
 *
 */
/* function exit code */
}
/* "radiotool/algorithms/par_build_table.pyx":126
*
*
* cdef void space_efficient_cost_with_duration_constraint( # <<<<<<<<<<<<<<
* double[:, :] tc, double[:, :] pen, int start_beat, int end_beat, int global_start_l, Params p,
* double[:] cost, double[:] pen_val, double[:] vals_col, double[:] min_vals) nogil:
*/
static void __pyx_f_9radiotool_10algorithms_15par_build_table_space_efficient_cost_with_duration_constraint(__Pyx_memviewslice __pyx_v_tc, __Pyx_memviewslice __pyx_v_pen, int __pyx_v_start_beat, int __pyx_v_end_beat, int __pyx_v_global_start_l, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params __pyx_v_p, __Pyx_memviewslice __pyx_v_cost, __Pyx_memviewslice __pyx_v_pen_val, __Pyx_memviewslice __pyx_v_vals_col, __Pyx_memviewslice __pyx_v_min_vals) {
/* Cython-generated translation of
 * `cdef void space_efficient_cost_with_duration_constraint(...) nogil`
 * from radiotool/algorithms/par_build_table.pyx (do not hand-edit; change
 * the .pyx and regenerate).
 *
 * Forward dynamic program over the columns of `pen`, keeping only one cost
 * vector (`cost`) instead of the full table -- hence "space efficient".
 *   - Initialization: if start_beat is fixed (!= -1), cost is flooded with
 *     the 99999999 "infinity" sentinel and only cost[start_beat] is seeded;
 *     otherwise the whole first penalty column seeds it (get_pen_column).
 *   - Per column l in [1, pen.shape[1]): transition minima are accumulated
 *     into `min_vals` for (a) first-segment beats reachable only from the
 *     last pause beat, (b) other music-beat segments reachable from the
 *     previous segment, (c) the first pause beat reachable from beat
 *     segments in [min_beats-1, max_beats), and (d) the remaining pause
 *     beats reachable from pause beats; then cost[:] = min_vals.
 *   - Last column with a fixed end_beat (!= -1) is special-cased using
 *     get_tc_column + the end-beat penalty.
 *   - `minval == -1` is used as the "no candidate yet" sentinel throughout.
 *   - `vals_col`, `pen_val`, `min_vals` are caller-provided scratch buffers.
 *
 * nogil/void: errors cannot propagate, so the single fallible call
 * (__pyx_memoryview_copy_contents for `cost[:] = min_vals`) reports via
 * __Pyx_WriteUnraisable on the __pyx_L1_error path. */
int __pyx_v_l;
int __pyx_v_idx;
int __pyx_v_i;
int __pyx_v_beat_seg_i;
int __pyx_v_seg_start_beat;
int __pyx_v_j;
int __pyx_v_full_j;
int __pyx_v_orig_beat_j;
double __pyx_v_minval;
double __pyx_v_tmpval;
double __pyx_v_end_pen;
int __pyx_v_orig_beat_i;
int __pyx_t_1;
__Pyx_memviewslice __pyx_t_2 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
__Pyx_memviewslice __pyx_t_8 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_9;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
int __pyx_t_12;
int __pyx_t_13;
int __pyx_t_14;
int __pyx_t_15;
int __pyx_t_16;
long __pyx_t_17;
int __pyx_t_18;
int __pyx_t_19;
long __pyx_t_20;
int __pyx_t_21;
int __pyx_t_22;
int __pyx_t_23;
int __pyx_t_24;
int __pyx_t_25;
int __pyx_t_26;
int __pyx_t_27;
int __pyx_t_28;
int __pyx_t_29;
int __pyx_t_30;
int __pyx_t_31;
int __pyx_t_32;
int __pyx_t_33;
int __pyx_t_34;
int __pyx_t_35;
int __pyx_t_36;
int __pyx_t_37;
__Pyx_memviewslice __pyx_t_38 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "radiotool/algorithms/par_build_table.pyx":134
 *
 * # generate initial cost
 * if start_beat != -1: # <<<<<<<<<<<<<<
 * cost[:] = 99999999 # N.inf
 * cost[start_beat] = get_pen_value(pen, start_beat, 0, global_start_l, p)
 */
__pyx_t_1 = ((__pyx_v_start_beat != -1) != 0);
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":135
 * # generate initial cost
 * if start_beat != -1:
 * cost[:] = 99999999 # N.inf # <<<<<<<<<<<<<<
 * cost[start_beat] = get_pen_value(pen, start_beat, 0, global_start_l, p)
 * else:
 */
/* Scalar broadcast `cost[:] = 99999999` expanded by Cython into a
 * strided fill over a borrowed slice of `cost`. */
__pyx_t_3 = -1;
__pyx_t_2.data = __pyx_v_cost.data;
__pyx_t_2.memview = __pyx_v_cost.memview;
__PYX_INC_MEMVIEW(&__pyx_t_2, 0);
__pyx_t_2.shape[0] = __pyx_v_cost.shape[0];
__pyx_t_2.strides[0] = __pyx_v_cost.strides[0];
__pyx_t_2.suboffsets[0] = -1;
{
double __pyx_temp_scalar = 99999999.0;
{
Py_ssize_t __pyx_temp_extent_0 = __pyx_t_2.shape[0];
Py_ssize_t __pyx_temp_stride_0 = __pyx_t_2.strides[0];
char *__pyx_temp_pointer_0;
Py_ssize_t __pyx_temp_idx_0;
__pyx_temp_pointer_0 = __pyx_t_2.data;
for (__pyx_temp_idx_0 = 0; __pyx_temp_idx_0 < __pyx_temp_extent_0; __pyx_temp_idx_0++) {
*((double *) __pyx_temp_pointer_0) = __pyx_temp_scalar;
__pyx_temp_pointer_0 += __pyx_temp_stride_0;
}
}
}
__PYX_XDEC_MEMVIEW(&__pyx_t_2, 0);
/* "radiotool/algorithms/par_build_table.pyx":136
 * if start_beat != -1:
 * cost[:] = 99999999 # N.inf
 * cost[start_beat] = get_pen_value(pen, start_beat, 0, global_start_l, p) # <<<<<<<<<<<<<<
 * else:
 * get_pen_column(pen, 0, cost, global_start_l, p)
 */
__pyx_t_3 = __pyx_v_start_beat;
*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_3 * __pyx_v_cost.strides[0]) )) = __pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_value(__pyx_v_pen, __pyx_v_start_beat, 0, __pyx_v_global_start_l, __pyx_v_p);
goto __pyx_L3;
}
/*else*/ {
/* "radiotool/algorithms/par_build_table.pyx":138
 * cost[start_beat] = get_pen_value(pen, start_beat, 0, global_start_l, p)
 * else:
 * get_pen_column(pen, 0, cost, global_start_l, p) # <<<<<<<<<<<<<<
 *
 * # optimize
 */
__pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_column(__pyx_v_pen, 0, __pyx_v_cost, __pyx_v_global_start_l, __pyx_v_p);
}
__pyx_L3:;
/* "radiotool/algorithms/par_build_table.pyx":141
 *
 * # optimize
 * for l in range(1, pen.shape[1]): # <<<<<<<<<<<<<<
 * if l == pen.shape[1] - 1 and end_beat != -1:
 * # handle end beat set
 */
/* Main DP loop: one pass per remaining penalty column. */
__pyx_t_4 = (__pyx_v_pen.shape[1]);
for (__pyx_t_5 = 1; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_l = __pyx_t_5;
/* "radiotool/algorithms/par_build_table.pyx":142
 * # optimize
 * for l in range(1, pen.shape[1]):
 * if l == pen.shape[1] - 1 and end_beat != -1: # <<<<<<<<<<<<<<
 * # handle end beat set
 * end_pen = get_pen_value(pen, end_beat, l, global_start_l + l, p)
 */
__pyx_t_1 = ((__pyx_v_l == ((__pyx_v_pen.shape[1]) - 1)) != 0);
if (__pyx_t_1) {
__pyx_t_6 = ((__pyx_v_end_beat != -1) != 0);
__pyx_t_7 = __pyx_t_6;
} else {
__pyx_t_7 = __pyx_t_1;
}
if (__pyx_t_7) {
/* "radiotool/algorithms/par_build_table.pyx":144
 * if l == pen.shape[1] - 1 and end_beat != -1:
 * # handle end beat set
 * end_pen = get_pen_value(pen, end_beat, l, global_start_l + l, p) # <<<<<<<<<<<<<<
 * get_tc_column(tc, end_beat, vals_col, 0, p)
 *
 */
__pyx_v_end_pen = __pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_value(__pyx_v_pen, __pyx_v_end_beat, __pyx_v_l, (__pyx_v_global_start_l + __pyx_v_l), __pyx_v_p);
/* "radiotool/algorithms/par_build_table.pyx":145
 * # handle end beat set
 * end_pen = get_pen_value(pen, end_beat, l, global_start_l + l, p)
 * get_tc_column(tc, end_beat, vals_col, 0, p) # <<<<<<<<<<<<<<
 *
 * min_vals[:] = 99999999 # N.inf
 */
__pyx_f_9radiotool_10algorithms_15par_build_table_get_tc_column(__pyx_v_tc, __pyx_v_end_beat, __pyx_v_vals_col, 0, __pyx_v_p);
/* "radiotool/algorithms/par_build_table.pyx":147
 * get_tc_column(tc, end_beat, vals_col, 0, p)
 *
 * min_vals[:] = 99999999 # N.inf # <<<<<<<<<<<<<<
 * minval = -1
 * for i in range(vals_col.shape[0]):
 */
/* Scalar broadcast fill of min_vals, same expansion as for `cost` above. */
__pyx_t_9 = -1;
__pyx_t_8.data = __pyx_v_min_vals.data;
__pyx_t_8.memview = __pyx_v_min_vals.memview;
__PYX_INC_MEMVIEW(&__pyx_t_8, 0);
__pyx_t_8.shape[0] = __pyx_v_min_vals.shape[0];
__pyx_t_8.strides[0] = __pyx_v_min_vals.strides[0];
__pyx_t_8.suboffsets[0] = -1;
{
double __pyx_temp_scalar = 99999999.0;
{
Py_ssize_t __pyx_temp_extent_0 = __pyx_t_8.shape[0];
Py_ssize_t __pyx_temp_stride_0 = __pyx_t_8.strides[0];
char *__pyx_temp_pointer_0;
Py_ssize_t __pyx_temp_idx_0;
__pyx_temp_pointer_0 = __pyx_t_8.data;
for (__pyx_temp_idx_0 = 0; __pyx_temp_idx_0 < __pyx_temp_extent_0; __pyx_temp_idx_0++) {
*((double *) __pyx_temp_pointer_0) = __pyx_temp_scalar;
__pyx_temp_pointer_0 += __pyx_temp_stride_0;
}
}
}
__PYX_XDEC_MEMVIEW(&__pyx_t_8, 0);
/* "radiotool/algorithms/par_build_table.pyx":148
 *
 * min_vals[:] = 99999999 # N.inf
 * minval = -1 # <<<<<<<<<<<<<<
 * for i in range(vals_col.shape[0]):
 * if minval == -1 or vals_col[i] + cost[i] + end_pen < minval:
 */
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":149
 * min_vals[:] = 99999999 # N.inf
 * minval = -1
 * for i in range(vals_col.shape[0]): # <<<<<<<<<<<<<<
 * if minval == -1 or vals_col[i] + cost[i] + end_pen < minval:
 * minval = vals_col[i] + cost[i] + end_pen
 */
/* Min over all predecessors of the fixed end beat. */
__pyx_t_10 = (__pyx_v_vals_col.shape[0]);
for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_10; __pyx_t_9+=1) {
__pyx_v_i = __pyx_t_9;
/* "radiotool/algorithms/par_build_table.pyx":150
 * minval = -1
 * for i in range(vals_col.shape[0]):
 * if minval == -1 or vals_col[i] + cost[i] + end_pen < minval: # <<<<<<<<<<<<<<
 * minval = vals_col[i] + cost[i] + end_pen
 *
 */
__pyx_t_7 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_7) {
__pyx_t_11 = __pyx_v_i;
__pyx_t_12 = __pyx_v_i;
__pyx_t_1 = (((((*((double *) ( /* dim=0 */ (__pyx_v_vals_col.data + __pyx_t_11 * __pyx_v_vals_col.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_12 * __pyx_v_cost.strides[0]) )))) + __pyx_v_end_pen) < __pyx_v_minval) != 0);
__pyx_t_6 = __pyx_t_1;
} else {
__pyx_t_6 = __pyx_t_7;
}
if (__pyx_t_6) {
/* "radiotool/algorithms/par_build_table.pyx":151
 * for i in range(vals_col.shape[0]):
 * if minval == -1 or vals_col[i] + cost[i] + end_pen < minval:
 * minval = vals_col[i] + cost[i] + end_pen # <<<<<<<<<<<<<<
 *
 * min_vals[end_beat] = minval
 */
__pyx_t_13 = __pyx_v_i;
__pyx_t_14 = __pyx_v_i;
__pyx_v_minval = (((*((double *) ( /* dim=0 */ (__pyx_v_vals_col.data + __pyx_t_13 * __pyx_v_vals_col.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_14 * __pyx_v_cost.strides[0]) )))) + __pyx_v_end_pen);
goto __pyx_L9;
}
__pyx_L9:;
}
/* "radiotool/algorithms/par_build_table.pyx":153
 * minval = vals_col[i] + cost[i] + end_pen
 *
 * min_vals[end_beat] = minval # <<<<<<<<<<<<<<
 *
 * else:
 */
__pyx_t_9 = __pyx_v_end_beat;
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_9 * __pyx_v_min_vals.strides[0]) )) = __pyx_v_minval;
goto __pyx_L6;
}
/*else*/ {
/* "radiotool/algorithms/par_build_table.pyx":156
 *
 * else:
 * get_pen_column(pen, l, pen_val, global_start_l + l, p) # <<<<<<<<<<<<<<
 *
 * # Based on the nature of our problem
 */
__pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_column(__pyx_v_pen, __pyx_v_l, __pyx_v_pen_val, (__pyx_v_global_start_l + __pyx_v_l), __pyx_v_p);
/* "radiotool/algorithms/par_build_table.pyx":171
 *
 * # first beat segment
 * for idx in range(p.n_beats): # <<<<<<<<<<<<<<
 * # could only get here from the last pause beat
 * min_vals[idx] = tc[p.n_beats + p.n_pauses - 1, idx] + pen_val[idx] + cost[p.all_full - 1]
 */
/* Case (a): first beat segment -- sole predecessor is the last pause beat. */
__pyx_t_15 = __pyx_v_p.n_beats;
for (__pyx_t_16 = 0; __pyx_t_16 < __pyx_t_15; __pyx_t_16+=1) {
__pyx_v_idx = __pyx_t_16;
/* "radiotool/algorithms/par_build_table.pyx":173
 * for idx in range(p.n_beats):
 * # could only get here from the last pause beat
 * min_vals[idx] = tc[p.n_beats + p.n_pauses - 1, idx] + pen_val[idx] + cost[p.all_full - 1] # <<<<<<<<<<<<<<
 *
 * # all other music beat segments
 */
__pyx_t_17 = ((__pyx_v_p.n_beats + __pyx_v_p.n_pauses) - 1);
__pyx_t_18 = __pyx_v_idx;
__pyx_t_19 = __pyx_v_idx;
__pyx_t_20 = (__pyx_v_p.all_full - 1);
__pyx_t_21 = __pyx_v_idx;
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_21 * __pyx_v_min_vals.strides[0]) )) = (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_17 * __pyx_v_tc.strides[0]) ) + __pyx_t_18 * __pyx_v_tc.strides[1]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_pen_val.data + __pyx_t_19 * __pyx_v_pen_val.strides[0]) )))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_20 * __pyx_v_cost.strides[0]) ))));
}
/* "radiotool/algorithms/par_build_table.pyx":176
 *
 * # all other music beat segments
 * for idx in range(p.n_beats, p.p0_full): # <<<<<<<<<<<<<<
 * beat_seg_i = idx / p.n_beats
 * orig_beat_i = idx % p.n_beats
 */
/* Case (b): later beat segments -- min over all beats of segment i-1. */
__pyx_t_15 = __pyx_v_p.p0_full;
for (__pyx_t_16 = __pyx_v_p.n_beats; __pyx_t_16 < __pyx_t_15; __pyx_t_16+=1) {
__pyx_v_idx = __pyx_t_16;
/* "radiotool/algorithms/par_build_table.pyx":177
 * # all other music beat segments
 * for idx in range(p.n_beats, p.p0_full):
 * beat_seg_i = idx / p.n_beats # <<<<<<<<<<<<<<
 * orig_beat_i = idx % p.n_beats
 *
 */
__pyx_v_beat_seg_i = (__pyx_v_idx / __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":178
 * for idx in range(p.n_beats, p.p0_full):
 * beat_seg_i = idx / p.n_beats
 * orig_beat_i = idx % p.n_beats # <<<<<<<<<<<<<<
 *
 * # must have gotten here from beat_seg_i - 1
 */
__pyx_v_orig_beat_i = (__pyx_v_idx % __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":183
 * # and minimum value will be min cost from
 * # another music beat
 * seg_start_beat = (beat_seg_i - 1) * p.n_beats # <<<<<<<<<<<<<<
 * minval = -1
 * for j in range(p.n_beats):
 */
__pyx_v_seg_start_beat = ((__pyx_v_beat_seg_i - 1) * __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":184
 * # another music beat
 * seg_start_beat = (beat_seg_i - 1) * p.n_beats
 * minval = -1 # <<<<<<<<<<<<<<
 * for j in range(p.n_beats):
 * tmpval = tc[j, orig_beat_i] + pen_val[idx] + cost[seg_start_beat + j]
 */
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":185
 * seg_start_beat = (beat_seg_i - 1) * p.n_beats
 * minval = -1
 * for j in range(p.n_beats): # <<<<<<<<<<<<<<
 * tmpval = tc[j, orig_beat_i] + pen_val[idx] + cost[seg_start_beat + j]
 * if minval == -1 or tmpval < minval:
 */
__pyx_t_22 = __pyx_v_p.n_beats;
for (__pyx_t_23 = 0; __pyx_t_23 < __pyx_t_22; __pyx_t_23+=1) {
__pyx_v_j = __pyx_t_23;
/* "radiotool/algorithms/par_build_table.pyx":186
 * minval = -1
 * for j in range(p.n_beats):
 * tmpval = tc[j, orig_beat_i] + pen_val[idx] + cost[seg_start_beat + j] # <<<<<<<<<<<<<<
 * if minval == -1 or tmpval < minval:
 * minval = tmpval
 */
__pyx_t_24 = __pyx_v_j;
__pyx_t_25 = __pyx_v_orig_beat_i;
__pyx_t_26 = __pyx_v_idx;
__pyx_t_27 = (__pyx_v_seg_start_beat + __pyx_v_j);
__pyx_v_tmpval = (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_24 * __pyx_v_tc.strides[0]) ) + __pyx_t_25 * __pyx_v_tc.strides[1]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_pen_val.data + __pyx_t_26 * __pyx_v_pen_val.strides[0]) )))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_27 * __pyx_v_cost.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":187
 * for j in range(p.n_beats):
 * tmpval = tc[j, orig_beat_i] + pen_val[idx] + cost[seg_start_beat + j]
 * if minval == -1 or tmpval < minval: # <<<<<<<<<<<<<<
 * minval = tmpval
 *
 */
__pyx_t_6 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_6) {
__pyx_t_7 = ((__pyx_v_tmpval < __pyx_v_minval) != 0);
__pyx_t_1 = __pyx_t_7;
} else {
__pyx_t_1 = __pyx_t_6;
}
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":188
 * tmpval = tc[j, orig_beat_i] + pen_val[idx] + cost[seg_start_beat + j]
 * if minval == -1 or tmpval < minval:
 * minval = tmpval # <<<<<<<<<<<<<<
 *
 * min_vals[idx] = minval
 */
__pyx_v_minval = __pyx_v_tmpval;
goto __pyx_L16;
}
__pyx_L16:;
}
/* "radiotool/algorithms/par_build_table.pyx":190
 * minval = tmpval
 *
 * min_vals[idx] = minval # <<<<<<<<<<<<<<
 *
 * # first pause beat:
 */
__pyx_t_22 = __pyx_v_idx;
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_22 * __pyx_v_min_vals.strides[0]) )) = __pyx_v_minval;
}
/* "radiotool/algorithms/par_build_table.pyx":195
 * # must have gotten here from
 * # min beat <= beat seg <= max beat
 * minval = -1 # <<<<<<<<<<<<<<
 * for full_j in range(p.n_beats * (p.min_beats - 1), p.n_beats * p.max_beats):
 * orig_beat_j = full_j % p.n_beats
 */
/* Case (c): first pause beat -- enterable only from beat segments in
 * [min_beats-1, max_beats), enforcing the duration constraint. */
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":196
 * # min beat <= beat seg <= max beat
 * minval = -1
 * for full_j in range(p.n_beats * (p.min_beats - 1), p.n_beats * p.max_beats): # <<<<<<<<<<<<<<
 * orig_beat_j = full_j % p.n_beats
 * tmpval = tc[orig_beat_j, p.p0] + pen_val[p.p0_full] + cost[full_j]
 */
__pyx_t_15 = (__pyx_v_p.n_beats * __pyx_v_p.max_beats);
for (__pyx_t_16 = (__pyx_v_p.n_beats * (__pyx_v_p.min_beats - 1)); __pyx_t_16 < __pyx_t_15; __pyx_t_16+=1) {
__pyx_v_full_j = __pyx_t_16;
/* "radiotool/algorithms/par_build_table.pyx":197
 * minval = -1
 * for full_j in range(p.n_beats * (p.min_beats - 1), p.n_beats * p.max_beats):
 * orig_beat_j = full_j % p.n_beats # <<<<<<<<<<<<<<
 * tmpval = tc[orig_beat_j, p.p0] + pen_val[p.p0_full] + cost[full_j]
 * if minval == -1 or tmpval < minval:
 */
__pyx_v_orig_beat_j = (__pyx_v_full_j % __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":198
 * for full_j in range(p.n_beats * (p.min_beats - 1), p.n_beats * p.max_beats):
 * orig_beat_j = full_j % p.n_beats
 * tmpval = tc[orig_beat_j, p.p0] + pen_val[p.p0_full] + cost[full_j] # <<<<<<<<<<<<<<
 * if minval == -1 or tmpval < minval:
 * minval = tmpval
 */
__pyx_t_23 = __pyx_v_orig_beat_j;
__pyx_t_28 = __pyx_v_p.p0;
__pyx_t_29 = __pyx_v_p.p0_full;
__pyx_t_30 = __pyx_v_full_j;
__pyx_v_tmpval = (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_23 * __pyx_v_tc.strides[0]) ) + __pyx_t_28 * __pyx_v_tc.strides[1]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_pen_val.data + __pyx_t_29 * __pyx_v_pen_val.strides[0]) )))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_30 * __pyx_v_cost.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":199
 * orig_beat_j = full_j % p.n_beats
 * tmpval = tc[orig_beat_j, p.p0] + pen_val[p.p0_full] + cost[full_j]
 * if minval == -1 or tmpval < minval: # <<<<<<<<<<<<<<
 * minval = tmpval
 * min_vals[p.p0_full] = minval
 */
__pyx_t_1 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_1) {
__pyx_t_6 = ((__pyx_v_tmpval < __pyx_v_minval) != 0);
__pyx_t_7 = __pyx_t_6;
} else {
__pyx_t_7 = __pyx_t_1;
}
if (__pyx_t_7) {
/* "radiotool/algorithms/par_build_table.pyx":200
 * tmpval = tc[orig_beat_j, p.p0] + pen_val[p.p0_full] + cost[full_j]
 * if minval == -1 or tmpval < minval:
 * minval = tmpval # <<<<<<<<<<<<<<
 * min_vals[p.p0_full] = minval
 *
 */
__pyx_v_minval = __pyx_v_tmpval;
goto __pyx_L19;
}
__pyx_L19:;
}
/* "radiotool/algorithms/par_build_table.pyx":201
 * if minval == -1 or tmpval < minval:
 * minval = tmpval
 * min_vals[p.p0_full] = minval # <<<<<<<<<<<<<<
 *
 * # other pause beat
 */
__pyx_t_15 = __pyx_v_p.p0_full;
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_15 * __pyx_v_min_vals.strides[0]) )) = __pyx_v_minval;
/* "radiotool/algorithms/par_build_table.pyx":204
 *
 * # other pause beat
 * for idx in range(p.p0_full + 1, p.all_full): # <<<<<<<<<<<<<<
 * orig_beat_i = p.p0 + (idx - p.p0_full)
 *
 */
/* Case (d): remaining pause beats -- reachable only from pause beats. */
__pyx_t_16 = __pyx_v_p.all_full;
for (__pyx_t_31 = (__pyx_v_p.p0_full + 1); __pyx_t_31 < __pyx_t_16; __pyx_t_31+=1) {
__pyx_v_idx = __pyx_t_31;
/* "radiotool/algorithms/par_build_table.pyx":205
 * # other pause beat
 * for idx in range(p.p0_full + 1, p.all_full):
 * orig_beat_i = p.p0 + (idx - p.p0_full) # <<<<<<<<<<<<<<
 *
 * # must have gotten here from another pause beat
 */
__pyx_v_orig_beat_i = (__pyx_v_p.p0 + (__pyx_v_idx - __pyx_v_p.p0_full));
/* "radiotool/algorithms/par_build_table.pyx":208
 *
 * # must have gotten here from another pause beat
 * minval = -1 # <<<<<<<<<<<<<<
 * for j in range(p.n_pauses):
 * tmpval = tc[p.p0 + j, orig_beat_i] + pen_val[idx] + cost[p.p0_full + j]
 */
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":209
 * # must have gotten here from another pause beat
 * minval = -1
 * for j in range(p.n_pauses): # <<<<<<<<<<<<<<
 * tmpval = tc[p.p0 + j, orig_beat_i] + pen_val[idx] + cost[p.p0_full + j]
 * if minval == -1 or tmpval < minval:
 */
__pyx_t_32 = __pyx_v_p.n_pauses;
for (__pyx_t_33 = 0; __pyx_t_33 < __pyx_t_32; __pyx_t_33+=1) {
__pyx_v_j = __pyx_t_33;
/* "radiotool/algorithms/par_build_table.pyx":210
 * minval = -1
 * for j in range(p.n_pauses):
 * tmpval = tc[p.p0 + j, orig_beat_i] + pen_val[idx] + cost[p.p0_full + j] # <<<<<<<<<<<<<<
 * if minval == -1 or tmpval < minval:
 * minval = tmpval
 */
__pyx_t_34 = (__pyx_v_p.p0 + __pyx_v_j);
__pyx_t_35 = __pyx_v_orig_beat_i;
__pyx_t_36 = __pyx_v_idx;
__pyx_t_37 = (__pyx_v_p.p0_full + __pyx_v_j);
__pyx_v_tmpval = (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_34 * __pyx_v_tc.strides[0]) ) + __pyx_t_35 * __pyx_v_tc.strides[1]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_pen_val.data + __pyx_t_36 * __pyx_v_pen_val.strides[0]) )))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_37 * __pyx_v_cost.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":211
 * for j in range(p.n_pauses):
 * tmpval = tc[p.p0 + j, orig_beat_i] + pen_val[idx] + cost[p.p0_full + j]
 * if minval == -1 or tmpval < minval: # <<<<<<<<<<<<<<
 * minval = tmpval
 * min_vals[idx] = minval
 */
__pyx_t_7 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_7) {
__pyx_t_1 = ((__pyx_v_tmpval < __pyx_v_minval) != 0);
__pyx_t_6 = __pyx_t_1;
} else {
__pyx_t_6 = __pyx_t_7;
}
if (__pyx_t_6) {
/* "radiotool/algorithms/par_build_table.pyx":212
 * tmpval = tc[p.p0 + j, orig_beat_i] + pen_val[idx] + cost[p.p0_full + j]
 * if minval == -1 or tmpval < minval:
 * minval = tmpval # <<<<<<<<<<<<<<
 * min_vals[idx] = minval
 *
 */
__pyx_v_minval = __pyx_v_tmpval;
goto __pyx_L24;
}
__pyx_L24:;
}
/* "radiotool/algorithms/par_build_table.pyx":213
 * if minval == -1 or tmpval < minval:
 * minval = tmpval
 * min_vals[idx] = minval # <<<<<<<<<<<<<<
 *
 * cost[:] = min_vals
 */
__pyx_t_32 = __pyx_v_idx;
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_32 * __pyx_v_min_vals.strides[0]) )) = __pyx_v_minval;
}
}
__pyx_L6:;
/* "radiotool/algorithms/par_build_table.pyx":215
 * min_vals[idx] = minval
 *
 * cost[:] = min_vals # <<<<<<<<<<<<<<
 *
 *
 */
/* End of each column iteration: roll min_vals into cost. This is the only
 * fallible call in the function (it can fail on shape mismatch); failure
 * jumps to __pyx_L1_error below. */
__pyx_t_16 = -1;
__pyx_t_38.data = __pyx_v_cost.data;
__pyx_t_38.memview = __pyx_v_cost.memview;
__PYX_INC_MEMVIEW(&__pyx_t_38, 0);
__pyx_t_38.shape[0] = __pyx_v_cost.shape[0];
__pyx_t_38.strides[0] = __pyx_v_cost.strides[0];
__pyx_t_38.suboffsets[0] = -1;
if (unlikely(__pyx_memoryview_copy_contents(__pyx_v_min_vals, __pyx_t_38, 1, 1, 0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__PYX_XDEC_MEMVIEW(&__pyx_t_38, 0);
}
/* "radiotool/algorithms/par_build_table.pyx":126
 *
 *
 * cdef void space_efficient_cost_with_duration_constraint( # <<<<<<<<<<<<<<
 * double[:, :] tc, double[:, :] pen, int start_beat, int end_beat, int global_start_l, Params p,
 * double[:] cost, double[:] pen_val, double[:] vals_col, double[:] min_vals) nogil:
 */
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
/* nogil void function: drop memoryview temps and report the error as
 * unraisable instead of propagating an exception. */
__PYX_XDEC_MEMVIEW(&__pyx_t_2, 0);
__PYX_XDEC_MEMVIEW(&__pyx_t_8, 0);
__PYX_XDEC_MEMVIEW(&__pyx_t_38, 0);
__Pyx_WriteUnraisable("radiotool.algorithms.par_build_table.space_efficient_cost_with_duration_constraint", __pyx_clineno, __pyx_lineno, __pyx_filename, 0);
__pyx_L0:;
}
/* "radiotool/algorithms/par_build_table.pyx":218
*
*
* cdef void backward_space_efficient_cost_with_duration_constraint( # <<<<<<<<<<<<<<
* double[:, :] tc, double[:, :] pen, int start_beat, int end_beat, int global_start_l, Params p,
* double[:] cost, double[:] pen_val, double[:] vals_col, double[:] min_vals) nogil:
*/
static void __pyx_f_9radiotool_10algorithms_15par_build_table_backward_space_efficient_cost_with_duration_constraint(__Pyx_memviewslice __pyx_v_tc, __Pyx_memviewslice __pyx_v_pen, int __pyx_v_start_beat, int __pyx_v_end_beat, int __pyx_v_global_start_l, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params __pyx_v_p, __Pyx_memviewslice __pyx_v_cost, __Pyx_memviewslice __pyx_v_pen_val, __Pyx_memviewslice __pyx_v_vals_col, __Pyx_memviewslice __pyx_v_min_vals) {
int __pyx_v_l;
int __pyx_v_idx;
int __pyx_v_i;
int __pyx_v_beat_seg_i;
int __pyx_v_seg_start_beat;
int __pyx_v_j;
double __pyx_v_minval;
double __pyx_v_tmpval;
double __pyx_v_start_pen;
int __pyx_v_orig_beat_i;
int __pyx_t_1;
__Pyx_memviewslice __pyx_t_2 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
__Pyx_memviewslice __pyx_t_7 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_8;
Py_ssize_t __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
int __pyx_t_12;
int __pyx_t_13;
long __pyx_t_14;
int __pyx_t_15;
int __pyx_t_16;
int __pyx_t_17;
int __pyx_t_18;
int __pyx_t_19;
int __pyx_t_20;
int __pyx_t_21;
int __pyx_t_22;
int __pyx_t_23;
int __pyx_t_24;
int __pyx_t_25;
int __pyx_t_26;
int __pyx_t_27;
int __pyx_t_28;
int __pyx_t_29;
int __pyx_t_30;
int __pyx_t_31;
int __pyx_t_32;
int __pyx_t_33;
int __pyx_t_34;
int __pyx_t_35;
int __pyx_t_36;
int __pyx_t_37;
int __pyx_t_38;
int __pyx_t_39;
int __pyx_t_40;
int __pyx_t_41;
long __pyx_t_42;
int __pyx_t_43;
long __pyx_t_44;
__Pyx_memviewslice __pyx_t_45 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "radiotool/algorithms/par_build_table.pyx":226
*
* # generate initial cost
* if end_beat != -1: # <<<<<<<<<<<<<<
* cost[:] = 99999999 # N.inf
* cost[end_beat] = get_pen_value(pen, end_beat, pen.shape[1] - 1, global_start_l + pen.shape[1] - 1, p)
*/
__pyx_t_1 = ((__pyx_v_end_beat != -1) != 0);
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":227
* # generate initial cost
* if end_beat != -1:
* cost[:] = 99999999 # N.inf # <<<<<<<<<<<<<<
* cost[end_beat] = get_pen_value(pen, end_beat, pen.shape[1] - 1, global_start_l + pen.shape[1] - 1, p)
* else:
*/
__pyx_t_3 = -1;
__pyx_t_2.data = __pyx_v_cost.data;
__pyx_t_2.memview = __pyx_v_cost.memview;
__PYX_INC_MEMVIEW(&__pyx_t_2, 0);
__pyx_t_2.shape[0] = __pyx_v_cost.shape[0];
__pyx_t_2.strides[0] = __pyx_v_cost.strides[0];
__pyx_t_2.suboffsets[0] = -1;
{
double __pyx_temp_scalar = 99999999.0;
{
Py_ssize_t __pyx_temp_extent_0 = __pyx_t_2.shape[0];
Py_ssize_t __pyx_temp_stride_0 = __pyx_t_2.strides[0];
char *__pyx_temp_pointer_0;
Py_ssize_t __pyx_temp_idx_0;
__pyx_temp_pointer_0 = __pyx_t_2.data;
for (__pyx_temp_idx_0 = 0; __pyx_temp_idx_0 < __pyx_temp_extent_0; __pyx_temp_idx_0++) {
*((double *) __pyx_temp_pointer_0) = __pyx_temp_scalar;
__pyx_temp_pointer_0 += __pyx_temp_stride_0;
}
}
}
__PYX_XDEC_MEMVIEW(&__pyx_t_2, 0);
/* "radiotool/algorithms/par_build_table.pyx":228
* if end_beat != -1:
* cost[:] = 99999999 # N.inf
* cost[end_beat] = get_pen_value(pen, end_beat, pen.shape[1] - 1, global_start_l + pen.shape[1] - 1, p) # <<<<<<<<<<<<<<
* else:
* get_pen_column(pen, pen.shape[1] - 1, cost, global_start_l + pen.shape[1] - 1, p)
*/
__pyx_t_3 = __pyx_v_end_beat;
*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_3 * __pyx_v_cost.strides[0]) )) = __pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_value(__pyx_v_pen, __pyx_v_end_beat, ((__pyx_v_pen.shape[1]) - 1), ((__pyx_v_global_start_l + (__pyx_v_pen.shape[1])) - 1), __pyx_v_p);
goto __pyx_L3;
}
/*else*/ {
/* "radiotool/algorithms/par_build_table.pyx":230
* cost[end_beat] = get_pen_value(pen, end_beat, pen.shape[1] - 1, global_start_l + pen.shape[1] - 1, p)
* else:
* get_pen_column(pen, pen.shape[1] - 1, cost, global_start_l + pen.shape[1] - 1, p) # <<<<<<<<<<<<<<
*
* # optimize
*/
__pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_column(__pyx_v_pen, ((__pyx_v_pen.shape[1]) - 1), __pyx_v_cost, ((__pyx_v_global_start_l + (__pyx_v_pen.shape[1])) - 1), __pyx_v_p);
}
__pyx_L3:;
/* "radiotool/algorithms/par_build_table.pyx":233
*
* # optimize
* for l in xrange(pen.shape[1] - 2, -1, -1): # <<<<<<<<<<<<<<
* if l == 0 and start_beat != -1:
* # handle start beat set
*/
for (__pyx_t_4 = ((__pyx_v_pen.shape[1]) - 2); __pyx_t_4 > -1; __pyx_t_4-=1) {
__pyx_v_l = __pyx_t_4;
/* "radiotool/algorithms/par_build_table.pyx":234
* # optimize
* for l in xrange(pen.shape[1] - 2, -1, -1):
* if l == 0 and start_beat != -1: # <<<<<<<<<<<<<<
* # handle start beat set
* start_pen = get_pen_value(pen, start_beat, l, global_start_l + l, p)
*/
__pyx_t_1 = ((__pyx_v_l == 0) != 0);
if (__pyx_t_1) {
__pyx_t_5 = ((__pyx_v_start_beat != -1) != 0);
__pyx_t_6 = __pyx_t_5;
} else {
__pyx_t_6 = __pyx_t_1;
}
if (__pyx_t_6) {
/* "radiotool/algorithms/par_build_table.pyx":236
* if l == 0 and start_beat != -1:
* # handle start beat set
* start_pen = get_pen_value(pen, start_beat, l, global_start_l + l, p) # <<<<<<<<<<<<<<
* get_tc_column(tc, start_beat, vals_col, 1, p)
*
*/
__pyx_v_start_pen = __pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_value(__pyx_v_pen, __pyx_v_start_beat, __pyx_v_l, (__pyx_v_global_start_l + __pyx_v_l), __pyx_v_p);
/* "radiotool/algorithms/par_build_table.pyx":237
* # handle start beat set
* start_pen = get_pen_value(pen, start_beat, l, global_start_l + l, p)
* get_tc_column(tc, start_beat, vals_col, 1, p) # <<<<<<<<<<<<<<
*
* min_vals[:] = 99999999 # N.inf
*/
__pyx_f_9radiotool_10algorithms_15par_build_table_get_tc_column(__pyx_v_tc, __pyx_v_start_beat, __pyx_v_vals_col, 1, __pyx_v_p);
/* "radiotool/algorithms/par_build_table.pyx":239
* get_tc_column(tc, start_beat, vals_col, 1, p)
*
* min_vals[:] = 99999999 # N.inf # <<<<<<<<<<<<<<
* minval = -1
* for i in range(vals_col.shape[0]):
*/
__pyx_t_8 = -1;
__pyx_t_7.data = __pyx_v_min_vals.data;
__pyx_t_7.memview = __pyx_v_min_vals.memview;
__PYX_INC_MEMVIEW(&__pyx_t_7, 0);
__pyx_t_7.shape[0] = __pyx_v_min_vals.shape[0];
__pyx_t_7.strides[0] = __pyx_v_min_vals.strides[0];
__pyx_t_7.suboffsets[0] = -1;
{
double __pyx_temp_scalar = 99999999.0;
{
Py_ssize_t __pyx_temp_extent_0 = __pyx_t_7.shape[0];
Py_ssize_t __pyx_temp_stride_0 = __pyx_t_7.strides[0];
char *__pyx_temp_pointer_0;
Py_ssize_t __pyx_temp_idx_0;
__pyx_temp_pointer_0 = __pyx_t_7.data;
for (__pyx_temp_idx_0 = 0; __pyx_temp_idx_0 < __pyx_temp_extent_0; __pyx_temp_idx_0++) {
*((double *) __pyx_temp_pointer_0) = __pyx_temp_scalar;
__pyx_temp_pointer_0 += __pyx_temp_stride_0;
}
}
}
__PYX_XDEC_MEMVIEW(&__pyx_t_7, 0);
/* "radiotool/algorithms/par_build_table.pyx":240
*
* min_vals[:] = 99999999 # N.inf
* minval = -1 # <<<<<<<<<<<<<<
* for i in range(vals_col.shape[0]):
* if minval == -1 or vals_col[i] + cost[i] + start_pen < minval:
*/
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":241
* min_vals[:] = 99999999 # N.inf
* minval = -1
* for i in range(vals_col.shape[0]): # <<<<<<<<<<<<<<
* if minval == -1 or vals_col[i] + cost[i] + start_pen < minval:
* minval = vals_col[i] + cost[i] + start_pen
*/
__pyx_t_9 = (__pyx_v_vals_col.shape[0]);
for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; __pyx_t_8+=1) {
__pyx_v_i = __pyx_t_8;
/* "radiotool/algorithms/par_build_table.pyx":242
* minval = -1
* for i in range(vals_col.shape[0]):
* if minval == -1 or vals_col[i] + cost[i] + start_pen < minval: # <<<<<<<<<<<<<<
* minval = vals_col[i] + cost[i] + start_pen
*
*/
__pyx_t_6 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_6) {
__pyx_t_10 = __pyx_v_i;
__pyx_t_11 = __pyx_v_i;
__pyx_t_1 = (((((*((double *) ( /* dim=0 */ (__pyx_v_vals_col.data + __pyx_t_10 * __pyx_v_vals_col.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_11 * __pyx_v_cost.strides[0]) )))) + __pyx_v_start_pen) < __pyx_v_minval) != 0);
__pyx_t_5 = __pyx_t_1;
} else {
__pyx_t_5 = __pyx_t_6;
}
if (__pyx_t_5) {
/* "radiotool/algorithms/par_build_table.pyx":243
* for i in range(vals_col.shape[0]):
* if minval == -1 or vals_col[i] + cost[i] + start_pen < minval:
* minval = vals_col[i] + cost[i] + start_pen # <<<<<<<<<<<<<<
*
* min_vals[start_beat] = minval
*/
__pyx_t_12 = __pyx_v_i;
__pyx_t_13 = __pyx_v_i;
__pyx_v_minval = (((*((double *) ( /* dim=0 */ (__pyx_v_vals_col.data + __pyx_t_12 * __pyx_v_vals_col.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_13 * __pyx_v_cost.strides[0]) )))) + __pyx_v_start_pen);
goto __pyx_L9;
}
__pyx_L9:;
}
/* "radiotool/algorithms/par_build_table.pyx":245
* minval = vals_col[i] + cost[i] + start_pen
*
* min_vals[start_beat] = minval # <<<<<<<<<<<<<<
*
* else:
*/
__pyx_t_8 = __pyx_v_start_beat;
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_8 * __pyx_v_min_vals.strides[0]) )) = __pyx_v_minval;
goto __pyx_L6;
}
/*else*/ {
/* "radiotool/algorithms/par_build_table.pyx":248
*
* else:
* get_pen_column(pen, l, pen_val, global_start_l + l, p) # <<<<<<<<<<<<<<
*
* # categories of beats we could be at before this one
*/
__pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_column(__pyx_v_pen, __pyx_v_l, __pyx_v_pen_val, (__pyx_v_global_start_l + __pyx_v_l), __pyx_v_p);
/* "radiotool/algorithms/par_build_table.pyx":253
*
* # beat segment before min_beat
* for idx in range(p.n_beats * (p.min_beats - 1)): # <<<<<<<<<<<<<<
* beat_seg_i = idx / p.n_beats
* orig_beat_i = idx % p.n_beats
*/
__pyx_t_14 = (__pyx_v_p.n_beats * (__pyx_v_p.min_beats - 1));
for (__pyx_t_15 = 0; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) {
__pyx_v_idx = __pyx_t_15;
/* "radiotool/algorithms/par_build_table.pyx":254
* # beat segment before min_beat
* for idx in range(p.n_beats * (p.min_beats - 1)):
* beat_seg_i = idx / p.n_beats # <<<<<<<<<<<<<<
* orig_beat_i = idx % p.n_beats
*
*/
__pyx_v_beat_seg_i = (__pyx_v_idx / __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":255
* for idx in range(p.n_beats * (p.min_beats - 1)):
* beat_seg_i = idx / p.n_beats
* orig_beat_i = idx % p.n_beats # <<<<<<<<<<<<<<
*
* # could only be going to beat_seg_i + 1
*/
__pyx_v_orig_beat_i = (__pyx_v_idx % __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":258
*
* # could only be going to beat_seg_i + 1
* seg_start_beat = (beat_seg_i + 1) * p.n_beats # <<<<<<<<<<<<<<
* minval = -1
* for j in range(p.n_beats):
*/
__pyx_v_seg_start_beat = ((__pyx_v_beat_seg_i + 1) * __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":259
* # could only be going to beat_seg_i + 1
* seg_start_beat = (beat_seg_i + 1) * p.n_beats
* minval = -1 # <<<<<<<<<<<<<<
* for j in range(p.n_beats):
* tmpval = tc[orig_beat_i, j] + pen_val[idx] + cost[seg_start_beat + j]
*/
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":260
* seg_start_beat = (beat_seg_i + 1) * p.n_beats
* minval = -1
* for j in range(p.n_beats): # <<<<<<<<<<<<<<
* tmpval = tc[orig_beat_i, j] + pen_val[idx] + cost[seg_start_beat + j]
* if minval == -1 or tmpval < minval:
*/
__pyx_t_16 = __pyx_v_p.n_beats;
for (__pyx_t_17 = 0; __pyx_t_17 < __pyx_t_16; __pyx_t_17+=1) {
__pyx_v_j = __pyx_t_17;
/* "radiotool/algorithms/par_build_table.pyx":261
* minval = -1
* for j in range(p.n_beats):
* tmpval = tc[orig_beat_i, j] + pen_val[idx] + cost[seg_start_beat + j] # <<<<<<<<<<<<<<
* if minval == -1 or tmpval < minval:
* minval = tmpval
*/
__pyx_t_18 = __pyx_v_orig_beat_i;
__pyx_t_19 = __pyx_v_j;
__pyx_t_20 = __pyx_v_idx;
__pyx_t_21 = (__pyx_v_seg_start_beat + __pyx_v_j);
__pyx_v_tmpval = (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_18 * __pyx_v_tc.strides[0]) ) + __pyx_t_19 * __pyx_v_tc.strides[1]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_pen_val.data + __pyx_t_20 * __pyx_v_pen_val.strides[0]) )))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_21 * __pyx_v_cost.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":262
* for j in range(p.n_beats):
* tmpval = tc[orig_beat_i, j] + pen_val[idx] + cost[seg_start_beat + j]
* if minval == -1 or tmpval < minval: # <<<<<<<<<<<<<<
* minval = tmpval
*
*/
__pyx_t_5 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_5) {
__pyx_t_6 = ((__pyx_v_tmpval < __pyx_v_minval) != 0);
__pyx_t_1 = __pyx_t_6;
} else {
__pyx_t_1 = __pyx_t_5;
}
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":263
* tmpval = tc[orig_beat_i, j] + pen_val[idx] + cost[seg_start_beat + j]
* if minval == -1 or tmpval < minval:
* minval = tmpval # <<<<<<<<<<<<<<
*
* min_vals[idx] = minval
*/
__pyx_v_minval = __pyx_v_tmpval;
goto __pyx_L14;
}
__pyx_L14:;
}
/* "radiotool/algorithms/par_build_table.pyx":265
* minval = tmpval
*
* min_vals[idx] = minval # <<<<<<<<<<<<<<
*
* # beat segment between min beat and max beat
*/
__pyx_t_16 = __pyx_v_idx;
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_16 * __pyx_v_min_vals.strides[0]) )) = __pyx_v_minval;
}
/* "radiotool/algorithms/par_build_table.pyx":268
*
* # beat segment between min beat and max beat
* for idx in range(p.n_beats * (p.min_beats - 1), p.n_beats * (p.max_beats - 1)): # <<<<<<<<<<<<<<
* beat_seg_i = idx / p.n_beats
* orig_beat_i = idx % p.n_beats
*/
__pyx_t_14 = (__pyx_v_p.n_beats * (__pyx_v_p.max_beats - 1));
for (__pyx_t_15 = (__pyx_v_p.n_beats * (__pyx_v_p.min_beats - 1)); __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) {
__pyx_v_idx = __pyx_t_15;
/* "radiotool/algorithms/par_build_table.pyx":269
* # beat segment between min beat and max beat
* for idx in range(p.n_beats * (p.min_beats - 1), p.n_beats * (p.max_beats - 1)):
* beat_seg_i = idx / p.n_beats # <<<<<<<<<<<<<<
* orig_beat_i = idx % p.n_beats
*
*/
__pyx_v_beat_seg_i = (__pyx_v_idx / __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":270
* for idx in range(p.n_beats * (p.min_beats - 1), p.n_beats * (p.max_beats - 1)):
* beat_seg_i = idx / p.n_beats
* orig_beat_i = idx % p.n_beats # <<<<<<<<<<<<<<
*
* # could be going to beat_seg_i + 1
*/
__pyx_v_orig_beat_i = (__pyx_v_idx % __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":273
*
* # could be going to beat_seg_i + 1
* seg_start_beat = (beat_seg_i + 1) * p.n_beats # <<<<<<<<<<<<<<
* minval = -1
* for j in range(p.n_beats):
*/
__pyx_v_seg_start_beat = ((__pyx_v_beat_seg_i + 1) * __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":274
* # could be going to beat_seg_i + 1
* seg_start_beat = (beat_seg_i + 1) * p.n_beats
* minval = -1 # <<<<<<<<<<<<<<
* for j in range(p.n_beats):
* tmpval = tc[orig_beat_i, j] + pen_val[idx] + cost[seg_start_beat + j]
*/
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":275
* seg_start_beat = (beat_seg_i + 1) * p.n_beats
* minval = -1
* for j in range(p.n_beats): # <<<<<<<<<<<<<<
* tmpval = tc[orig_beat_i, j] + pen_val[idx] + cost[seg_start_beat + j]
* if minval == -1 or tmpval < minval:
*/
__pyx_t_17 = __pyx_v_p.n_beats;
for (__pyx_t_22 = 0; __pyx_t_22 < __pyx_t_17; __pyx_t_22+=1) {
__pyx_v_j = __pyx_t_22;
/* "radiotool/algorithms/par_build_table.pyx":276
* minval = -1
* for j in range(p.n_beats):
* tmpval = tc[orig_beat_i, j] + pen_val[idx] + cost[seg_start_beat + j] # <<<<<<<<<<<<<<
* if minval == -1 or tmpval < minval:
* minval = tmpval
*/
__pyx_t_23 = __pyx_v_orig_beat_i;
__pyx_t_24 = __pyx_v_j;
__pyx_t_25 = __pyx_v_idx;
__pyx_t_26 = (__pyx_v_seg_start_beat + __pyx_v_j);
__pyx_v_tmpval = (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_23 * __pyx_v_tc.strides[0]) ) + __pyx_t_24 * __pyx_v_tc.strides[1]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_pen_val.data + __pyx_t_25 * __pyx_v_pen_val.strides[0]) )))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_26 * __pyx_v_cost.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":277
* for j in range(p.n_beats):
* tmpval = tc[orig_beat_i, j] + pen_val[idx] + cost[seg_start_beat + j]
* if minval == -1 or tmpval < minval: # <<<<<<<<<<<<<<
* minval = tmpval
* # or could be going to first pause beat
*/
__pyx_t_1 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_1) {
__pyx_t_5 = ((__pyx_v_tmpval < __pyx_v_minval) != 0);
__pyx_t_6 = __pyx_t_5;
} else {
__pyx_t_6 = __pyx_t_1;
}
if (__pyx_t_6) {
/* "radiotool/algorithms/par_build_table.pyx":278
* tmpval = tc[orig_beat_i, j] + pen_val[idx] + cost[seg_start_beat + j]
* if minval == -1 or tmpval < minval:
* minval = tmpval # <<<<<<<<<<<<<<
* # or could be going to first pause beat
* tmpval = tc[orig_beat_i, p.p0] + pen_val[idx] + cost[p.p0_full]
*/
__pyx_v_minval = __pyx_v_tmpval;
goto __pyx_L19;
}
__pyx_L19:;
}
/* "radiotool/algorithms/par_build_table.pyx":280
* minval = tmpval
* # or could be going to first pause beat
* tmpval = tc[orig_beat_i, p.p0] + pen_val[idx] + cost[p.p0_full] # <<<<<<<<<<<<<<
* if minval == -1 or tmpval < minval:
* minval = tmpval
*/
__pyx_t_17 = __pyx_v_orig_beat_i;
__pyx_t_22 = __pyx_v_p.p0;
__pyx_t_27 = __pyx_v_idx;
__pyx_t_28 = __pyx_v_p.p0_full;
__pyx_v_tmpval = (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_17 * __pyx_v_tc.strides[0]) ) + __pyx_t_22 * __pyx_v_tc.strides[1]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_pen_val.data + __pyx_t_27 * __pyx_v_pen_val.strides[0]) )))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_28 * __pyx_v_cost.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":281
* # or could be going to first pause beat
* tmpval = tc[orig_beat_i, p.p0] + pen_val[idx] + cost[p.p0_full]
* if minval == -1 or tmpval < minval: # <<<<<<<<<<<<<<
* minval = tmpval
*
*/
__pyx_t_6 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_6) {
__pyx_t_1 = ((__pyx_v_tmpval < __pyx_v_minval) != 0);
__pyx_t_5 = __pyx_t_1;
} else {
__pyx_t_5 = __pyx_t_6;
}
if (__pyx_t_5) {
/* "radiotool/algorithms/par_build_table.pyx":282
* tmpval = tc[orig_beat_i, p.p0] + pen_val[idx] + cost[p.p0_full]
* if minval == -1 or tmpval < minval:
* minval = tmpval # <<<<<<<<<<<<<<
*
* min_vals[idx] = minval
*/
__pyx_v_minval = __pyx_v_tmpval;
goto __pyx_L20;
}
__pyx_L20:;
/* "radiotool/algorithms/par_build_table.pyx":284
* minval = tmpval
*
* min_vals[idx] = minval # <<<<<<<<<<<<<<
*
* # max beat segment
*/
__pyx_t_29 = __pyx_v_idx;
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_29 * __pyx_v_min_vals.strides[0]) )) = __pyx_v_minval;
}
/* "radiotool/algorithms/par_build_table.pyx":287
*
* # max beat segment
* for idx in range(p.n_beats * (p.max_beats - 1), p.n_beats * p.max_beats): # <<<<<<<<<<<<<<
* orig_beat_i = idx % p.n_beats
*
*/
__pyx_t_15 = (__pyx_v_p.n_beats * __pyx_v_p.max_beats);
for (__pyx_t_30 = (__pyx_v_p.n_beats * (__pyx_v_p.max_beats - 1)); __pyx_t_30 < __pyx_t_15; __pyx_t_30+=1) {
__pyx_v_idx = __pyx_t_30;
/* "radiotool/algorithms/par_build_table.pyx":288
* # max beat segment
* for idx in range(p.n_beats * (p.max_beats - 1), p.n_beats * p.max_beats):
* orig_beat_i = idx % p.n_beats # <<<<<<<<<<<<<<
*
* # must be going to first pause beat
*/
__pyx_v_orig_beat_i = (__pyx_v_idx % __pyx_v_p.n_beats);
/* "radiotool/algorithms/par_build_table.pyx":291
*
* # must be going to first pause beat
* min_vals[idx] = tc[orig_beat_i, p.p0] + pen_val[idx] + cost[p.p0_full] # <<<<<<<<<<<<<<
*
* # pause beats except the last one
*/
__pyx_t_31 = __pyx_v_orig_beat_i;
__pyx_t_32 = __pyx_v_p.p0;
__pyx_t_33 = __pyx_v_idx;
__pyx_t_34 = __pyx_v_p.p0_full;
__pyx_t_35 = __pyx_v_idx;
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_35 * __pyx_v_min_vals.strides[0]) )) = (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_31 * __pyx_v_tc.strides[0]) ) + __pyx_t_32 * __pyx_v_tc.strides[1]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_pen_val.data + __pyx_t_33 * __pyx_v_pen_val.strides[0]) )))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_34 * __pyx_v_cost.strides[0]) ))));
}
/* "radiotool/algorithms/par_build_table.pyx":294
*
* # pause beats except the last one
* for idx in range(p.p0_full, p.all_full - 1): # <<<<<<<<<<<<<<
* orig_beat_i = p.p0 + (idx - p.p0_full)
*
*/
__pyx_t_14 = (__pyx_v_p.all_full - 1);
for (__pyx_t_15 = __pyx_v_p.p0_full; __pyx_t_15 < __pyx_t_14; __pyx_t_15+=1) {
__pyx_v_idx = __pyx_t_15;
/* "radiotool/algorithms/par_build_table.pyx":295
* # pause beats except the last one
* for idx in range(p.p0_full, p.all_full - 1):
* orig_beat_i = p.p0 + (idx - p.p0_full) # <<<<<<<<<<<<<<
*
* # could only be going to another pause beat
*/
__pyx_v_orig_beat_i = (__pyx_v_p.p0 + (__pyx_v_idx - __pyx_v_p.p0_full));
/* "radiotool/algorithms/par_build_table.pyx":298
*
* # could only be going to another pause beat
* minval = -1 # <<<<<<<<<<<<<<
* for j in range(p.n_pauses):
* tmpval = tc[orig_beat_i, p.p0 + j] + pen_val[idx] + cost[p.p0_full + j]
*/
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":299
* # could only be going to another pause beat
* minval = -1
* for j in range(p.n_pauses): # <<<<<<<<<<<<<<
* tmpval = tc[orig_beat_i, p.p0 + j] + pen_val[idx] + cost[p.p0_full + j]
* if minval == -1 or tmpval < minval:
*/
__pyx_t_30 = __pyx_v_p.n_pauses;
for (__pyx_t_36 = 0; __pyx_t_36 < __pyx_t_30; __pyx_t_36+=1) {
__pyx_v_j = __pyx_t_36;
/* "radiotool/algorithms/par_build_table.pyx":300
* minval = -1
* for j in range(p.n_pauses):
* tmpval = tc[orig_beat_i, p.p0 + j] + pen_val[idx] + cost[p.p0_full + j] # <<<<<<<<<<<<<<
* if minval == -1 or tmpval < minval:
* minval = tmpval
*/
__pyx_t_37 = __pyx_v_orig_beat_i;
__pyx_t_38 = (__pyx_v_p.p0 + __pyx_v_j);
__pyx_t_39 = __pyx_v_idx;
__pyx_t_40 = (__pyx_v_p.p0_full + __pyx_v_j);
__pyx_v_tmpval = (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_37 * __pyx_v_tc.strides[0]) ) + __pyx_t_38 * __pyx_v_tc.strides[1]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_pen_val.data + __pyx_t_39 * __pyx_v_pen_val.strides[0]) )))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_40 * __pyx_v_cost.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":301
* for j in range(p.n_pauses):
* tmpval = tc[orig_beat_i, p.p0 + j] + pen_val[idx] + cost[p.p0_full + j]
* if minval == -1 or tmpval < minval: # <<<<<<<<<<<<<<
* minval = tmpval
* min_vals[idx] = minval
*/
__pyx_t_5 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_5) {
__pyx_t_6 = ((__pyx_v_tmpval < __pyx_v_minval) != 0);
__pyx_t_1 = __pyx_t_6;
} else {
__pyx_t_1 = __pyx_t_5;
}
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":302
* tmpval = tc[orig_beat_i, p.p0 + j] + pen_val[idx] + cost[p.p0_full + j]
* if minval == -1 or tmpval < minval:
* minval = tmpval # <<<<<<<<<<<<<<
* min_vals[idx] = minval
*
*/
__pyx_v_minval = __pyx_v_tmpval;
goto __pyx_L27;
}
__pyx_L27:;
}
/* "radiotool/algorithms/par_build_table.pyx":303
* if minval == -1 or tmpval < minval:
* minval = tmpval
* min_vals[idx] = minval # <<<<<<<<<<<<<<
*
* # last pause beat
*/
__pyx_t_30 = __pyx_v_idx;
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_30 * __pyx_v_min_vals.strides[0]) )) = __pyx_v_minval;
}
/* "radiotool/algorithms/par_build_table.pyx":306
*
* # last pause beat
* minval = -1 # <<<<<<<<<<<<<<
* for j in range(p.n_beats):
* tmpval = tc[p.p0 + p.n_pauses - 1, j] + pen_val[p.all_full - 1] + cost[j]
*/
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":307
* # last pause beat
* minval = -1
* for j in range(p.n_beats): # <<<<<<<<<<<<<<
* tmpval = tc[p.p0 + p.n_pauses - 1, j] + pen_val[p.all_full - 1] + cost[j]
* if minval == -1 or tmpval < minval:
*/
__pyx_t_15 = __pyx_v_p.n_beats;
for (__pyx_t_36 = 0; __pyx_t_36 < __pyx_t_15; __pyx_t_36+=1) {
__pyx_v_j = __pyx_t_36;
/* "radiotool/algorithms/par_build_table.pyx":308
* minval = -1
* for j in range(p.n_beats):
* tmpval = tc[p.p0 + p.n_pauses - 1, j] + pen_val[p.all_full - 1] + cost[j] # <<<<<<<<<<<<<<
* if minval == -1 or tmpval < minval:
* minval = tmpval
*/
__pyx_t_14 = ((__pyx_v_p.p0 + __pyx_v_p.n_pauses) - 1);
__pyx_t_41 = __pyx_v_j;
__pyx_t_42 = (__pyx_v_p.all_full - 1);
__pyx_t_43 = __pyx_v_j;
__pyx_v_tmpval = (((*((double *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_tc.data + __pyx_t_14 * __pyx_v_tc.strides[0]) ) + __pyx_t_41 * __pyx_v_tc.strides[1]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_pen_val.data + __pyx_t_42 * __pyx_v_pen_val.strides[0]) )))) + (*((double *) ( /* dim=0 */ (__pyx_v_cost.data + __pyx_t_43 * __pyx_v_cost.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":309
* for j in range(p.n_beats):
* tmpval = tc[p.p0 + p.n_pauses - 1, j] + pen_val[p.all_full - 1] + cost[j]
* if minval == -1 or tmpval < minval: # <<<<<<<<<<<<<<
* minval = tmpval
* min_vals[p.all_full - 1] = minval
*/
__pyx_t_1 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_1) {
__pyx_t_5 = ((__pyx_v_tmpval < __pyx_v_minval) != 0);
__pyx_t_6 = __pyx_t_5;
} else {
__pyx_t_6 = __pyx_t_1;
}
if (__pyx_t_6) {
/* "radiotool/algorithms/par_build_table.pyx":310
* tmpval = tc[p.p0 + p.n_pauses - 1, j] + pen_val[p.all_full - 1] + cost[j]
* if minval == -1 or tmpval < minval:
* minval = tmpval # <<<<<<<<<<<<<<
* min_vals[p.all_full - 1] = minval
*
*/
__pyx_v_minval = __pyx_v_tmpval;
goto __pyx_L30;
}
__pyx_L30:;
}
/* "radiotool/algorithms/par_build_table.pyx":311
* if minval == -1 or tmpval < minval:
* minval = tmpval
* min_vals[p.all_full - 1] = minval # <<<<<<<<<<<<<<
*
* cost[:] = min_vals
*/
__pyx_t_44 = (__pyx_v_p.all_full - 1);
*((double *) ( /* dim=0 */ (__pyx_v_min_vals.data + __pyx_t_44 * __pyx_v_min_vals.strides[0]) )) = __pyx_v_minval;
}
__pyx_L6:;
/* "radiotool/algorithms/par_build_table.pyx":313
* min_vals[p.all_full - 1] = minval
*
* cost[:] = min_vals # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_15 = -1;
__pyx_t_45.data = __pyx_v_cost.data;
__pyx_t_45.memview = __pyx_v_cost.memview;
__PYX_INC_MEMVIEW(&__pyx_t_45, 0);
__pyx_t_45.shape[0] = __pyx_v_cost.shape[0];
__pyx_t_45.strides[0] = __pyx_v_cost.strides[0];
__pyx_t_45.suboffsets[0] = -1;
if (unlikely(__pyx_memoryview_copy_contents(__pyx_v_min_vals, __pyx_t_45, 1, 1, 0) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 313; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__PYX_XDEC_MEMVIEW(&__pyx_t_45, 0);
}
/* "radiotool/algorithms/par_build_table.pyx":218
*
*
* cdef void backward_space_efficient_cost_with_duration_constraint( # <<<<<<<<<<<<<<
* double[:, :] tc, double[:, :] pen, int start_beat, int end_beat, int global_start_l, Params p,
* double[:] cost, double[:] pen_val, double[:] vals_col, double[:] min_vals) nogil:
*/
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__PYX_XDEC_MEMVIEW(&__pyx_t_2, 0);
__PYX_XDEC_MEMVIEW(&__pyx_t_7, 0);
__PYX_XDEC_MEMVIEW(&__pyx_t_45, 0);
__Pyx_WriteUnraisable("radiotool.algorithms.par_build_table.backward_space_efficient_cost_with_duration_constraint", __pyx_clineno, __pyx_lineno, __pyx_filename, 0);
__pyx_L0:;
}
/* "radiotool/algorithms/par_build_table.pyx":316
*
*
* cdef inline int minimum(double[:] buffer1, double[:] buffer2) nogil: # <<<<<<<<<<<<<<
* cdef int idx
* cdef int opt_i = 0
*/
/* Argmin over the elementwise sum of two 1-D double memoryview slices.
 *
 * Generated from "radiotool/algorithms/par_build_table.pyx":316
 *   cdef inline int minimum(double[:] buffer1, double[:] buffer2) nogil
 *
 * Returns the index i that minimizes buffer1[i] + buffer2[i].
 * NOTE(review): like the Cython source, this reads element 0 unconditionally,
 * so both slices are assumed non-empty and of equal length — verify at callers.
 */
static CYTHON_INLINE int __pyx_f_9radiotool_10algorithms_15par_build_table_minimum(__Pyx_memviewslice __pyx_v_buffer1, __Pyx_memviewslice __pyx_v_buffer2) {
  /* Hoist the memoryview bookkeeping once: data is a char*, strides are in
   * bytes, so each element access is base + idx * stride. */
  char *__pyx_base1 = __pyx_v_buffer1.data;
  char *__pyx_base2 = __pyx_v_buffer2.data;
  Py_ssize_t __pyx_stride1 = __pyx_v_buffer1.strides[0];
  Py_ssize_t __pyx_stride2 = __pyx_v_buffer2.strides[0];
  Py_ssize_t __pyx_n = __pyx_v_buffer1.shape[0];
  Py_ssize_t __pyx_idx;
  int __pyx_opt_i = 0;  /* best index found so far (pyx:318) */
  /* Seed the running minimum with the sum at index 0 (pyx:319). */
  double __pyx_minval = (*(double *) __pyx_base1) + (*(double *) __pyx_base2);

  /* Scan the remaining elements; strict '<' keeps the first index on ties,
   * matching the generated original (pyx:320-323). */
  for (__pyx_idx = 1; __pyx_idx < __pyx_n; __pyx_idx++) {
    double __pyx_candidate =
        (*(double *) (__pyx_base1 + __pyx_idx * __pyx_stride1)) +
        (*(double *) (__pyx_base2 + __pyx_idx * __pyx_stride2));
    if (__pyx_candidate < __pyx_minval) {
      __pyx_minval = __pyx_candidate;
      __pyx_opt_i = (int) __pyx_idx;
    }
  }

  /* pyx:325 — return opt_i */
  return __pyx_opt_i;
}
/* "radiotool/algorithms/par_build_table.pyx":328
*
*
* cdef void divide_and_conquer_cost_and_path( # <<<<<<<<<<<<<<
* double[:, :] tc, double[:, :] pen, int start_beat, int end_beat, int offset,
* int[:] global_path, Params p,
*/
static void __pyx_f_9radiotool_10algorithms_15par_build_table_divide_and_conquer_cost_and_path(__Pyx_memviewslice __pyx_v_tc, __Pyx_memviewslice __pyx_v_pen, int __pyx_v_start_beat, int __pyx_v_end_beat, int __pyx_v_offset, __Pyx_memviewslice __pyx_v_global_path, struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params __pyx_v_p, __Pyx_memviewslice __pyx_v_f, __Pyx_memviewslice __pyx_v_g, __Pyx_memviewslice __pyx_v_mv1, __Pyx_memviewslice __pyx_v_mv2, __Pyx_memviewslice __pyx_v_mv3, __Pyx_memviewslice __pyx_v_mv4, __Pyx_memviewslice __pyx_v_mv5, __Pyx_memviewslice __pyx_v_mv6) {
int __pyx_v_l;
__Pyx_memviewslice __pyx_v_new_pen = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_tc_column = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_v_i;
int __pyx_v_opt_i;
int __pyx_v_l_over_2;
double __pyx_v_minval;
int __pyx_v_prange_i;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
Py_ssize_t __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
int __pyx_t_11;
long __pyx_t_12;
int __pyx_t_13;
int __pyx_t_14;
int __pyx_t_15;
int __pyx_t_16;
long __pyx_t_17;
long __pyx_t_18;
long __pyx_t_19;
__Pyx_memviewslice __pyx_t_20 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_21;
__Pyx_memviewslice __pyx_t_22 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_23;
int __pyx_t_24;
int __pyx_t_25;
int __pyx_t_26;
__Pyx_memviewslice __pyx_t_27 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_t_28;
__Pyx_memviewslice __pyx_t_29 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("divide_and_conquer_cost_and_path", 0);
/* "radiotool/algorithms/par_build_table.pyx":334
* double[:] mv3, double[:] mv4, double[:] mv5, double[:] mv6):
*
* cdef int l = pen.shape[1] # out beats # <<<<<<<<<<<<<<
* cdef double[:] new_pen, tc_column
* cdef int i, opt_i, l_over_2, f_done, g_done
*/
__pyx_v_l = (__pyx_v_pen.shape[1]);
/* "radiotool/algorithms/par_build_table.pyx":337
* cdef double[:] new_pen, tc_column
* cdef int i, opt_i, l_over_2, f_done, g_done
* cdef double minval = -1.0 # <<<<<<<<<<<<<<
* cdef int prange_i, stride
*
*/
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":353
* # opt_i_arr = ar2
*
* if l == 0: # <<<<<<<<<<<<<<
* pass
* elif l == 1:
*/
__pyx_t_1 = ((__pyx_v_l == 0) != 0);
if (__pyx_t_1) {
goto __pyx_L3;
}
/* "radiotool/algorithms/par_build_table.pyx":355
* if l == 0:
* pass
* elif l == 1: # <<<<<<<<<<<<<<
* pass
* elif l == 2 and start_beat != -1 and end_beat != -1:
*/
__pyx_t_1 = ((__pyx_v_l == 1) != 0);
if (__pyx_t_1) {
goto __pyx_L3;
}
/* "radiotool/algorithms/par_build_table.pyx":357
* elif l == 1:
* pass
* elif l == 2 and start_beat != -1 and end_beat != -1: # <<<<<<<<<<<<<<
* pass
* elif l == 2 and start_beat != -1:
*/
__pyx_t_1 = ((__pyx_v_l == 2) != 0);
if (__pyx_t_1) {
__pyx_t_2 = ((__pyx_v_start_beat != -1) != 0);
if (__pyx_t_2) {
__pyx_t_3 = ((__pyx_v_end_beat != -1) != 0);
__pyx_t_4 = __pyx_t_3;
} else {
__pyx_t_4 = __pyx_t_2;
}
__pyx_t_2 = __pyx_t_4;
} else {
__pyx_t_2 = __pyx_t_1;
}
if (__pyx_t_2) {
goto __pyx_L3;
}
/* "radiotool/algorithms/par_build_table.pyx":359
* elif l == 2 and start_beat != -1 and end_beat != -1:
* pass
* elif l == 2 and start_beat != -1: # <<<<<<<<<<<<<<
* new_pen = mv1
* get_pen_column(pen, 1, new_pen, offset, p)
*/
__pyx_t_2 = ((__pyx_v_l == 2) != 0);
if (__pyx_t_2) {
__pyx_t_1 = ((__pyx_v_start_beat != -1) != 0);
__pyx_t_4 = __pyx_t_1;
} else {
__pyx_t_4 = __pyx_t_2;
}
if (__pyx_t_4) {
/* "radiotool/algorithms/par_build_table.pyx":360
* pass
* elif l == 2 and start_beat != -1:
* new_pen = mv1 # <<<<<<<<<<<<<<
* get_pen_column(pen, 1, new_pen, offset, p)
*
*/
__PYX_INC_MEMVIEW(&__pyx_v_mv1, 0);
__pyx_v_new_pen = __pyx_v_mv1;
/* "radiotool/algorithms/par_build_table.pyx":361
* elif l == 2 and start_beat != -1:
* new_pen = mv1
* get_pen_column(pen, 1, new_pen, offset, p) # <<<<<<<<<<<<<<
*
* tc_column = mv2
*/
__pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_column(__pyx_v_pen, 1, __pyx_v_new_pen, __pyx_v_offset, __pyx_v_p);
/* "radiotool/algorithms/par_build_table.pyx":363
* get_pen_column(pen, 1, new_pen, offset, p)
*
* tc_column = mv2 # <<<<<<<<<<<<<<
* get_tc_column(tc, start_beat, tc_column, 1, p)
*
*/
__PYX_INC_MEMVIEW(&__pyx_v_mv2, 0);
__pyx_v_tc_column = __pyx_v_mv2;
/* "radiotool/algorithms/par_build_table.pyx":364
*
* tc_column = mv2
* get_tc_column(tc, start_beat, tc_column, 1, p) # <<<<<<<<<<<<<<
*
* global_path[offset] = start_beat
*/
__pyx_f_9radiotool_10algorithms_15par_build_table_get_tc_column(__pyx_v_tc, __pyx_v_start_beat, __pyx_v_tc_column, 1, __pyx_v_p);
/* "radiotool/algorithms/par_build_table.pyx":366
* get_tc_column(tc, start_beat, tc_column, 1, p)
*
* global_path[offset] = start_beat # <<<<<<<<<<<<<<
*
* minval = -1.0
*/
__pyx_t_5 = __pyx_v_offset;
*((int *) ( /* dim=0 */ (__pyx_v_global_path.data + __pyx_t_5 * __pyx_v_global_path.strides[0]) )) = __pyx_v_start_beat;
/* "radiotool/algorithms/par_build_table.pyx":368
* global_path[offset] = start_beat
*
* minval = -1.0 # <<<<<<<<<<<<<<
* opt_i = 0
* for i in range(tc_column.shape[0]):
*/
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":369
*
* minval = -1.0
* opt_i = 0 # <<<<<<<<<<<<<<
* for i in range(tc_column.shape[0]):
* if minval == -1.0 or tc_column[i] + new_pen[i] < minval:
*/
__pyx_v_opt_i = 0;
/* "radiotool/algorithms/par_build_table.pyx":370
* minval = -1.0
* opt_i = 0
* for i in range(tc_column.shape[0]): # <<<<<<<<<<<<<<
* if minval == -1.0 or tc_column[i] + new_pen[i] < minval:
* minval = tc_column[i] + new_pen[i]
*/
__pyx_t_6 = (__pyx_v_tc_column.shape[0]);
for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) {
__pyx_v_i = __pyx_t_7;
/* "radiotool/algorithms/par_build_table.pyx":371
* opt_i = 0
* for i in range(tc_column.shape[0]):
* if minval == -1.0 or tc_column[i] + new_pen[i] < minval: # <<<<<<<<<<<<<<
* minval = tc_column[i] + new_pen[i]
* opt_i = i
*/
__pyx_t_4 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_4) {
__pyx_t_8 = __pyx_v_i;
__pyx_t_9 = __pyx_v_i;
__pyx_t_2 = ((((*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_8 * __pyx_v_tc_column.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_new_pen.data + __pyx_t_9 * __pyx_v_new_pen.strides[0]) )))) < __pyx_v_minval) != 0);
__pyx_t_1 = __pyx_t_2;
} else {
__pyx_t_1 = __pyx_t_4;
}
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":372
* for i in range(tc_column.shape[0]):
* if minval == -1.0 or tc_column[i] + new_pen[i] < minval:
* minval = tc_column[i] + new_pen[i] # <<<<<<<<<<<<<<
* opt_i = i
*
*/
__pyx_t_10 = __pyx_v_i;
__pyx_t_11 = __pyx_v_i;
__pyx_v_minval = ((*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_10 * __pyx_v_tc_column.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_new_pen.data + __pyx_t_11 * __pyx_v_new_pen.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":373
* if minval == -1.0 or tc_column[i] + new_pen[i] < minval:
* minval = tc_column[i] + new_pen[i]
* opt_i = i # <<<<<<<<<<<<<<
*
* # print "$ setting time %d to %d" % (offset + 1, opt_i)
*/
__pyx_v_opt_i = __pyx_v_i;
goto __pyx_L6;
}
__pyx_L6:;
}
/* "radiotool/algorithms/par_build_table.pyx":376
*
* # print "$ setting time %d to %d" % (offset + 1, opt_i)
* global_path[offset + 1] = opt_i # <<<<<<<<<<<<<<
*
* # global_path_cost[offset + 1] = N.min(tc_column + new_pen)
*/
__pyx_t_12 = (__pyx_v_offset + 1);
*((int *) ( /* dim=0 */ (__pyx_v_global_path.data + __pyx_t_12 * __pyx_v_global_path.strides[0]) )) = __pyx_v_opt_i;
goto __pyx_L3;
}
/* "radiotool/algorithms/par_build_table.pyx":379
*
* # global_path_cost[offset + 1] = N.min(tc_column + new_pen)
* elif l == 2 and end_beat != -1: # <<<<<<<<<<<<<<
* new_pen = mv1
* get_pen_column(pen, 0, new_pen, offset, p)
*/
__pyx_t_1 = ((__pyx_v_l == 2) != 0);
if (__pyx_t_1) {
__pyx_t_4 = ((__pyx_v_end_beat != -1) != 0);
__pyx_t_2 = __pyx_t_4;
} else {
__pyx_t_2 = __pyx_t_1;
}
if (__pyx_t_2) {
/* "radiotool/algorithms/par_build_table.pyx":380
* # global_path_cost[offset + 1] = N.min(tc_column + new_pen)
* elif l == 2 and end_beat != -1:
* new_pen = mv1 # <<<<<<<<<<<<<<
* get_pen_column(pen, 0, new_pen, offset, p)
*
*/
__PYX_INC_MEMVIEW(&__pyx_v_mv1, 0);
__pyx_v_new_pen = __pyx_v_mv1;
/* "radiotool/algorithms/par_build_table.pyx":381
* elif l == 2 and end_beat != -1:
* new_pen = mv1
* get_pen_column(pen, 0, new_pen, offset, p) # <<<<<<<<<<<<<<
*
* tc_column = mv2
*/
__pyx_f_9radiotool_10algorithms_15par_build_table_get_pen_column(__pyx_v_pen, 0, __pyx_v_new_pen, __pyx_v_offset, __pyx_v_p);
/* "radiotool/algorithms/par_build_table.pyx":383
* get_pen_column(pen, 0, new_pen, offset, p)
*
* tc_column = mv2 # <<<<<<<<<<<<<<
* get_tc_column(tc, end_beat, tc_column, 0, p)
*
*/
__PYX_INC_MEMVIEW(&__pyx_v_mv2, 0);
__pyx_v_tc_column = __pyx_v_mv2;
/* "radiotool/algorithms/par_build_table.pyx":384
*
* tc_column = mv2
* get_tc_column(tc, end_beat, tc_column, 0, p) # <<<<<<<<<<<<<<
*
* minval = -1.0
*/
__pyx_f_9radiotool_10algorithms_15par_build_table_get_tc_column(__pyx_v_tc, __pyx_v_end_beat, __pyx_v_tc_column, 0, __pyx_v_p);
/* "radiotool/algorithms/par_build_table.pyx":386
* get_tc_column(tc, end_beat, tc_column, 0, p)
*
* minval = -1.0 # <<<<<<<<<<<<<<
* opt_i = 0
* for i in range(tc_column.shape[0]):
*/
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":387
*
* minval = -1.0
* opt_i = 0 # <<<<<<<<<<<<<<
* for i in range(tc_column.shape[0]):
* if minval == -1.0 or tc_column[i] + new_pen[i] < minval:
*/
__pyx_v_opt_i = 0;
/* "radiotool/algorithms/par_build_table.pyx":388
* minval = -1.0
* opt_i = 0
* for i in range(tc_column.shape[0]): # <<<<<<<<<<<<<<
* if minval == -1.0 or tc_column[i] + new_pen[i] < minval:
* minval = tc_column[i] + new_pen[i]
*/
__pyx_t_6 = (__pyx_v_tc_column.shape[0]);
for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_6; __pyx_t_7+=1) {
__pyx_v_i = __pyx_t_7;
/* "radiotool/algorithms/par_build_table.pyx":389
* opt_i = 0
* for i in range(tc_column.shape[0]):
* if minval == -1.0 or tc_column[i] + new_pen[i] < minval: # <<<<<<<<<<<<<<
* minval = tc_column[i] + new_pen[i]
* opt_i = i
*/
__pyx_t_2 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_2) {
__pyx_t_13 = __pyx_v_i;
__pyx_t_14 = __pyx_v_i;
__pyx_t_1 = ((((*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_13 * __pyx_v_tc_column.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_new_pen.data + __pyx_t_14 * __pyx_v_new_pen.strides[0]) )))) < __pyx_v_minval) != 0);
__pyx_t_4 = __pyx_t_1;
} else {
__pyx_t_4 = __pyx_t_2;
}
if (__pyx_t_4) {
/* "radiotool/algorithms/par_build_table.pyx":390
* for i in range(tc_column.shape[0]):
* if minval == -1.0 or tc_column[i] + new_pen[i] < minval:
* minval = tc_column[i] + new_pen[i] # <<<<<<<<<<<<<<
* opt_i = i
*
*/
__pyx_t_15 = __pyx_v_i;
__pyx_t_16 = __pyx_v_i;
__pyx_v_minval = ((*((double *) ( /* dim=0 */ (__pyx_v_tc_column.data + __pyx_t_15 * __pyx_v_tc_column.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_new_pen.data + __pyx_t_16 * __pyx_v_new_pen.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":391
* if minval == -1.0 or tc_column[i] + new_pen[i] < minval:
* minval = tc_column[i] + new_pen[i]
* opt_i = i # <<<<<<<<<<<<<<
*
* # print "* setting time %d to %d" % (offset, opt_i)
*/
__pyx_v_opt_i = __pyx_v_i;
goto __pyx_L9;
}
__pyx_L9:;
}
/* "radiotool/algorithms/par_build_table.pyx":394
*
* # print "* setting time %d to %d" % (offset, opt_i)
* global_path[offset] = opt_i # <<<<<<<<<<<<<<
* global_path[offset + 1] = end_beat
*
*/
__pyx_t_7 = __pyx_v_offset;
*((int *) ( /* dim=0 */ (__pyx_v_global_path.data + __pyx_t_7 * __pyx_v_global_path.strides[0]) )) = __pyx_v_opt_i;
/* "radiotool/algorithms/par_build_table.pyx":395
* # print "* setting time %d to %d" % (offset, opt_i)
* global_path[offset] = opt_i
* global_path[offset + 1] = end_beat # <<<<<<<<<<<<<<
*
* # global_path_cost[offset] = N.min(tc_column + new_pen)
*/
__pyx_t_17 = (__pyx_v_offset + 1);
*((int *) ( /* dim=0 */ (__pyx_v_global_path.data + __pyx_t_17 * __pyx_v_global_path.strides[0]) )) = __pyx_v_end_beat;
goto __pyx_L3;
}
/* "radiotool/algorithms/par_build_table.pyx":398
*
* # global_path_cost[offset] = N.min(tc_column + new_pen)
* elif l == 2: # <<<<<<<<<<<<<<
* pass
* # opt_path = cost_and_path(tc, pen, start_beat, end_beat)
*/
__pyx_t_4 = ((__pyx_v_l == 2) != 0);
if (__pyx_t_4) {
goto __pyx_L3;
}
/*else*/ {
/* "radiotool/algorithms/par_build_table.pyx":404
*
* else:
* l_over_2 = l / 2 # <<<<<<<<<<<<<<
*
* # print "forward. start beat:", start_beat, "offset:", offset, "length:", pen[:, :l_over_2 + 1].shape[1]
*/
__pyx_v_l_over_2 = (__pyx_v_l / 2);
/* "radiotool/algorithms/par_build_table.pyx":409
* # print "backwrd. end beat:", end_beat, "offset:", offset + l_over_2, "length:", pen[:, l_over_2:].shape[1]
*
* for prange_i in parallel.prange(2, nogil=True): # <<<<<<<<<<<<<<
* if prange_i == 0:
* space_efficient_cost_with_duration_constraint(
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
#endif
/*try:*/ {
if (1 == 0) abort();
{
int __pyx_parallel_temp0 = 0xbad0bad0;
const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0;
PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
int __pyx_parallel_why;
__pyx_parallel_why = 0;
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_19 = (2 - 0) / 1;
if (__pyx_t_19 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_21) firstprivate(__pyx_t_22, __pyx_t_20) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
Py_BEGIN_ALLOW_THREADS
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_prange_i) lastprivate(__pyx_v_prange_i)
#endif /* _OPENMP */
for (__pyx_t_18 = 0; __pyx_t_18 < __pyx_t_19; __pyx_t_18++){
if (__pyx_parallel_why < 2)
{
__pyx_v_prange_i = 0 + 1 * __pyx_t_18;
/* "radiotool/algorithms/par_build_table.pyx":413
* space_efficient_cost_with_duration_constraint(
* tc, pen[:, :l_over_2 + 1], start_beat, -1, offset, p, f, mv1, mv2, mv3)
* elif prange_i == 1: # <<<<<<<<<<<<<<
* backward_space_efficient_cost_with_duration_constraint(
* tc, pen[:, l_over_2:], -1, end_beat, offset + l_over_2, p, g, mv4, mv5, mv6)
*/
switch (__pyx_v_prange_i) {
/* "radiotool/algorithms/par_build_table.pyx":410
*
* for prange_i in parallel.prange(2, nogil=True):
* if prange_i == 0: # <<<<<<<<<<<<<<
* space_efficient_cost_with_duration_constraint(
* tc, pen[:, :l_over_2 + 1], start_beat, -1, offset, p, f, mv1, mv2, mv3)
*/
case 0:
/* "radiotool/algorithms/par_build_table.pyx":412
* if prange_i == 0:
* space_efficient_cost_with_duration_constraint(
* tc, pen[:, :l_over_2 + 1], start_beat, -1, offset, p, f, mv1, mv2, mv3) # <<<<<<<<<<<<<<
* elif prange_i == 1:
* backward_space_efficient_cost_with_duration_constraint(
*/
__pyx_t_21 = -1;
__pyx_t_20.data = __pyx_v_pen.data;
__pyx_t_20.memview = __pyx_v_pen.memview;
__PYX_INC_MEMVIEW(&__pyx_t_20, 0);
__pyx_t_20.shape[0] = __pyx_v_pen.shape[0];
__pyx_t_20.strides[0] = __pyx_v_pen.strides[0];
__pyx_t_20.suboffsets[0] = -1;
if (unlikely(__pyx_memoryview_slice_memviewslice(
&__pyx_t_20,
__pyx_v_pen.shape[1], __pyx_v_pen.strides[1], __pyx_v_pen.suboffsets[1],
1,
1,
&__pyx_t_21,
0,
(__pyx_v_l_over_2 + 1),
0,
0,
1,
0,
1) < 0))
{
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 412; __pyx_clineno = __LINE__; goto __pyx_L15_error;}
}
__pyx_f_9radiotool_10algorithms_15par_build_table_space_efficient_cost_with_duration_constraint(__pyx_v_tc, __pyx_t_20, __pyx_v_start_beat, -1, __pyx_v_offset, __pyx_v_p, __pyx_v_f, __pyx_v_mv1, __pyx_v_mv2, __pyx_v_mv3);
/* "radiotool/algorithms/par_build_table.pyx":411
* for prange_i in parallel.prange(2, nogil=True):
* if prange_i == 0:
* space_efficient_cost_with_duration_constraint( # <<<<<<<<<<<<<<
* tc, pen[:, :l_over_2 + 1], start_beat, -1, offset, p, f, mv1, mv2, mv3)
* elif prange_i == 1:
*/
__PYX_XDEC_MEMVIEW(&__pyx_t_20, 0);
break;
/* "radiotool/algorithms/par_build_table.pyx":413
* space_efficient_cost_with_duration_constraint(
* tc, pen[:, :l_over_2 + 1], start_beat, -1, offset, p, f, mv1, mv2, mv3)
* elif prange_i == 1: # <<<<<<<<<<<<<<
* backward_space_efficient_cost_with_duration_constraint(
* tc, pen[:, l_over_2:], -1, end_beat, offset + l_over_2, p, g, mv4, mv5, mv6)
*/
case 1:
/* "radiotool/algorithms/par_build_table.pyx":415
* elif prange_i == 1:
* backward_space_efficient_cost_with_duration_constraint(
* tc, pen[:, l_over_2:], -1, end_beat, offset + l_over_2, p, g, mv4, mv5, mv6) # <<<<<<<<<<<<<<
*
* # print "finding minimum"
*/
__pyx_t_21 = -1;
__pyx_t_22.data = __pyx_v_pen.data;
__pyx_t_22.memview = __pyx_v_pen.memview;
__PYX_INC_MEMVIEW(&__pyx_t_22, 0);
__pyx_t_22.shape[0] = __pyx_v_pen.shape[0];
__pyx_t_22.strides[0] = __pyx_v_pen.strides[0];
__pyx_t_22.suboffsets[0] = -1;
if (unlikely(__pyx_memoryview_slice_memviewslice(
&__pyx_t_22,
__pyx_v_pen.shape[1], __pyx_v_pen.strides[1], __pyx_v_pen.suboffsets[1],
1,
1,
&__pyx_t_21,
__pyx_v_l_over_2,
0,
0,
1,
0,
0,
1) < 0))
{
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 415; __pyx_clineno = __LINE__; goto __pyx_L15_error;}
}
__pyx_f_9radiotool_10algorithms_15par_build_table_backward_space_efficient_cost_with_duration_constraint(__pyx_v_tc, __pyx_t_22, -1, __pyx_v_end_beat, (__pyx_v_offset + __pyx_v_l_over_2), __pyx_v_p, __pyx_v_g, __pyx_v_mv4, __pyx_v_mv5, __pyx_v_mv6);
/* "radiotool/algorithms/par_build_table.pyx":414
* tc, pen[:, :l_over_2 + 1], start_beat, -1, offset, p, f, mv1, mv2, mv3)
* elif prange_i == 1:
* backward_space_efficient_cost_with_duration_constraint( # <<<<<<<<<<<<<<
* tc, pen[:, l_over_2:], -1, end_beat, offset + l_over_2, p, g, mv4, mv5, mv6)
*
*/
__PYX_XDEC_MEMVIEW(&__pyx_t_22, 0);
break;
default: break;
}
goto __pyx_L18;
__pyx_L15_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_exc_type)
#endif /* _OPENMP */
if (!__pyx_parallel_exc_type) {
__Pyx_ErrFetch(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
__Pyx_GOTREF(__pyx_parallel_exc_type);
}
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_parallel_why = 4;
goto __pyx_L17;
__pyx_L17:;
#ifdef _OPENMP
#pragma omp critical(__pyx_parallel_lastprivates0)
#endif /* _OPENMP */
{
__pyx_parallel_temp0 = __pyx_v_prange_i;
}
__pyx_L18:;
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_why)
#endif /* _OPENMP */
}
}
#ifdef _OPENMP
Py_END_ALLOW_THREADS
#else
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
#endif /* _OPENMP */
/* Clean up any temporaries */
__PYX_XDEC_MEMVIEW(&__pyx_t_22, 0);
__PYX_XDEC_MEMVIEW(&__pyx_t_20, 0);
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
#ifndef _OPENMP
}
#endif /* _OPENMP */
}
}
if (__pyx_parallel_exc_type) {
/* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
__pyx_parallel_why = 4;
}
if (__pyx_parallel_why) {
__pyx_v_prange_i = __pyx_parallel_temp0;
switch (__pyx_parallel_why) {
case 3: goto __pyx_L10_return;
case 4:
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
__Pyx_GIVEREF(__pyx_parallel_exc_type);
__Pyx_ErrRestore(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
__pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
}
goto __pyx_L11_error;
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "radiotool/algorithms/par_build_table.pyx":409
* # print "backwrd. end beat:", end_beat, "offset:", offset + l_over_2, "length:", pen[:, l_over_2:].shape[1]
*
* for prange_i in parallel.prange(2, nogil=True): # <<<<<<<<<<<<<<
* if prange_i == 0:
* space_efficient_cost_with_duration_constraint(
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L12;
}
__pyx_L10_return: {
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L0;
}
__pyx_L11_error: {
#ifdef WITH_THREAD
Py_BLOCK_THREADS
#endif
goto __pyx_L1_error;
}
__pyx_L12:;
}
}
/* "radiotool/algorithms/par_build_table.pyx":450
*
* # ## -- OLD WAY -- ##
* minval = -1.0 # <<<<<<<<<<<<<<
* opt_i = 0
* for i in range(f.shape[0]):
*/
__pyx_v_minval = -1.0;
/* "radiotool/algorithms/par_build_table.pyx":451
* # ## -- OLD WAY -- ##
* minval = -1.0
* opt_i = 0 # <<<<<<<<<<<<<<
* for i in range(f.shape[0]):
* if minval == -1.0 or f[i] + g[i] < minval:
*/
__pyx_v_opt_i = 0;
/* "radiotool/algorithms/par_build_table.pyx":452
* minval = -1.0
* opt_i = 0
* for i in range(f.shape[0]): # <<<<<<<<<<<<<<
* if minval == -1.0 or f[i] + g[i] < minval:
* minval = f[i] + g[i]
*/
__pyx_t_6 = (__pyx_v_f.shape[0]);
for (__pyx_t_21 = 0; __pyx_t_21 < __pyx_t_6; __pyx_t_21+=1) {
__pyx_v_i = __pyx_t_21;
/* "radiotool/algorithms/par_build_table.pyx":453
* opt_i = 0
* for i in range(f.shape[0]):
* if minval == -1.0 or f[i] + g[i] < minval: # <<<<<<<<<<<<<<
* minval = f[i] + g[i]
* opt_i = i
*/
__pyx_t_4 = ((__pyx_v_minval == -1.0) != 0);
if (!__pyx_t_4) {
__pyx_t_23 = __pyx_v_i;
__pyx_t_24 = __pyx_v_i;
__pyx_t_2 = ((((*((double *) ( /* dim=0 */ (__pyx_v_f.data + __pyx_t_23 * __pyx_v_f.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_g.data + __pyx_t_24 * __pyx_v_g.strides[0]) )))) < __pyx_v_minval) != 0);
__pyx_t_1 = __pyx_t_2;
} else {
__pyx_t_1 = __pyx_t_4;
}
if (__pyx_t_1) {
/* "radiotool/algorithms/par_build_table.pyx":454
* for i in range(f.shape[0]):
* if minval == -1.0 or f[i] + g[i] < minval:
* minval = f[i] + g[i] # <<<<<<<<<<<<<<
* opt_i = i
*
*/
__pyx_t_25 = __pyx_v_i;
__pyx_t_26 = __pyx_v_i;
__pyx_v_minval = ((*((double *) ( /* dim=0 */ (__pyx_v_f.data + __pyx_t_25 * __pyx_v_f.strides[0]) ))) + (*((double *) ( /* dim=0 */ (__pyx_v_g.data + __pyx_t_26 * __pyx_v_g.strides[0]) ))));
/* "radiotool/algorithms/par_build_table.pyx":455
* if minval == -1.0 or f[i] + g[i] < minval:
* minval = f[i] + g[i]
* opt_i = i # <<<<<<<<<<<<<<
*
* # print "setting time %d to %d" % (l_over_2 + offset, opt_i)
*/
__pyx_v_opt_i = __pyx_v_i;
goto __pyx_L21;
}
__pyx_L21:;
}
/* "radiotool/algorithms/par_build_table.pyx":465
*
*
* global_path[l_over_2 + offset] = opt_i # <<<<<<<<<<<<<<
* # global_path_cost[l_over_2 + offset] = N.min(f + g)
*
*/
__pyx_t_21 = (__pyx_v_l_over_2 + __pyx_v_offset);
*((int *) ( /* dim=0 */ (__pyx_v_global_path.data + __pyx_t_21 * __pyx_v_global_path.strides[0]) )) = __pyx_v_opt_i;
/* "radiotool/algorithms/par_build_table.pyx":470
* # first half
* divide_and_conquer_cost_and_path(
* tc, pen[:, :l_over_2 + 1], start_beat, opt_i, offset, global_path, p, # <<<<<<<<<<<<<<
* f, g, mv1, mv2, mv3, mv4, mv5, mv6)
*
*/
__pyx_t_28 = -1;
__pyx_t_27.data = __pyx_v_pen.data;
__pyx_t_27.memview = __pyx_v_pen.memview;
__PYX_INC_MEMVIEW(&__pyx_t_27, 0);
__pyx_t_27.shape[0] = __pyx_v_pen.shape[0];
__pyx_t_27.strides[0] = __pyx_v_pen.strides[0];
__pyx_t_27.suboffsets[0] = -1;
if (unlikely(__pyx_memoryview_slice_memviewslice(
&__pyx_t_27,
__pyx_v_pen.shape[1], __pyx_v_pen.strides[1], __pyx_v_pen.suboffsets[1],
1,
1,
&__pyx_t_28,
0,
(__pyx_v_l_over_2 + 1),
0,
0,
1,
0,
1) < 0))
{
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 470; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_f_9radiotool_10algorithms_15par_build_table_divide_and_conquer_cost_and_path(__pyx_v_tc, __pyx_t_27, __pyx_v_start_beat, __pyx_v_opt_i, __pyx_v_offset, __pyx_v_global_path, __pyx_v_p, __pyx_v_f, __pyx_v_g, __pyx_v_mv1, __pyx_v_mv2, __pyx_v_mv3, __pyx_v_mv4, __pyx_v_mv5, __pyx_v_mv6);
/* "radiotool/algorithms/par_build_table.pyx":469
*
* # first half
* divide_and_conquer_cost_and_path( # <<<<<<<<<<<<<<
* tc, pen[:, :l_over_2 + 1], start_beat, opt_i, offset, global_path, p,
* f, g, mv1, mv2, mv3, mv4, mv5, mv6)
*/
__PYX_XDEC_MEMVIEW(&__pyx_t_27, 1);
/* "radiotool/algorithms/par_build_table.pyx":475
* # second half
* divide_and_conquer_cost_and_path(
* tc, pen[:, l_over_2:], opt_i, end_beat, l_over_2 + offset, global_path, p, # <<<<<<<<<<<<<<
* f, g, mv1, mv2, mv3, mv4, mv5, mv6)
*
*/
__pyx_t_28 = -1;
__pyx_t_29.data = __pyx_v_pen.data;
__pyx_t_29.memview = __pyx_v_pen.memview;
__PYX_INC_MEMVIEW(&__pyx_t_29, 0);
__pyx_t_29.shape[0] = __pyx_v_pen.shape[0];
__pyx_t_29.strides[0] = __pyx_v_pen.strides[0];
__pyx_t_29.suboffsets[0] = -1;
if (unlikely(__pyx_memoryview_slice_memviewslice(
&__pyx_t_29,
__pyx_v_pen.shape[1], __pyx_v_pen.strides[1], __pyx_v_pen.suboffsets[1],
1,
1,
&__pyx_t_28,
__pyx_v_l_over_2,
0,
0,
1,
0,
0,
1) < 0))
{
{__pyx_filename = __pyx_f[0]; __pyx_lineno = 475; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_f_9radiotool_10algorithms_15par_build_table_divide_and_conquer_cost_and_path(__pyx_v_tc, __pyx_t_29, __pyx_v_opt_i, __pyx_v_end_beat, (__pyx_v_l_over_2 + __pyx_v_offset), __pyx_v_global_path, __pyx_v_p, __pyx_v_f, __pyx_v_g, __pyx_v_mv1, __pyx_v_mv2, __pyx_v_mv3, __pyx_v_mv4, __pyx_v_mv5, __pyx_v_mv6);
/* "radiotool/algorithms/par_build_table.pyx":474
*
* # second half
* divide_and_conquer_cost_and_path( # <<<<<<<<<<<<<<
* tc, pen[:, l_over_2:], opt_i, end_beat, l_over_2 + offset, global_path, p,
* f, g, mv1, mv2, mv3, mv4, mv5, mv6)
*/
__PYX_XDEC_MEMVIEW(&__pyx_t_29, 1);
}
__pyx_L3:;
/* "radiotool/algorithms/par_build_table.pyx":478
* f, g, mv1, mv2, mv3, mv4, mv5, mv6)
*
* return # <<<<<<<<<<<<<<
*
*
*/
goto __pyx_L0;
/* "radiotool/algorithms/par_build_table.pyx":328
*
*
* cdef void divide_and_conquer_cost_and_path( # <<<<<<<<<<<<<<
* double[:, :] tc, double[:, :] pen, int start_beat, int end_beat, int offset,
* int[:] global_path, Params p,
*/
/* function exit code */
__pyx_L1_error:;
__PYX_XDEC_MEMVIEW(&__pyx_t_20, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_22, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_27, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_29, 1);
__Pyx_WriteUnraisable("radiotool.algorithms.par_build_table.divide_and_conquer_cost_and_path", __pyx_clineno, __pyx_lineno, __pyx_filename, 0);
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_new_pen, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_tc_column, 1);
__Pyx_RefNannyFinishContext();
}
/* "radiotool/algorithms/par_build_table.pyx":481
*
*
* cpdef int[:] build_table(double[:, :] trans_cost, double[:, :] penalty, # <<<<<<<<<<<<<<
* int min_beats=-1, int max_beats=-1, int first_pause=-1):
*
*/
static PyObject *__pyx_pw_9radiotool_10algorithms_15par_build_table_1build_table(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static __Pyx_memviewslice __pyx_f_9radiotool_10algorithms_15par_build_table_build_table(__Pyx_memviewslice __pyx_v_trans_cost, __Pyx_memviewslice __pyx_v_penalty, CYTHON_UNUSED int __pyx_skip_dispatch, struct __pyx_opt_args_9radiotool_10algorithms_15par_build_table_build_table *__pyx_optional_args) {
int __pyx_v_min_beats = ((int)-1);
int __pyx_v_max_beats = ((int)-1);
int __pyx_v_first_pause = ((int)-1);
int __pyx_v_max_beats_with_padding;
struct __pyx_t_9radiotool_10algorithms_15par_build_table_Params __pyx_v_p;
arrayobject *__pyx_v_dtemplate = 0;
arrayobject *__pyx_v_array1 = 0;
arrayobject *__pyx_v_array2 = 0;
arrayobject *__pyx_v_array3 = 0;
arrayobject *__pyx_v_array4 = 0;
arrayobject *__pyx_v_array5 = 0;
arrayobject *__pyx_v_array6 = 0;
arrayobject *__pyx_v_array7 = 0;
arrayobject *__pyx_v_array8 = 0;
__Pyx_memviewslice __pyx_v_mv1 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_mv2 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_mv3 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_mv4 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_mv5 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_mv6 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_f = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_g = { 0, 0, { 0 }, { 0 }, { 0 } };
arrayobject *__pyx_v_ar = 0;
arrayobject *__pyx_v_template = 0;
__Pyx_memviewslice __pyx_v_global_path = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_r = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
__Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_t_7 = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("build_table", 0);
if (__pyx_optional_args) {
if (__pyx_optional_args->__pyx_n > 0) {
__pyx_v_min_beats = __pyx_optional_args->min_beats;
if (__pyx_optional_args->__pyx_n > 1) {
__pyx_v_max_beats = __pyx_optional_args->max_beats;
if (__pyx_optional_args->__pyx_n > 2) {
__pyx_v_first_pause = __pyx_optional_args->first_pause;
}
}
}
}
/* "radiotool/algorithms/par_build_table.pyx":486
* cdef int max_beats_with_padding, i
*
* if max_beats != -1 and min_beats != -1: # <<<<<<<<<<<<<<
* # max_beats_with_padding = min_beats + max_beats
* max_beats_with_padding = max_beats
*/
__pyx_t_1 = ((__pyx_v_max_beats != -1) != 0);
if (__pyx_t_1) {
__pyx_t_2 = ((__pyx_v_min_beats != -1) != 0);
__pyx_t_3 = __pyx_t_2;
} else {
__pyx_t_3 = __pyx_t_1;
}
if (__pyx_t_3) {
/* "radiotool/algorithms/par_build_table.pyx":488
* if max_beats != -1 and min_beats != -1:
* # max_beats_with_padding = min_beats + max_beats
* max_beats_with_padding = max_beats # <<<<<<<<<<<<<<
* elif max_beats != -1:
* # 4? One measures of padding? Just a thought
*/
__pyx_v_max_beats_with_padding = __pyx_v_max_beats;
goto __pyx_L3;
}
/* "radiotool/algorithms/par_build_table.pyx":489
* # max_beats_with_padding = min_beats + max_beats
* max_beats_with_padding = max_beats
* elif max_beats != -1: # <<<<<<<<<<<<<<
* # 4? One measures of padding? Just a thought
* max_beats_with_padding = max_beats
*/
__pyx_t_3 = ((__pyx_v_max_beats != -1) != 0);
if (__pyx_t_3) {
/* "radiotool/algorithms/par_build_table.pyx":491
* elif max_beats != -1:
* # 4? One measures of padding? Just a thought
* max_beats_with_padding = max_beats # <<<<<<<<<<<<<<
* elif min_beats != -1:
* max_beats = -1
*/
__pyx_v_max_beats_with_padding = __pyx_v_max_beats;
goto __pyx_L3;
}
/* "radiotool/algorithms/par_build_table.pyx":492
* # 4? One measures of padding? Just a thought
* max_beats_with_padding = max_beats
* elif min_beats != -1: # <<<<<<<<<<<<<<
* max_beats = -1
* max_beats_with_padding = min_beats
*/
__pyx_t_3 = ((__pyx_v_min_beats != -1) != 0);
if (__pyx_t_3) {
/* "radiotool/algorithms/par_build_table.pyx":493
* max_beats_with_padding = max_beats
* elif min_beats != -1:
* max_beats = -1 # <<<<<<<<<<<<<<
* max_beats_with_padding = min_beats
* else:
*/
__pyx_v_max_beats = -1;
/* "radiotool/algorithms/par_build_table.pyx":494
* elif min_beats != -1:
* max_beats = -1
* max_beats_with_padding = min_beats # <<<<<<<<<<<<<<
* else:
* max_beats_with_padding = 1
*/
__pyx_v_max_beats_with_padding = __pyx_v_min_beats;
goto __pyx_L3;
}
/*else*/ {
/* "radiotool/algorithms/par_build_table.pyx":496
* max_beats_with_padding = min_beats
* else:
* max_beats_with_padding = 1 # <<<<<<<<<<<<<<
* max_beats = 1
* min_beats = 0
*/
__pyx_v_max_beats_with_padding = 1;
/* "radiotool/algorithms/par_build_table.pyx":497
* else:
* max_beats_with_padding = 1
* max_beats = 1 # <<<<<<<<<<<<<<
* min_beats = 0
*
*/
__pyx_v_max_beats = 1;
/* "radiotool/algorithms/par_build_table.pyx":498
* max_beats_with_padding = 1
* max_beats = 1
* min_beats = 0 # <<<<<<<<<<<<<<
*
* cdef Params p
*/
__pyx_v_min_beats = 0;
}
__pyx_L3:;
/* "radiotool/algorithms/par_build_table.pyx":501
*
* cdef Params p
* p.pen_val = 99999999.0 # <<<<<<<<<<<<<<
* p.p0 = first_pause
* p.n_beats = p.p0
*/
__pyx_v_p.pen_val = 99999999.0;
/* "radiotool/algorithms/par_build_table.pyx":502
* cdef Params p
* p.pen_val = 99999999.0
* p.p0 = first_pause # <<<<<<<<<<<<<<
* p.n_beats = p.p0
* p.n_pauses = trans_cost.shape[0] - p.p0
*/
__pyx_v_p.p0 = __pyx_v_first_pause;
/* "radiotool/algorithms/par_build_table.pyx":503
* p.pen_val = 99999999.0
* p.p0 = first_pause
* p.n_beats = p.p0 # <<<<<<<<<<<<<<
* p.n_pauses = trans_cost.shape[0] - p.p0
* p.min_beats = min_beats
*/
__pyx_t_4 = __pyx_v_p.p0;
__pyx_v_p.n_beats = __pyx_t_4;
/* "radiotool/algorithms/par_build_table.pyx":504
* p.p0 = first_pause
* p.n_beats = p.p0
* p.n_pauses = trans_cost.shape[0] - p.p0 # <<<<<<<<<<<<<<
* p.min_beats = min_beats
* p.max_beats = max_beats
*/
__pyx_v_p.n_pauses = ((__pyx_v_trans_cost.shape[0]) - __pyx_v_p.p0);
/* "radiotool/algorithms/par_build_table.pyx":505
* p.n_beats = p.p0
* p.n_pauses = trans_cost.shape[0] - p.p0
* p.min_beats = min_beats # <<<<<<<<<<<<<<
* p.max_beats = max_beats
* p.max_beats_with_padding = max_beats_with_padding
*/
__pyx_v_p.min_beats = __pyx_v_min_beats;
/* "radiotool/algorithms/par_build_table.pyx":506
* p.n_pauses = trans_cost.shape[0] - p.p0
* p.min_beats = min_beats
* p.max_beats = max_beats # <<<<<<<<<<<<<<
* p.max_beats_with_padding = max_beats_with_padding
* p.p0_full = p.n_beats * p.max_beats_with_padding
*/
__pyx_v_p.max_beats = __pyx_v_max_beats;
/* "radiotool/algorithms/par_build_table.pyx":507
* p.min_beats = min_beats
* p.max_beats = max_beats
* p.max_beats_with_padding = max_beats_with_padding # <<<<<<<<<<<<<<
* p.p0_full = p.n_beats * p.max_beats_with_padding
* p.all_full = p.p0_full + p.n_pauses
*/
__pyx_v_p.max_beats_with_padding = __pyx_v_max_beats_with_padding;
/* "radiotool/algorithms/par_build_table.pyx":508
* p.max_beats = max_beats
* p.max_beats_with_padding = max_beats_with_padding
* p.p0_full = p.n_beats * p.max_beats_with_padding # <<<<<<<<<<<<<<
* p.all_full = p.p0_full + p.n_pauses
*
*/
__pyx_v_p.p0_full = (__pyx_v_p.n_beats * __pyx_v_p.max_beats_with_padding);
/* "radiotool/algorithms/par_build_table.pyx":509
* p.max_beats_with_padding = max_beats_with_padding
* p.p0_full = p.n_beats * p.max_beats_with_padding
* p.all_full = p.p0_full + p.n_pauses # <<<<<<<<<<<<<<
*
* # double arrays for use throughout the computation
*/
__pyx_v_p.all_full = (__pyx_v_p.p0_full + __pyx_v_p.n_pauses);
/* "radiotool/algorithms/par_build_table.pyx":512
*
* # double arrays for use throughout the computation
* cdef array dtemplate = array('d') # <<<<<<<<<<<<<<
* cdef array array1, array2, array3, array4, array5, array6, array7, array8
* cdef double[:] mv1, mv2, mv3, mv4, mv5, mv6, f, g
*/
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_7cpython_5array_array)), __pyx_tuple_, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_dtemplate = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":515
* cdef array array1, array2, array3, array4, array5, array6, array7, array8
* cdef double[:] mv1, mv2, mv3, mv4, mv5, mv6, f, g
* array1 = clone(dtemplate, p.all_full, False) # <<<<<<<<<<<<<<
* array2 = clone(dtemplate, p.all_full, False)
* array3 = clone(dtemplate, p.all_full, False)
*/
__pyx_t_5 = ((PyObject *)__pyx_f_7cpython_5array_clone(__pyx_v_dtemplate, __pyx_v_p.all_full, 0)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 515; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_array1 = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":516
* cdef double[:] mv1, mv2, mv3, mv4, mv5, mv6, f, g
* array1 = clone(dtemplate, p.all_full, False)
* array2 = clone(dtemplate, p.all_full, False) # <<<<<<<<<<<<<<
* array3 = clone(dtemplate, p.all_full, False)
* array4 = clone(dtemplate, p.all_full, False)
*/
__pyx_t_5 = ((PyObject *)__pyx_f_7cpython_5array_clone(__pyx_v_dtemplate, __pyx_v_p.all_full, 0)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 516; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_array2 = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":517
* array1 = clone(dtemplate, p.all_full, False)
* array2 = clone(dtemplate, p.all_full, False)
* array3 = clone(dtemplate, p.all_full, False) # <<<<<<<<<<<<<<
* array4 = clone(dtemplate, p.all_full, False)
* array5 = clone(dtemplate, p.all_full, False)
*/
__pyx_t_5 = ((PyObject *)__pyx_f_7cpython_5array_clone(__pyx_v_dtemplate, __pyx_v_p.all_full, 0)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 517; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_array3 = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":518
* array2 = clone(dtemplate, p.all_full, False)
* array3 = clone(dtemplate, p.all_full, False)
* array4 = clone(dtemplate, p.all_full, False) # <<<<<<<<<<<<<<
* array5 = clone(dtemplate, p.all_full, False)
* array6 = clone(dtemplate, p.all_full, False)
*/
__pyx_t_5 = ((PyObject *)__pyx_f_7cpython_5array_clone(__pyx_v_dtemplate, __pyx_v_p.all_full, 0)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 518; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_array4 = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":519
* array3 = clone(dtemplate, p.all_full, False)
* array4 = clone(dtemplate, p.all_full, False)
* array5 = clone(dtemplate, p.all_full, False) # <<<<<<<<<<<<<<
* array6 = clone(dtemplate, p.all_full, False)
* array7 = clone(dtemplate, p.all_full, False)
*/
__pyx_t_5 = ((PyObject *)__pyx_f_7cpython_5array_clone(__pyx_v_dtemplate, __pyx_v_p.all_full, 0)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 519; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_array5 = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":520
* array4 = clone(dtemplate, p.all_full, False)
* array5 = clone(dtemplate, p.all_full, False)
* array6 = clone(dtemplate, p.all_full, False) # <<<<<<<<<<<<<<
* array7 = clone(dtemplate, p.all_full, False)
* array8 = clone(dtemplate, p.all_full, False)
*/
__pyx_t_5 = ((PyObject *)__pyx_f_7cpython_5array_clone(__pyx_v_dtemplate, __pyx_v_p.all_full, 0)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 520; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_array6 = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":521
* array5 = clone(dtemplate, p.all_full, False)
* array6 = clone(dtemplate, p.all_full, False)
* array7 = clone(dtemplate, p.all_full, False) # <<<<<<<<<<<<<<
* array8 = clone(dtemplate, p.all_full, False)
* f = array1
*/
__pyx_t_5 = ((PyObject *)__pyx_f_7cpython_5array_clone(__pyx_v_dtemplate, __pyx_v_p.all_full, 0)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 521; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_array7 = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":522
* array6 = clone(dtemplate, p.all_full, False)
* array7 = clone(dtemplate, p.all_full, False)
* array8 = clone(dtemplate, p.all_full, False) # <<<<<<<<<<<<<<
* f = array1
* g = array2
*/
__pyx_t_5 = ((PyObject *)__pyx_f_7cpython_5array_clone(__pyx_v_dtemplate, __pyx_v_p.all_full, 0)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_array8 = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":523
* array7 = clone(dtemplate, p.all_full, False)
* array8 = clone(dtemplate, p.all_full, False)
* f = array1 # <<<<<<<<<<<<<<
* g = array2
* mv1 = array3
*/
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(((PyObject *)__pyx_v_array1));
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 523; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_f = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "radiotool/algorithms/par_build_table.pyx":524
* array8 = clone(dtemplate, p.all_full, False)
* f = array1
* g = array2 # <<<<<<<<<<<<<<
* mv1 = array3
* mv2 = array4
*/
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(((PyObject *)__pyx_v_array2));
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 524; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_g = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "radiotool/algorithms/par_build_table.pyx":525
* f = array1
* g = array2
* mv1 = array3 # <<<<<<<<<<<<<<
* mv2 = array4
* mv3 = array5
*/
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(((PyObject *)__pyx_v_array3));
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 525; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_mv1 = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "radiotool/algorithms/par_build_table.pyx":526
* g = array2
* mv1 = array3
* mv2 = array4 # <<<<<<<<<<<<<<
* mv3 = array5
* mv4 = array6
*/
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(((PyObject *)__pyx_v_array4));
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 526; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_mv2 = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "radiotool/algorithms/par_build_table.pyx":527
* mv1 = array3
* mv2 = array4
* mv3 = array5 # <<<<<<<<<<<<<<
* mv4 = array6
* mv5 = array7
*/
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(((PyObject *)__pyx_v_array5));
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 527; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_mv3 = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "radiotool/algorithms/par_build_table.pyx":528
* mv2 = array4
* mv3 = array5
* mv4 = array6 # <<<<<<<<<<<<<<
* mv5 = array7
* mv6 = array8
*/
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(((PyObject *)__pyx_v_array6));
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 528; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_mv4 = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "radiotool/algorithms/par_build_table.pyx":529
* mv3 = array5
* mv4 = array6
* mv5 = array7 # <<<<<<<<<<<<<<
* mv6 = array8
*
*/
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(((PyObject *)__pyx_v_array7));
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 529; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_mv5 = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "radiotool/algorithms/par_build_table.pyx":530
* mv4 = array6
* mv5 = array7
* mv6 = array8 # <<<<<<<<<<<<<<
*
* cdef array ar, template = array('i')
*/
__pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(((PyObject *)__pyx_v_array8));
if (unlikely(!__pyx_t_6.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 530; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_mv6 = __pyx_t_6;
__pyx_t_6.memview = NULL;
__pyx_t_6.data = NULL;
/* "radiotool/algorithms/par_build_table.pyx":532
* mv6 = array8
*
* cdef array ar, template = array('i') # <<<<<<<<<<<<<<
* ar = clone(template, penalty.shape[1], False)
* cdef int[:] global_path = ar
*/
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)__pyx_ptype_7cpython_5array_array)), __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 532; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_template = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":533
*
* cdef array ar, template = array('i')
* ar = clone(template, penalty.shape[1], False) # <<<<<<<<<<<<<<
* cdef int[:] global_path = ar
*
*/
__pyx_t_5 = ((PyObject *)__pyx_f_7cpython_5array_clone(__pyx_v_template, (__pyx_v_penalty.shape[1]), 0)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 533; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_v_ar = ((arrayobject *)__pyx_t_5);
__pyx_t_5 = 0;
/* "radiotool/algorithms/par_build_table.pyx":534
* cdef array ar, template = array('i')
* ar = clone(template, penalty.shape[1], False)
* cdef int[:] global_path = ar # <<<<<<<<<<<<<<
*
* divide_and_conquer_cost_and_path(trans_cost, penalty, -1, -1, 0, global_path, p,
*/
__pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_ds_int(((PyObject *)__pyx_v_ar));
if (unlikely(!__pyx_t_7.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 534; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_global_path = __pyx_t_7;
__pyx_t_7.memview = NULL;
__pyx_t_7.data = NULL;
/* "radiotool/algorithms/par_build_table.pyx":536
* cdef int[:] global_path = ar
*
* divide_and_conquer_cost_and_path(trans_cost, penalty, -1, -1, 0, global_path, p, # <<<<<<<<<<<<<<
* f, g, mv1, mv2, mv3, mv4, mv5, mv6)
*
*/
__pyx_f_9radiotool_10algorithms_15par_build_table_divide_and_conquer_cost_and_path(__pyx_v_trans_cost, __pyx_v_penalty, -1, -1, 0, __pyx_v_global_path, __pyx_v_p, __pyx_v_f, __pyx_v_g, __pyx_v_mv1, __pyx_v_mv2, __pyx_v_mv3, __pyx_v_mv4, __pyx_v_mv5, __pyx_v_mv6);
/* "radiotool/algorithms/par_build_table.pyx":539
* f, g, mv1, mv2, mv3, mv4, mv5, mv6)
*
* return global_path # <<<<<<<<<<<<<<
*/
__PYX_INC_MEMVIEW(&__pyx_v_global_path, 0);
__pyx_r = __pyx_v_global_path;
goto __pyx_L0;
/* "radiotool/algorithms/par_build_table.pyx":481
*
*
* cpdef int[:] build_table(double[:, :] trans_cost, double[:, :] penalty, # <<<<<<<<<<<<<<
* int min_beats=-1, int max_beats=-1, int first_pause=-1):
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__PYX_XDEC_MEMVIEW(&__pyx_t_6, 1);
__PYX_XDEC_MEMVIEW(&__pyx_t_7, 1);
__pyx_r.data = NULL;
__pyx_r.memview = NULL;
__Pyx_AddTraceback("radiotool.algorithms.par_build_table.build_table", __pyx_clineno, __pyx_lineno, __pyx_filename);
goto __pyx_L2;
__pyx_L0:;
if (unlikely(!__pyx_r.memview)) {
PyErr_SetString(PyExc_TypeError,"Memoryview return value is not initialized");
}
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_dtemplate);
__Pyx_XDECREF((PyObject *)__pyx_v_array1);
__Pyx_XDECREF((PyObject *)__pyx_v_array2);
__Pyx_XDECREF((PyObject *)__pyx_v_array3);
__Pyx_XDECREF((PyObject *)__pyx_v_array4);
__Pyx_XDECREF((PyObject *)__pyx_v_array5);
__Pyx_XDECREF((PyObject *)__pyx_v_array6);
__Pyx_XDECREF((PyObject *)__pyx_v_array7);
__Pyx_XDECREF((PyObject *)__pyx_v_array8);
__PYX_XDEC_MEMVIEW(&__pyx_v_mv1, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_mv2, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_mv3, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_mv4, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_mv5, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_mv6, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_f, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_g, 1);
__Pyx_XDECREF((PyObject *)__pyx_v_ar);
__Pyx_XDECREF((PyObject *)__pyx_v_template);
__PYX_XDEC_MEMVIEW(&__pyx_v_global_path, 1);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Python wrapper */
/*
 * CPython-callable entry point for par_build_table.build_table.
 *
 * Unpacks 2-5 positional/keyword arguments:
 *   trans_cost, penalty -- required; converted to 2-D double memoryview
 *                          slices before dispatch
 *   min_beats, max_beats, first_pause
 *                       -- optional ints, each defaulting to -1
 * and forwards them to __pyx_pf_..._build_table below.
 *
 * NOTE: the missing `break`s in the switch statements are intentional
 * Cython-emitted fall-throughs that cascade through the positional
 * argument slots.  Returns NULL with an exception set on bad arguments
 * or failed memoryview conversion.
 */
static PyObject *__pyx_pw_9radiotool_10algorithms_15par_build_table_1build_table(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyObject *__pyx_pw_9radiotool_10algorithms_15par_build_table_1build_table(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_trans_cost = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_penalty = { 0, 0, { 0 }, { 0 }, { 0 } };
int __pyx_v_min_beats;
int __pyx_v_max_beats;
int __pyx_v_first_pause;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("build_table (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_trans_cost,&__pyx_n_s_penalty,&__pyx_n_s_min_beats,&__pyx_n_s_max_beats,&__pyx_n_s_first_pause,0};
PyObject* values[5] = {0,0,0,0,0};
/* Keyword-argument path: collect positionals first, then fill the
 * remaining slots from the keyword dict. */
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Fall-through switch: starting at the first slot NOT covered by a
 * positional argument, try to satisfy each remaining parameter from
 * the keyword dict.  Slots 0-1 are required, 2-4 optional. */
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_trans_cost)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_penalty)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("build_table", 0, 2, 5, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_min_beats);
if (value) { values[2] = value; kw_args--; }
}
case 3:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_max_beats);
if (value) { values[3] = value; kw_args--; }
}
case 4:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_first_pause);
if (value) { values[4] = value; kw_args--; }
}
}
/* Any keywords left over are unknown/duplicate; let the generic
 * parser raise the appropriate TypeError. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "build_table") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
/* Fast positional-only path (2 to 5 positional arguments). */
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
/* Convert/coerce each collected value; failed memoryview acquisition
 * or int conversion jumps to the traceback path. */
__pyx_v_trans_cost = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[0]); if (unlikely(!__pyx_v_trans_cost.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_v_penalty = __Pyx_PyObject_to_MemoryviewSlice_dsds_double(values[1]); if (unlikely(!__pyx_v_penalty.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
if (values[2]) {
__pyx_v_min_beats = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_min_beats == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 482; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_min_beats = ((int)-1);
}
if (values[3]) {
__pyx_v_max_beats = __Pyx_PyInt_As_int(values[3]); if (unlikely((__pyx_v_max_beats == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 482; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_max_beats = ((int)-1);
}
if (values[4]) {
__pyx_v_first_pause = __Pyx_PyInt_As_int(values[4]); if (unlikely((__pyx_v_first_pause == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 482; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
__pyx_v_first_pause = ((int)-1);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("build_table", 0, 2, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("radiotool.algorithms.par_build_table.build_table", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_9radiotool_10algorithms_15par_build_table_build_table(__pyx_self, __pyx_v_trans_cost, __pyx_v_penalty, __pyx_v_min_beats, __pyx_v_max_beats, __pyx_v_first_pause);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/*
 * Dispatch shim for the cpdef function build_table.
 *
 * Validates that both required memoryview slices were actually bound,
 * packs the three optional ints into the generated optional-args struct,
 * calls the C-level implementation __pyx_f_..._build_table, and wraps
 * the returned int[:] slice in a Python memoryview object for the caller.
 * On any failure, releases temporaries, records a traceback entry, and
 * returns NULL.
 */
static PyObject *__pyx_pf_9radiotool_10algorithms_15par_build_table_build_table(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_trans_cost, __Pyx_memviewslice __pyx_v_penalty, int __pyx_v_min_beats, int __pyx_v_max_beats, int __pyx_v_first_pause) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1 = { 0, 0, { 0 }, { 0 }, { 0 } };
struct __pyx_opt_args_9radiotool_10algorithms_15par_build_table_build_table __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("build_table", 0);
__Pyx_XDECREF(__pyx_r);
/* Guard against unbound (never-acquired) memoryview arguments. */
if (unlikely(!__pyx_v_trans_cost.memview)) { __Pyx_RaiseUnboundLocalError("trans_cost"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L1_error;} }
if (unlikely(!__pyx_v_penalty.memview)) { __Pyx_RaiseUnboundLocalError("penalty"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L1_error;} }
/* __pyx_n = 3 tells the callee all three optional args are supplied. */
__pyx_t_2.__pyx_n = 3;
__pyx_t_2.min_beats = __pyx_v_min_beats;
__pyx_t_2.max_beats = __pyx_v_max_beats;
__pyx_t_2.first_pause = __pyx_v_first_pause;
__pyx_t_1 = __pyx_f_9radiotool_10algorithms_15par_build_table_build_table(__pyx_v_trans_cost, __pyx_v_penalty, 0, &__pyx_t_2); if (unlikely(!__pyx_t_1.memview)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* Wrap the returned int[:] slice in a Python-level memoryview. */
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_t_1, 1, (PyObject *(*)(char *)) __pyx_memview_get_int, (int (*)(char *, PyObject *)) __pyx_memview_set_int, 0);; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 481; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__PYX_XDEC_MEMVIEW(&__pyx_t_1, 1);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__PYX_XDEC_MEMVIEW(&__pyx_t_1, 1);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("radiotool.algorithms.par_build_table.build_table", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
/* Release the argument slices acquired by the wrapper. */
__PYX_XDEC_MEMVIEW(&__pyx_v_trans_cost, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_penalty, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cpython/array.pxd":91
 * __data_union data
 *
 * def __getbuffer__(self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
 * # This implementation of getbuffer is geared towards Cython
 * # requirements, and does not yet fullfill the PEP.
 */
/* Python wrapper */
/* Thin buffer-protocol wrapper: casts the receiver to arrayobject and
 * delegates to the implementation below.  Returns 0 on success, -1 on
 * failure, per the PEP 3118 getbufferproc contract. */
static CYTHON_UNUSED int __pyx_pw_7cpython_5array_5array_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_pw_7cpython_5array_5array_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_pf_7cpython_5array_5array___getbuffer__(((arrayobject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/*
 * Buffer-protocol implementation for cpython.array.array.
 *
 * Fills a Py_buffer describing the array's storage as a 1-D strided
 * buffer: buf points at the raw element data, itemsize/len come from the
 * array's type descriptor, and a single-character format string (the
 * array typecode plus a NUL) is stored in the 2 extra bytes allocated
 * past shape[0] -- that is why PyMem_Malloc requests
 * sizeof(Py_ssize_t) + 2 and format is set to (char*)(shape + 1).
 * strides points at info->itemsize itself, so the stride always tracks
 * the element size.  info->obj takes a new reference to self; the
 * matching __releasebuffer__ frees info->shape.  The `flags` argument is
 * ignored (strided access is always provided).  Returns 0 on success,
 * -1 with an exception set on failure.
 */
static int __pyx_pf_7cpython_5array_5array___getbuffer__(arrayobject *__pyx_v_self, Py_buffer *__pyx_v_info, CYTHON_UNUSED int __pyx_v_flags) {
PyObject *__pyx_v_item_count = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
char *__pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
/* Pre-set info->obj to None so the exit paths can distinguish
 * "never assigned" from "assigned to self". */
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "cpython/array.pxd":96
 * # In particular strided access is always provided regardless
 * # of flags
 * item_count = Py_SIZE(self) # <<<<<<<<<<<<<<
 *
 * info.suboffsets = NULL
 */
__pyx_t_1 = PyInt_FromSsize_t(Py_SIZE(((PyObject *)__pyx_v_self))); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_item_count = __pyx_t_1;
__pyx_t_1 = 0;
/* "cpython/array.pxd":98
 * item_count = Py_SIZE(self)
 *
 * info.suboffsets = NULL # <<<<<<<<<<<<<<
 * info.buf = self.data.as_chars
 * info.readonly = 0
 */
__pyx_v_info->suboffsets = NULL;
/* "cpython/array.pxd":99
 *
 * info.suboffsets = NULL
 * info.buf = self.data.as_chars # <<<<<<<<<<<<<<
 * info.readonly = 0
 * info.ndim = 1
 */
__pyx_t_2 = __pyx_v_self->data.as_chars;
__pyx_v_info->buf = __pyx_t_2;
/* "cpython/array.pxd":100
 * info.suboffsets = NULL
 * info.buf = self.data.as_chars
 * info.readonly = 0 # <<<<<<<<<<<<<<
 * info.ndim = 1
 * info.itemsize = self.ob_descr.itemsize # e.g. sizeof(float)
 */
__pyx_v_info->readonly = 0;
/* "cpython/array.pxd":101
 * info.buf = self.data.as_chars
 * info.readonly = 0
 * info.ndim = 1 # <<<<<<<<<<<<<<
 * info.itemsize = self.ob_descr.itemsize # e.g. sizeof(float)
 * info.len = info.itemsize * item_count
 */
__pyx_v_info->ndim = 1;
/* "cpython/array.pxd":102
 * info.readonly = 0
 * info.ndim = 1
 * info.itemsize = self.ob_descr.itemsize # e.g. sizeof(float) # <<<<<<<<<<<<<<
 * info.len = info.itemsize * item_count
 *
 */
__pyx_t_3 = __pyx_v_self->ob_descr->itemsize;
__pyx_v_info->itemsize = __pyx_t_3;
/* "cpython/array.pxd":103
 * info.ndim = 1
 * info.itemsize = self.ob_descr.itemsize # e.g. sizeof(float)
 * info.len = info.itemsize * item_count # <<<<<<<<<<<<<<
 *
 * info.shape = <Py_ssize_t*> PyMem_Malloc(sizeof(Py_ssize_t) + 2)
 */
/* len = itemsize * item_count, computed through Python objects because
 * item_count is a Python int at the pxd level. */
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_info->itemsize); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = PyNumber_Multiply(__pyx_t_1, __pyx_v_item_count); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_5 = __Pyx_PyIndex_AsSsize_t(__pyx_t_4); if (unlikely((__pyx_t_5 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 103; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_v_info->len = __pyx_t_5;
/* "cpython/array.pxd":105
 * info.len = info.itemsize * item_count
 *
 * info.shape = <Py_ssize_t*> PyMem_Malloc(sizeof(Py_ssize_t) + 2) # <<<<<<<<<<<<<<
 * if not info.shape:
 * raise MemoryError()
 */
/* One Py_ssize_t for shape[0] plus 2 bytes reused below as the
 * format string (typecode + NUL); freed in __releasebuffer__. */
__pyx_v_info->shape = ((Py_ssize_t *)PyMem_Malloc(((sizeof(Py_ssize_t)) + 2)));
/* "cpython/array.pxd":106
 *
 * info.shape = <Py_ssize_t*> PyMem_Malloc(sizeof(Py_ssize_t) + 2)
 * if not info.shape: # <<<<<<<<<<<<<<
 * raise MemoryError()
 * info.shape[0] = item_count # constant regardless of resizing
 */
__pyx_t_6 = ((!(__pyx_v_info->shape != 0)) != 0);
if (__pyx_t_6) {
/* "cpython/array.pxd":107
 * info.shape = <Py_ssize_t*> PyMem_Malloc(sizeof(Py_ssize_t) + 2)
 * if not info.shape:
 * raise MemoryError() # <<<<<<<<<<<<<<
 * info.shape[0] = item_count # constant regardless of resizing
 * info.strides = &info.itemsize
 */
PyErr_NoMemory(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "cpython/array.pxd":108
 * if not info.shape:
 * raise MemoryError()
 * info.shape[0] = item_count # constant regardless of resizing # <<<<<<<<<<<<<<
 * info.strides = &info.itemsize
 *
 */
__pyx_t_5 = __Pyx_PyIndex_AsSsize_t(__pyx_v_item_count); if (unlikely((__pyx_t_5 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 108; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
(__pyx_v_info->shape[0]) = __pyx_t_5;
/* "cpython/array.pxd":109
 * raise MemoryError()
 * info.shape[0] = item_count # constant regardless of resizing
 * info.strides = &info.itemsize # <<<<<<<<<<<<<<
 *
 * info.format = <char*> (info.shape + 1)
 */
__pyx_v_info->strides = (&__pyx_v_info->itemsize);
/* "cpython/array.pxd":111
 * info.strides = &info.itemsize
 *
 * info.format = <char*> (info.shape + 1) # <<<<<<<<<<<<<<
 * info.format[0] = self.ob_descr.typecode
 * info.format[1] = 0
 */
__pyx_v_info->format = ((char *)(__pyx_v_info->shape + 1));
/* "cpython/array.pxd":112
 *
 * info.format = <char*> (info.shape + 1)
 * info.format[0] = self.ob_descr.typecode # <<<<<<<<<<<<<<
 * info.format[1] = 0
 * info.obj = self
 */
__pyx_t_3 = __pyx_v_self->ob_descr->typecode;
(__pyx_v_info->format[0]) = __pyx_t_3;
/* "cpython/array.pxd":113
 * info.format = <char*> (info.shape + 1)
 * info.format[0] = self.ob_descr.typecode
 * info.format[1] = 0 # <<<<<<<<<<<<<<
 * info.obj = self
 *
 */
(__pyx_v_info->format[1]) = 0;
/* "cpython/array.pxd":114
 * info.format[0] = self.ob_descr.typecode
 * info.format[1] = 0
 * info.obj = self # <<<<<<<<<<<<<<
 *
 * def __releasebuffer__(self, Py_buffer* info):
 */
/* Swap the placeholder None in info->obj for a new reference to self,
 * keeping the array alive for the lifetime of the buffer. */
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "cpython/array.pxd":91
 * __data_union data
 *
 * def __getbuffer__(self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
 * # This implementation of getbuffer is geared towards Cython
 * # requirements, and does not yet fullfill the PEP.
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("cpython.array.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
/* On error, drop whatever reference info->obj currently holds. */
if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
}
goto __pyx_L2;
__pyx_L0:;
/* On success, clear the placeholder only if it was never replaced. */
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__pyx_L2:;
__Pyx_XDECREF(__pyx_v_item_count);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cpython/array.pxd":116
 * info.obj = self
 *
 * def __releasebuffer__(self, Py_buffer* info): # <<<<<<<<<<<<<<
 * PyMem_Free(info.shape)
 *
 */
/* Python wrapper */
/* Buffer-release wrapper: delegates to the implementation below, which
 * frees the shape/format storage allocated in __getbuffer__. */
static CYTHON_UNUSED void __pyx_pw_7cpython_5array_5array_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
static CYTHON_UNUSED void __pyx_pw_7cpython_5array_5array_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
__pyx_pf_7cpython_5array_5array_2__releasebuffer__(((arrayobject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* Releases the per-buffer allocation made by __getbuffer__: the single
 * PyMem_Malloc block holding shape[0] and the format string.  The other
 * Py_buffer fields need no cleanup here (strides aliases info->itemsize,
 * and CPython itself releases info->obj). */
static void __pyx_pf_7cpython_5array_5array_2__releasebuffer__(CYTHON_UNUSED arrayobject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__releasebuffer__", 0);
/* "cpython/array.pxd":117
 *
 * def __releasebuffer__(self, Py_buffer* info):
 * PyMem_Free(info.shape) # <<<<<<<<<<<<<<
 *
 * array newarrayobject(PyTypeObject* type, Py_ssize_t size, arraydescr *descr)
 */
PyMem_Free(__pyx_v_info->shape);
/* "cpython/array.pxd":116
 * info.obj = self
 *
 * def __releasebuffer__(self, Py_buffer* info): # <<<<<<<<<<<<<<
 * PyMem_Free(info.shape)
 *
 */
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "cpython/array.pxd":128
 *
 *
 * cdef inline array clone(array template, Py_ssize_t length, bint zero): # <<<<<<<<<<<<<<
 * """ fast creation of a new array, given a template array.
 * type will be same as template.
 */
/*
 * Fast creation of a new array of `length` elements with the same type
 * and type descriptor as `template` (element contents are NOT copied).
 * If `zero` is true, the new storage is memset to zeroes.  Returns a new
 * reference, or NULL with an exception set if newarrayobject fails.
 */
static CYTHON_INLINE arrayobject *__pyx_f_7cpython_5array_clone(arrayobject *__pyx_v_template, Py_ssize_t __pyx_v_length, int __pyx_v_zero) {
arrayobject *__pyx_v_op = NULL;
arrayobject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("clone", 0);
/* "cpython/array.pxd":132
 * type will be same as template.
 * if zero is true, new array will be initialized with zeroes."""
 * op = newarrayobject(Py_TYPE(template), length, template.ob_descr) # <<<<<<<<<<<<<<
 * if zero and op is not None:
 * memset(op.data.as_chars, 0, length * op.ob_descr.itemsize)
 */
__pyx_t_1 = ((PyObject *)newarrayobject(Py_TYPE(((PyObject *)__pyx_v_template)), __pyx_v_length, __pyx_v_template->ob_descr)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_op = ((arrayobject *)__pyx_t_1);
__pyx_t_1 = 0;
/* "cpython/array.pxd":133
 * if zero is true, new array will be initialized with zeroes."""
 * op = newarrayobject(Py_TYPE(template), length, template.ob_descr)
 * if zero and op is not None: # <<<<<<<<<<<<<<
 * memset(op.data.as_chars, 0, length * op.ob_descr.itemsize)
 * return op
 */
/* Short-circuit `zero and op is not None`: the None check only runs
 * when zero is true. */
if ((__pyx_v_zero != 0)) {
__pyx_t_2 = (((PyObject *)__pyx_v_op) != Py_None);
__pyx_t_3 = (__pyx_t_2 != 0);
} else {
__pyx_t_3 = (__pyx_v_zero != 0);
}
if (__pyx_t_3) {
/* "cpython/array.pxd":134
 * op = newarrayobject(Py_TYPE(template), length, template.ob_descr)
 * if zero and op is not None:
 * memset(op.data.as_chars, 0, length * op.ob_descr.itemsize) # <<<<<<<<<<<<<<
 * return op
 *
 */
memset(__pyx_v_op->data.as_chars, 0, (__pyx_v_length * __pyx_v_op->ob_descr->itemsize));
goto __pyx_L3;
}
__pyx_L3:;
/* "cpython/array.pxd":135
 * if zero and op is not None:
 * memset(op.data.as_chars, 0, length * op.ob_descr.itemsize)
 * return op # <<<<<<<<<<<<<<
 *
 * cdef inline array copy(array self):
 */
__Pyx_XDECREF(((PyObject *)__pyx_r));
__Pyx_INCREF(((PyObject *)__pyx_v_op));
__pyx_r = __pyx_v_op;
goto __pyx_L0;
/* "cpython/array.pxd":128
 *
 *
 * cdef inline array clone(array template, Py_ssize_t length, bint zero): # <<<<<<<<<<<<<<
 * """ fast creation of a new array, given a template array.
 * type will be same as template.
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("cpython.array.clone", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_op);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cpython/array.pxd":137
 * return op
 *
 * cdef inline array copy(array self): # <<<<<<<<<<<<<<
 * """ make a copy of an array. """
 * op = newarrayobject(Py_TYPE(self), Py_SIZE(self), self.ob_descr)
 */
/*
 * Makes a full copy of `self`: allocates a new array of the same type,
 * size, and descriptor, then memcpy's the raw element data across.
 * Returns a new reference, or NULL with an exception set on allocation
 * failure.
 */
static CYTHON_INLINE arrayobject *__pyx_f_7cpython_5array_copy(arrayobject *__pyx_v_self) {
arrayobject *__pyx_v_op = NULL;
arrayobject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy", 0);
/* "cpython/array.pxd":139
 * cdef inline array copy(array self):
 * """ make a copy of an array. """
 * op = newarrayobject(Py_TYPE(self), Py_SIZE(self), self.ob_descr) # <<<<<<<<<<<<<<
 * memcpy(op.data.as_chars, self.data.as_chars, Py_SIZE(op) * op.ob_descr.itemsize)
 * return op
 */
__pyx_t_1 = ((PyObject *)newarrayobject(Py_TYPE(((PyObject *)__pyx_v_self)), Py_SIZE(((PyObject *)__pyx_v_self)), __pyx_v_self->ob_descr)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_op = ((arrayobject *)__pyx_t_1);
__pyx_t_1 = 0;
/* "cpython/array.pxd":140
 * """ make a copy of an array. """
 * op = newarrayobject(Py_TYPE(self), Py_SIZE(self), self.ob_descr)
 * memcpy(op.data.as_chars, self.data.as_chars, Py_SIZE(op) * op.ob_descr.itemsize) # <<<<<<<<<<<<<<
 * return op
 *
 */
memcpy(__pyx_v_op->data.as_chars, __pyx_v_self->data.as_chars, (Py_SIZE(((PyObject *)__pyx_v_op)) * __pyx_v_op->ob_descr->itemsize));
/* "cpython/array.pxd":141
 * op = newarrayobject(Py_TYPE(self), Py_SIZE(self), self.ob_descr)
 * memcpy(op.data.as_chars, self.data.as_chars, Py_SIZE(op) * op.ob_descr.itemsize)
 * return op # <<<<<<<<<<<<<<
 *
 * cdef inline int extend_buffer(array self, char* stuff, Py_ssize_t n) except -1:
 */
__Pyx_XDECREF(((PyObject *)__pyx_r));
__Pyx_INCREF(((PyObject *)__pyx_v_op));
__pyx_r = __pyx_v_op;
goto __pyx_L0;
/* "cpython/array.pxd":137
 * return op
 *
 * cdef inline array copy(array self): # <<<<<<<<<<<<<<
 * """ make a copy of an array. """
 * op = newarrayobject(Py_TYPE(self), Py_SIZE(self), self.ob_descr)
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("cpython.array.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_op);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cpython/array.pxd":143
 * return op
 *
 * cdef inline int extend_buffer(array self, char* stuff, Py_ssize_t n) except -1: # <<<<<<<<<<<<<<
 * """ efficent appending of new stuff of same type
 * (e.g. of same array type)
 */
/*
 * Appends `n` elements (not bytes) of raw data from `stuff` to `self`.
 * The caller must guarantee that `stuff` holds elements of the same
 * type/itemsize as the array.  Grows the array via resize_smart, then
 * memcpy's the new data after the original tail.  Returns 0 on success,
 * -1 with an exception set if the resize fails.
 */
static CYTHON_INLINE int __pyx_f_7cpython_5array_extend_buffer(arrayobject *__pyx_v_self, char *__pyx_v_stuff, Py_ssize_t __pyx_v_n) {
Py_ssize_t __pyx_v_itemsize;
Py_ssize_t __pyx_v_origsize;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("extend_buffer", 0);
/* "cpython/array.pxd":147
 * (e.g. of same array type)
 * n: number of elements (not number of bytes!) """
 * cdef Py_ssize_t itemsize = self.ob_descr.itemsize # <<<<<<<<<<<<<<
 * cdef Py_ssize_t origsize = Py_SIZE(self)
 * resize_smart(self, origsize + n)
 */
__pyx_t_1 = __pyx_v_self->ob_descr->itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "cpython/array.pxd":148
 * n: number of elements (not number of bytes!) """
 * cdef Py_ssize_t itemsize = self.ob_descr.itemsize
 * cdef Py_ssize_t origsize = Py_SIZE(self) # <<<<<<<<<<<<<<
 * resize_smart(self, origsize + n)
 * memcpy(self.data.as_chars + origsize * itemsize, stuff, n * itemsize)
 */
/* Capture the size BEFORE resizing -- it marks where the copy starts. */
__pyx_v_origsize = Py_SIZE(((PyObject *)__pyx_v_self));
/* "cpython/array.pxd":149
 * cdef Py_ssize_t itemsize = self.ob_descr.itemsize
 * cdef Py_ssize_t origsize = Py_SIZE(self)
 * resize_smart(self, origsize + n) # <<<<<<<<<<<<<<
 * memcpy(self.data.as_chars + origsize * itemsize, stuff, n * itemsize)
 * return 0
 */
__pyx_t_1 = resize_smart(__pyx_v_self, (__pyx_v_origsize + __pyx_v_n)); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "cpython/array.pxd":150
 * cdef Py_ssize_t origsize = Py_SIZE(self)
 * resize_smart(self, origsize + n)
 * memcpy(self.data.as_chars + origsize * itemsize, stuff, n * itemsize) # <<<<<<<<<<<<<<
 * return 0
 *
 */
memcpy((__pyx_v_self->data.as_chars + (__pyx_v_origsize * __pyx_v_itemsize)), __pyx_v_stuff, (__pyx_v_n * __pyx_v_itemsize));
/* "cpython/array.pxd":151
 * resize_smart(self, origsize + n)
 * memcpy(self.data.as_chars + origsize * itemsize, stuff, n * itemsize)
 * return 0 # <<<<<<<<<<<<<<
 *
 * cdef inline int extend(array self, array other) except -1:
 */
__pyx_r = 0;
goto __pyx_L0;
/* "cpython/array.pxd":143
 * return op
 *
 * cdef inline int extend_buffer(array self, char* stuff, Py_ssize_t n) except -1: # <<<<<<<<<<<<<<
 * """ efficent appending of new stuff of same type
 * (e.g. of same array type)
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_AddTraceback("cpython.array.extend_buffer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cpython/array.pxd":153
 * return 0
 *
 * cdef inline int extend(array self, array other) except -1: # <<<<<<<<<<<<<<
 * """ extend array with data from another array; types must match. """
 * if self.ob_descr.typecode != other.ob_descr.typecode:
 */
/*
 * Extends `self` with the elements of `other`.  The typecodes must
 * match; a mismatch raises via PyErr_BadArgument (which always returns
 * 0 per the CPython API, so that branch always takes the error exit).
 * On matching types, delegates to extend_buffer with other's raw data
 * and element count.  Returns 0 on success, -1 with an exception set.
 */
static CYTHON_INLINE int __pyx_f_7cpython_5array_extend(arrayobject *__pyx_v_self, arrayobject *__pyx_v_other) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("extend", 0);
/* "cpython/array.pxd":155
 * cdef inline int extend(array self, array other) except -1:
 * """ extend array with data from another array; types must match. """
 * if self.ob_descr.typecode != other.ob_descr.typecode: # <<<<<<<<<<<<<<
 * PyErr_BadArgument()
 * return extend_buffer(self, other.data.as_chars, Py_SIZE(other))
 */
__pyx_t_1 = ((__pyx_v_self->ob_descr->typecode != __pyx_v_other->ob_descr->typecode) != 0);
if (__pyx_t_1) {
/* "cpython/array.pxd":156
 * """ extend array with data from another array; types must match. """
 * if self.ob_descr.typecode != other.ob_descr.typecode:
 * PyErr_BadArgument() # <<<<<<<<<<<<<<
 * return extend_buffer(self, other.data.as_chars, Py_SIZE(other))
 *
 */
__pyx_t_2 = PyErr_BadArgument(); if (unlikely(__pyx_t_2 == 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L3;
}
__pyx_L3:;
/* "cpython/array.pxd":157
 * if self.ob_descr.typecode != other.ob_descr.typecode:
 * PyErr_BadArgument()
 * return extend_buffer(self, other.data.as_chars, Py_SIZE(other)) # <<<<<<<<<<<<<<
 *
 * cdef inline void zero(array self):
 */
__pyx_t_2 = __pyx_f_7cpython_5array_extend_buffer(__pyx_v_self, __pyx_v_other->data.as_chars, Py_SIZE(((PyObject *)__pyx_v_other))); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_r = __pyx_t_2;
goto __pyx_L0;
/* "cpython/array.pxd":153
 * return 0
 *
 * cdef inline int extend(array self, array other) except -1: # <<<<<<<<<<<<<<
 * """ extend array with data from another array; types must match. """
 * if self.ob_descr.typecode != other.ob_descr.typecode:
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_AddTraceback("cpython.array.extend", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "cpython/array.pxd":159
* return extend_buffer(self, other.data.as_chars, Py_SIZE(other))
*
* cdef inline void zero(array self): # <<<<<<<<<<<<<<
* """ set all elements of array to zero. """
* memset(self.data.as_chars, 0, Py_SIZE(self) * self.ob_descr.itemsize)
*/
/* "cpython/array.pxd":159
 *  cdef inline void zero(array self)
 *
 * Set all elements of the array to zero by clearing the underlying raw
 * buffer (Py_SIZE(self) elements of ob_descr->itemsize bytes each).
 */
static CYTHON_INLINE void __pyx_f_7cpython_5array_zero(arrayobject *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("zero", 0);
  {
    const Py_ssize_t total_bytes = Py_SIZE(((PyObject *)__pyx_v_self)) * __pyx_v_self->ob_descr->itemsize;
    memset(__pyx_v_self->data.as_chars, 0, total_bytes);
  }
  __Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":113
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode=u"c", bint allocate_buffer=True):
*
*/
/* Python wrapper */
/* Python-level wrapper for View.MemoryView.array.__cinit__:
 *   __cinit__(shape: tuple, itemsize: Py_ssize_t, format (not None),
 *             mode=u"c", allocate_buffer=True)
 * Unpacks positional and keyword arguments, applies defaults, validates the
 * statically-typed arguments (shape must be a tuple, format must not be
 * None), then dispatches to the real implementation.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_shape = 0;
  Py_ssize_t __pyx_v_itemsize;
  PyObject *__pyx_v_format = 0;
  PyObject *__pyx_v_mode = 0;
  int __pyx_v_allocate_buffer;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
    PyObject* values[5] = {0,0,0,0,0};
    /* Object-typed default for `mode` (u"c"); allocate_buffer's default is a
     * C bint, so it is applied after unpacking, below. */
    values[3] = ((PyObject *)__pyx_n_u_c);
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      /* Cases fall through intentionally: N positionals fill values[0..N-1]. */
      switch (pos_args) {
        case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      /* Fill remaining slots from keywords; again falls through so every slot
       * after the positional ones is considered.  The first three arguments
       * are required; mode/allocate_buffer are optional. */
      switch (pos_args) {
        case 0:
        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
        case 1:
        if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case 2:
        if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--;
        else {
          __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
        }
        case 3:
        if (kw_args > 0) {
          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mode);
          if (value) { values[3] = value; kw_args--; }
        }
        case 4:
        if (kw_args > 0) {
          PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_allocate_buffer);
          if (value) { values[4] = value; kw_args--; }
        }
      }
      /* Any keywords left over are unknown/duplicate -> TypeError. */
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
      }
    } else {
      /* Fast path: purely positional call (3, 4 or 5 arguments). */
      switch (PyTuple_GET_SIZE(__pyx_args)) {
        case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
        case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
        case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
        values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
        values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        break;
        default: goto __pyx_L5_argtuple_error;
      }
    }
    /* Convert object slots to the declared C types. */
    __pyx_v_shape = ((PyObject*)values[0]);
    __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    __pyx_v_format = values[2];
    __pyx_v_mode = values[3];
    if (values[4]) {
      __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 114; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
    } else {
      /* "View.MemoryView":114
 *
 * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
 *                  mode=u"c", bint allocate_buffer=True):             # <<<<<<<<<<<<<<
 *
 *         cdef int idx
 */
      __pyx_v_allocate_buffer = ((int)1);
    }
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
  __pyx_L3_error:;
  __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  /* Declared argument types: shape must be a tuple, format must not be None. */
  if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
    PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_r = __pyx_array_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
  /* "View.MemoryView":113
 *     cdef bint dtype_is_object
 *
 *     def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,             # <<<<<<<<<<<<<<
 *                  mode=u"c", bint allocate_buffer=True):
 *
 */
  /* function exit code */
  goto __pyx_L0;
  __pyx_L1_error:;
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Implementation of View.MemoryView.array.__cinit__.
 * Validates shape/itemsize, normalises `format` and `mode` to bytes/unicode,
 * allocates the _shape/_strides arrays and (optionally) the data buffer, and
 * fills the strides for C or Fortran order.  For object dtype ('O') the
 * buffer is pre-filled with incref'd Py_None so later decrefs are safe.
 * Returns 0 on success, -1 with a Python exception set on failure. */
static int __pyx_array_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
  int __pyx_v_idx;
  Py_ssize_t __pyx_v_i;
  PyObject **__pyx_v_p;
  PyObject *__pyx_v_encode = NULL;
  PyObject *__pyx_v_dim = NULL;
  char __pyx_v_order;
  PyObject *__pyx_v_decode = NULL;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  char *__pyx_t_4;
  int __pyx_t_5;
  int __pyx_t_6;
  int __pyx_t_7;
  PyObject *__pyx_t_8 = NULL;
  PyObject *__pyx_t_9 = NULL;
  Py_ssize_t __pyx_t_10;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__cinit__", 0);
  /* format/mode may be rebound below (encode/decode), so own references. */
  __Pyx_INCREF(__pyx_v_format);
  __Pyx_INCREF(__pyx_v_mode);
  /* "View.MemoryView":120
 *         cdef PyObject **p
 *
 *         self.ndim = <int> len(shape)             # <<<<<<<<<<<<<<
 *         self.itemsize = itemsize
 *
 */
  if (unlikely(__pyx_v_shape == Py_None)) {
    PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
    {__pyx_filename = __pyx_f[2]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 120; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_v_self->ndim = ((int)__pyx_t_1);
  /* "View.MemoryView":121
 *
 *         self.ndim = <int> len(shape)
 *         self.itemsize = itemsize             # <<<<<<<<<<<<<<
 *
 *         if not self.ndim:
 */
  __pyx_v_self->itemsize = __pyx_v_itemsize;
  /* "View.MemoryView":123
 *         self.itemsize = itemsize
 *
 *         if not self.ndim:             # <<<<<<<<<<<<<<
 *             raise ValueError("Empty shape tuple for cython.array")
 *
 */
  __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":124
 *
 *         if not self.ndim:
 *             raise ValueError("Empty shape tuple for cython.array")             # <<<<<<<<<<<<<<
 *
 *         if self.itemsize <= 0:
 */
    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    {__pyx_filename = __pyx_f[2]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  /* "View.MemoryView":126
 *             raise ValueError("Empty shape tuple for cython.array")
 *
 *         if self.itemsize <= 0:             # <<<<<<<<<<<<<<
 *             raise ValueError("itemsize <= 0 for cython.array")
 *
 */
  __pyx_t_2 = ((__pyx_v_self->itemsize <= 0) != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":127
 *
 *         if self.itemsize <= 0:
 *             raise ValueError("itemsize <= 0 for cython.array")             # <<<<<<<<<<<<<<
 *
 *         encode = getattr(format, 'encode', None)
 */
    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    {__pyx_filename = __pyx_f[2]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  /* Normalise `format`: unicode strings are ASCII-encoded to bytes. */
  /* "View.MemoryView":129
 *             raise ValueError("itemsize <= 0 for cython.array")
 *
 *         encode = getattr(format, 'encode', None)             # <<<<<<<<<<<<<<
 *         if encode:
 *             format = encode('ASCII')
 */
  __pyx_t_3 = __Pyx_GetAttr3(__pyx_v_format, __pyx_n_s_encode, Py_None); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 129; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_encode = __pyx_t_3;
  __pyx_t_3 = 0;
  /* "View.MemoryView":130
 *
 *         encode = getattr(format, 'encode', None)
 *         if encode:             # <<<<<<<<<<<<<<
 *             format = encode('ASCII')
 *         self._format = format
 */
  __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_encode); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (__pyx_t_2) {
    /* "View.MemoryView":131
 *         encode = getattr(format, 'encode', None)
 *         if encode:
 *             format = encode('ASCII')             # <<<<<<<<<<<<<<
 *         self._format = format
 *         self.format = self._format
 */
    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_v_encode, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3);
    __pyx_t_3 = 0;
    goto __pyx_L5;
  }
  __pyx_L5:;
  /* Keep the bytes object alive in self._format; self.format is the
   * borrowed char* into it. */
  /* "View.MemoryView":132
 *         if encode:
 *             format = encode('ASCII')
 *         self._format = format             # <<<<<<<<<<<<<<
 *         self.format = self._format
 *
 */
  if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_t_3 = __pyx_v_format;
  __Pyx_INCREF(__pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_3);
  __Pyx_GOTREF(__pyx_v_self->_format);
  __Pyx_DECREF(__pyx_v_self->_format);
  __pyx_v_self->_format = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;
  /* "View.MemoryView":133
 *             format = encode('ASCII')
 *         self._format = format
 *         self.format = self._format             # <<<<<<<<<<<<<<
 *
 *         self._shape = <Py_ssize_t *> malloc(sizeof(Py_ssize_t)*self.ndim)
 */
  __pyx_t_4 = __Pyx_PyObject_AsString(__pyx_v_self->_format); if (unlikely((!__pyx_t_4) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 133; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_v_self->format = __pyx_t_4;
  /* "View.MemoryView":135
 *         self.format = self._format
 *
 *         self._shape = <Py_ssize_t *> malloc(sizeof(Py_ssize_t)*self.ndim)             # <<<<<<<<<<<<<<
 *         self._strides = <Py_ssize_t *> malloc(sizeof(Py_ssize_t)*self.ndim)
 *
 */
  __pyx_v_self->_shape = ((Py_ssize_t *)malloc(((sizeof(Py_ssize_t)) * __pyx_v_self->ndim)));
  /* "View.MemoryView":136
 *
 *         self._shape = <Py_ssize_t *> malloc(sizeof(Py_ssize_t)*self.ndim)
 *         self._strides = <Py_ssize_t *> malloc(sizeof(Py_ssize_t)*self.ndim)             # <<<<<<<<<<<<<<
 *
 *         if not self._shape or not self._strides:
 */
  __pyx_v_self->_strides = ((Py_ssize_t *)malloc(((sizeof(Py_ssize_t)) * __pyx_v_self->ndim)));
  /* If either allocation failed, free both (free(NULL) is a no-op). */
  /* "View.MemoryView":138
 *         self._strides = <Py_ssize_t *> malloc(sizeof(Py_ssize_t)*self.ndim)
 *
 *         if not self._shape or not self._strides:             # <<<<<<<<<<<<<<
 *             free(self._shape)
 *             free(self._strides)
 */
  __pyx_t_2 = ((!(__pyx_v_self->_shape != 0)) != 0);
  if (!__pyx_t_2) {
    __pyx_t_5 = ((!(__pyx_v_self->_strides != 0)) != 0);
    __pyx_t_6 = __pyx_t_5;
  } else {
    __pyx_t_6 = __pyx_t_2;
  }
  if (__pyx_t_6) {
    /* "View.MemoryView":139
 *
 *         if not self._shape or not self._strides:
 *             free(self._shape)             # <<<<<<<<<<<<<<
 *             free(self._strides)
 *             raise MemoryError("unable to allocate shape or strides.")
 */
    free(__pyx_v_self->_shape);
    /* "View.MemoryView":140
 *         if not self._shape or not self._strides:
 *             free(self._shape)
 *             free(self._strides)             # <<<<<<<<<<<<<<
 *             raise MemoryError("unable to allocate shape or strides.")
 *
 */
    free(__pyx_v_self->_strides);
    /* "View.MemoryView":141
 *             free(self._shape)
 *             free(self._strides)
 *             raise MemoryError("unable to allocate shape or strides.")             # <<<<<<<<<<<<<<
 *
 *
 */
    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    {__pyx_filename = __pyx_f[2]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  /* Copy each positive dimension from the shape tuple into self._shape. */
  /* "View.MemoryView":144
 *
 *
 *         idx = 0             # <<<<<<<<<<<<<<
 *         for idx, dim in enumerate(shape):
 *             if dim <= 0:
 */
  __pyx_v_idx = 0;
  /* "View.MemoryView":145
 *
 *         idx = 0
 *         for idx, dim in enumerate(shape):             # <<<<<<<<<<<<<<
 *             if dim <= 0:
 *                 raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 */
  __pyx_t_7 = 0;
  __pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0;
  for (;;) {
    if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
    #if CYTHON_COMPILING_IN_CPYTHON
    __pyx_t_8 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_8); __pyx_t_1++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    #else
    __pyx_t_8 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    #endif
    __Pyx_XDECREF_SET(__pyx_v_dim, __pyx_t_8);
    __pyx_t_8 = 0;
    __pyx_v_idx = __pyx_t_7;
    __pyx_t_7 = (__pyx_t_7 + 1);
    /* "View.MemoryView":146
 *         idx = 0
 *         for idx, dim in enumerate(shape):
 *             if dim <= 0:             # <<<<<<<<<<<<<<
 *                 raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 *
 */
    __pyx_t_8 = PyObject_RichCompare(__pyx_v_dim, __pyx_int_0, Py_LE); __Pyx_XGOTREF(__pyx_t_8); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    if (__pyx_t_6) {
      /* "View.MemoryView":147
 *         for idx, dim in enumerate(shape):
 *             if dim <= 0:
 *                 raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))             # <<<<<<<<<<<<<<
 *
 *             self._shape[idx] = dim
 */
      __pyx_t_8 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_8);
      __pyx_t_9 = PyTuple_New(2); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_9);
      PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8);
      __Pyx_GIVEREF(__pyx_t_8);
      __Pyx_INCREF(__pyx_v_dim);
      PyTuple_SET_ITEM(__pyx_t_9, 1, __pyx_v_dim);
      __Pyx_GIVEREF(__pyx_v_dim);
      __pyx_t_8 = 0;
      __pyx_t_8 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_9); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_8);
      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_9);
      PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8);
      __Pyx_GIVEREF(__pyx_t_8);
      __pyx_t_8 = 0;
      __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_8);
      __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
      __Pyx_Raise(__pyx_t_8, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
      {__pyx_filename = __pyx_f[2]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    }
    /* "View.MemoryView":149
 *                 raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
 *
 *             self._shape[idx] = dim             # <<<<<<<<<<<<<<
 *             idx += 1
 *
 */
    __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_dim); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_t_10;
    /* "View.MemoryView":150
 *
 *             self._shape[idx] = dim
 *             idx += 1             # <<<<<<<<<<<<<<
 *
 *         if mode not in ("fortran", "c"):
 */
    __pyx_v_idx = (__pyx_v_idx + 1);
  }
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  /* "View.MemoryView":152
 *             idx += 1
 *
 *         if mode not in ("fortran", "c"):             # <<<<<<<<<<<<<<
 *             raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
 *
 */
  __Pyx_INCREF(__pyx_v_mode);
  __pyx_t_3 = __pyx_v_mode;
  __pyx_t_6 = (__Pyx_PyString_Equals(__pyx_t_3, __pyx_n_s_fortran, Py_NE)); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (__pyx_t_6) {
    __pyx_t_2 = (__Pyx_PyString_Equals(__pyx_t_3, __pyx_n_s_c, Py_NE)); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 152; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __pyx_t_5 = __pyx_t_2;
  } else {
    __pyx_t_5 = __pyx_t_6;
  }
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_t_6 = (__pyx_t_5 != 0);
  if (__pyx_t_6) {
    /* "View.MemoryView":153
 *
 *         if mode not in ("fortran", "c"):
 *             raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)             # <<<<<<<<<<<<<<
 *
 *         cdef char order
 */
    __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_8 = PyTuple_New(1); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_8);
    PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_3);
    __Pyx_GIVEREF(__pyx_t_3);
    __pyx_t_3 = 0;
    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_8, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    {__pyx_filename = __pyx_f[2]; __pyx_lineno = 153; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  /* "View.MemoryView":156
 *
 *         cdef char order
 *         if mode == 'fortran':             # <<<<<<<<<<<<<<
 *             order = 'F'
 *         else:
 */
  __pyx_t_6 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 156; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (__pyx_t_6) {
    /* "View.MemoryView":157
 *         cdef char order
 *         if mode == 'fortran':
 *             order = 'F'             # <<<<<<<<<<<<<<
 *         else:
 *             order = 'C'
 */
    __pyx_v_order = 'F';
    goto __pyx_L11;
  }
  /*else*/ {
    /* "View.MemoryView":159
 *             order = 'F'
 *         else:
 *             order = 'C'             # <<<<<<<<<<<<<<
 *
 *         self.len = fill_contig_strides_array(self._shape, self._strides,
 */
    __pyx_v_order = 'C';
  }
  __pyx_L11:;
  /* self.len receives the total byte length of a contiguous buffer. */
  /* "View.MemoryView":161
 *             order = 'C'
 *
 *         self.len = fill_contig_strides_array(self._shape, self._strides,             # <<<<<<<<<<<<<<
 *                                              itemsize, self.ndim, order)
 *
 */
  __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
  /* Normalise `mode` back to unicode for storage on self.mode. */
  /* "View.MemoryView":164
 *                                              itemsize, self.ndim, order)
 *
 *         decode = getattr(mode, 'decode', None)             # <<<<<<<<<<<<<<
 *         if decode:
 *             mode = decode('ASCII')
 */
  __pyx_t_3 = __Pyx_GetAttr3(__pyx_v_mode, __pyx_n_s_decode, Py_None); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 164; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_v_decode = __pyx_t_3;
  __pyx_t_3 = 0;
  /* "View.MemoryView":165
 *
 *         decode = getattr(mode, 'decode', None)
 *         if decode:             # <<<<<<<<<<<<<<
 *             mode = decode('ASCII')
 *         self.mode = mode
 */
  __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_v_decode); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  if (__pyx_t_6) {
    /* "View.MemoryView":166
 *         decode = getattr(mode, 'decode', None)
 *         if decode:
 *             mode = decode('ASCII')             # <<<<<<<<<<<<<<
 *         self.mode = mode
 *
 */
    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_v_decode, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF_SET(__pyx_v_mode, __pyx_t_3);
    __pyx_t_3 = 0;
    goto __pyx_L12;
  }
  __pyx_L12:;
  /* "View.MemoryView":167
 *         if decode:
 *             mode = decode('ASCII')
 *         self.mode = mode             # <<<<<<<<<<<<<<
 *
 *         self.free_data = allocate_buffer
 */
  if (!(likely(PyUnicode_CheckExact(__pyx_v_mode))||((__pyx_v_mode) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "unicode", Py_TYPE(__pyx_v_mode)->tp_name), 0))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 167; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_t_3 = __pyx_v_mode;
  __Pyx_INCREF(__pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_3);
  __Pyx_GOTREF(__pyx_v_self->mode);
  __Pyx_DECREF(__pyx_v_self->mode);
  __pyx_v_self->mode = ((PyObject*)__pyx_t_3);
  __pyx_t_3 = 0;
  /* The destructor frees self.data only when we allocated it here. */
  /* "View.MemoryView":169
 *         self.mode = mode
 *
 *         self.free_data = allocate_buffer             # <<<<<<<<<<<<<<
 *         self.dtype_is_object = format == b'O'
 *         if allocate_buffer:
 */
  __pyx_v_self->free_data = __pyx_v_allocate_buffer;
  /* "View.MemoryView":170
 *
 *         self.free_data = allocate_buffer
 *         self.dtype_is_object = format == b'O'             # <<<<<<<<<<<<<<
 *         if allocate_buffer:
 *             self.data = <char *>malloc(self.len)
 */
  __pyx_t_3 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 170; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_v_self->dtype_is_object = __pyx_t_6;
  /* "View.MemoryView":171
 *         self.free_data = allocate_buffer
 *         self.dtype_is_object = format == b'O'
 *         if allocate_buffer:             # <<<<<<<<<<<<<<
 *             self.data = <char *>malloc(self.len)
 *             if not self.data:
 */
  __pyx_t_6 = (__pyx_v_allocate_buffer != 0);
  if (__pyx_t_6) {
    /* "View.MemoryView":172
 *         self.dtype_is_object = format == b'O'
 *         if allocate_buffer:
 *             self.data = <char *>malloc(self.len)             # <<<<<<<<<<<<<<
 *             if not self.data:
 *                 raise MemoryError("unable to allocate array data.")
 */
    __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
    /* "View.MemoryView":173
 *         if allocate_buffer:
 *             self.data = <char *>malloc(self.len)
 *             if not self.data:             # <<<<<<<<<<<<<<
 *                 raise MemoryError("unable to allocate array data.")
 *
 */
    __pyx_t_6 = ((!(__pyx_v_self->data != 0)) != 0);
    if (__pyx_t_6) {
      /* "View.MemoryView":174
 *             self.data = <char *>malloc(self.len)
 *             if not self.data:
 *                 raise MemoryError("unable to allocate array data.")             # <<<<<<<<<<<<<<
 *
 *             if self.dtype_is_object:
 */
      __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_Raise(__pyx_t_3, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      {__pyx_filename = __pyx_f[2]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    }
    /* Object buffers must be initialised to valid PyObject* slots (Py_None)
     * so the destructor's decrefs are safe. */
    /* "View.MemoryView":176
 *                 raise MemoryError("unable to allocate array data.")
 *
 *             if self.dtype_is_object:             # <<<<<<<<<<<<<<
 *                 p = <PyObject **> self.data
 *                 for i in range(self.len / itemsize):
 */
    __pyx_t_6 = (__pyx_v_self->dtype_is_object != 0);
    if (__pyx_t_6) {
      /* "View.MemoryView":177
 *
 *             if self.dtype_is_object:
 *                 p = <PyObject **> self.data             # <<<<<<<<<<<<<<
 *                 for i in range(self.len / itemsize):
 *                     p[i] = Py_None
 */
      __pyx_v_p = ((PyObject **)__pyx_v_self->data);
      /* "View.MemoryView":178
 *             if self.dtype_is_object:
 *                 p = <PyObject **> self.data
 *                 for i in range(self.len / itemsize):             # <<<<<<<<<<<<<<
 *                     p[i] = Py_None
 *                     Py_INCREF(Py_None)
 */
      if (unlikely(__pyx_v_itemsize == 0)) {
        #ifdef WITH_THREAD
        PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
        #endif
        PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
        #ifdef WITH_THREAD
        PyGILState_Release(__pyx_gilstate_save);
        #endif
        {__pyx_filename = __pyx_f[2]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      }
      else if (sizeof(Py_ssize_t) == sizeof(long) && unlikely(__pyx_v_itemsize == -1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
        #ifdef WITH_THREAD
        PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
        #endif
        PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
        #ifdef WITH_THREAD
        PyGILState_Release(__pyx_gilstate_save);
        #endif
        {__pyx_filename = __pyx_f[2]; __pyx_lineno = 178; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      }
      __pyx_t_1 = (__pyx_v_self->len / __pyx_v_itemsize);
      for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_1; __pyx_t_10+=1) {
        __pyx_v_i = __pyx_t_10;
        /* "View.MemoryView":179
 *                 p = <PyObject **> self.data
 *                 for i in range(self.len / itemsize):
 *                     p[i] = Py_None             # <<<<<<<<<<<<<<
 *                     Py_INCREF(Py_None)
 *
 */
        (__pyx_v_p[__pyx_v_i]) = Py_None;
        /* "View.MemoryView":180
 *                 for i in range(self.len / itemsize):
 *                     p[i] = Py_None
 *                     Py_INCREF(Py_None)             # <<<<<<<<<<<<<<
 *
 *     @cname('getbuffer')
 */
        Py_INCREF(Py_None);
      }
      goto __pyx_L15;
    }
    __pyx_L15:;
    goto __pyx_L13;
  }
  __pyx_L13:;
  /* "View.MemoryView":113
 *     cdef bint dtype_is_object
 *
 *     def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,             # <<<<<<<<<<<<<<
 *                  mode=u"c", bint allocate_buffer=True):
 *
 */
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_8);
  __Pyx_XDECREF(__pyx_t_9);
  __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_encode);
  __Pyx_XDECREF(__pyx_v_dim);
  __Pyx_XDECREF(__pyx_v_decode);
  __Pyx_XDECREF(__pyx_v_format);
  __Pyx_XDECREF(__pyx_v_mode);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":183
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == b"c":
*/
/* Python wrapper */
/* Buffer-protocol entry point (tp_as_buffer.bf_getbuffer) for the
 * View.MemoryView array class.  Pure forwarder: casts self to the concrete
 * struct type and delegates to the __getbuffer__ implementation.
 * Returns 0 on success, -1 with an exception set on failure. */
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
  __pyx_r = __pyx_array_getbuffer_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
static int __pyx_array_getbuffer_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_bufmode;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
char *__pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
Py_ssize_t *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "View.MemoryView":184
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1 # <<<<<<<<<<<<<<
* if self.mode == b"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = -1;
/* "View.MemoryView":185
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == b"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == b"fortran":
*/
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_b_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 185; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":186
* cdef int bufmode = -1
* if self.mode == b"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* elif self.mode == b"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
goto __pyx_L3;
}
/* "View.MemoryView":187
* if self.mode == b"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == b"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
__pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_b_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 187; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":188
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == b"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
*/
__pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":189
* elif self.mode == b"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
__pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":190
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":191
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data # <<<<<<<<<<<<<<
* info.len = self.len
* info.ndim = self.ndim
*/
__pyx_t_4 = __pyx_v_self->data;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":192
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
* info.len = self.len # <<<<<<<<<<<<<<
* info.ndim = self.ndim
* info.shape = self._shape
*/
__pyx_t_5 = __pyx_v_self->len;
__pyx_v_info->len = __pyx_t_5;
/* "View.MemoryView":193
* info.buf = self.data
* info.len = self.len
* info.ndim = self.ndim # <<<<<<<<<<<<<<
* info.shape = self._shape
* info.strides = self._strides
*/
__pyx_t_6 = __pyx_v_self->ndim;
__pyx_v_info->ndim = __pyx_t_6;
/* "View.MemoryView":194
* info.len = self.len
* info.ndim = self.ndim
* info.shape = self._shape # <<<<<<<<<<<<<<
* info.strides = self._strides
* info.suboffsets = NULL
*/
__pyx_t_7 = __pyx_v_self->_shape;
__pyx_v_info->shape = __pyx_t_7;
/* "View.MemoryView":195
* info.ndim = self.ndim
* info.shape = self._shape
* info.strides = self._strides # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = self.itemsize
*/
__pyx_t_7 = __pyx_v_self->_strides;
__pyx_v_info->strides = __pyx_t_7;
/* "View.MemoryView":196
* info.shape = self._shape
* info.strides = self._strides
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = self.itemsize
* info.readonly = 0
*/
__pyx_v_info->suboffsets = NULL;
/* "View.MemoryView":197
* info.strides = self._strides
* info.suboffsets = NULL
* info.itemsize = self.itemsize # <<<<<<<<<<<<<<
* info.readonly = 0
*
*/
__pyx_t_5 = __pyx_v_self->itemsize;
__pyx_v_info->itemsize = __pyx_t_5;
/* "View.MemoryView":198
* info.suboffsets = NULL
* info.itemsize = self.itemsize
* info.readonly = 0 # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
__pyx_v_info->readonly = 0;
/* "View.MemoryView":200
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":201
*
* if flags & PyBUF_FORMAT:
* info.format = self.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_4 = __pyx_v_self->format;
__pyx_v_info->format = __pyx_t_4;
goto __pyx_L5;
}
/*else*/ {
/* "View.MemoryView":203
* info.format = self.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.obj = self
*/
__pyx_v_info->format = NULL;
}
__pyx_L5:;
/* "View.MemoryView":205
* info.format = NULL
*
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":183
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == b"c":
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":209
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* Python wrapper for array.__dealloc__: forwards to the implementation. */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_array_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}
/* Implementation of array.__dealloc__ (View.MemoryView:209-219).
 * Releases the payload: a custom callback takes precedence; otherwise, if
 * the array owns its buffer (free_data), object elements are decref'd via
 * refcount_objects_in_slice before free(). The _strides/_shape arrays are
 * always freed. */
static void __pyx_array_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  __Pyx_RefNannySetupContext("__dealloc__", 0);
  /* if self.callback_free_data != NULL: delegate buffer release */
  __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
  if (__pyx_t_1) {
    __pyx_v_self->callback_free_data(__pyx_v_self->data);
    goto __pyx_L3;
  }
  /* elif self.free_data: we own the buffer */
  __pyx_t_1 = (__pyx_v_self->free_data != 0);
  if (__pyx_t_1) {
    /* if self.dtype_is_object: decref stored PyObject* elements first */
    __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
    if (__pyx_t_1) {
      __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
      goto __pyx_L4;
    }
    __pyx_L4:;
    free(__pyx_v_self->data);
    goto __pyx_L3;
  }
  __pyx_L3:;
  /* shape/stride arrays are malloc'd alongside the object — always freed */
  free(__pyx_v_self->_strides);
  free(__pyx_v_self->_shape);
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":223
* property memview:
* @cname('get_memview')
* def __get__(self): # <<<<<<<<<<<<<<
*
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
*/
/* Python wrapper for the array.memview property getter. */
static PyObject *get_memview(PyObject *__pyx_v_self); /*proto*/
static PyObject *get_memview(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = get_memview_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Implementation of array.memview.__get__ (View.MemoryView:223-226):
 * returns memoryview(self, PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE,
 * self.dtype_is_object). Returns a new reference, or NULL on error. */
static PyObject *get_memview_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
  int __pyx_v_flags;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE */
  __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
  /* return memoryview(self, flags, self.dtype_is_object) */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  /* Build the 3-argument tuple (self, flags, dtype_is_object). */
  __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_INCREF(((PyObject *)__pyx_v_self));
  PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
  __Pyx_GIVEREF(((PyObject *)__pyx_v_self));
  PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __pyx_t_1 = 0;
  __pyx_t_2 = 0;
  __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryview_type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 226; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":229
*
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* Python wrapper for array.__getattr__: forwards to the implementation. */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
  __pyx_r = __pyx_array_MemoryView_5array_6__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Implementation of array.__getattr__ (View.MemoryView:229-230):
 * delegates unknown attribute lookups to the memview proxy, i.e.
 * `return getattr(self.memview, attr)`. New reference; NULL on error. */
static PyObject *__pyx_array_MemoryView_5array_6__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getattr__", 0);
  /* self.memview — goes through the property getter above */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  /* getattr(memview, attr) */
  __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 230; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":232
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* Python wrapper for array.__getitem__: forwards to the implementation. */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
  __pyx_r = __pyx_array_MemoryView_5array_8__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Implementation of array.__getitem__ (View.MemoryView:232-233):
 * `return self.memview[item]` — indexing is delegated to the memoryview
 * proxy. New reference; NULL on error. */
static PyObject *__pyx_array_MemoryView_5array_8__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__getitem__", 0);
  /* self.memview[item] */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 233; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(__pyx_t_2 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 233; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":235
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* Python wrapper for array.__setitem__: forwards to the implementation. */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
  __pyx_r = __pyx_array_MemoryView_5array_10__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Implementation of array.__setitem__ (View.MemoryView:235-236):
 * `self.memview[item] = value` — assignment delegated to the memoryview
 * proxy. Returns 0 on success, -1 on error. */
static int __pyx_array_MemoryView_5array_10__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__setitem__", 0);
  /* self.memview[item] = value */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 236; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  /* function exit code */
  __pyx_r = 0;
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":240
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
/* C-level factory array_cwrapper (View.MemoryView:240-251).
 * Builds a View.MemoryView array from C arguments. If buf is NULL, the
 * array allocates its own buffer; otherwise it is constructed with
 * allocate_buffer=False and result.data is pointed at the caller's buf
 * (caller retains ownership of that memory). The mode C-string is decoded
 * as ASCII. Returns a new reference, or 0 (NULL) on error. */
static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
  struct __pyx_array_obj *__pyx_v_result = 0;
  struct __pyx_array_obj *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("array_cwrapper", 0);
  /* if buf == NULL: let the array allocate its own storage */
  __pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
  if (__pyx_t_1) {
    /* result = array(shape, itemsize, format, mode.decode('ASCII')) */
    __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_INCREF(__pyx_v_shape);
    PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
    __Pyx_GIVEREF(__pyx_v_shape);
    PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
    __Pyx_GIVEREF(__pyx_t_2);
    PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
    __Pyx_GIVEREF(__pyx_t_3);
    PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_4);
    __pyx_t_2 = 0;
    __pyx_t_3 = 0;
    __pyx_t_4 = 0;
    __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_array_type)), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 245; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_4);
    __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
    __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
    __pyx_t_4 = 0;
    goto __pyx_L3;
  }
  /*else*/ {
    /* result = array(shape, itemsize, format, mode.decode('ASCII'),
     *                allocate_buffer=False); result.data = buf */
    __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 247; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 247; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_5);
    __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 247; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 247; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_INCREF(__pyx_v_shape);
    PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
    __Pyx_GIVEREF(__pyx_v_shape);
    PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
    __Pyx_GIVEREF(__pyx_t_4);
    PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
    __Pyx_GIVEREF(__pyx_t_5);
    PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
    __Pyx_GIVEREF(__pyx_t_3);
    __pyx_t_4 = 0;
    __pyx_t_5 = 0;
    __pyx_t_3 = 0;
    /* kwargs dict: {'allocate_buffer': False} */
    __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 247; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 247; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_array_type)), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 247; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_5);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
    __pyx_t_5 = 0;
    /* Point the array at the caller-provided buffer. */
    __pyx_v_result->data = __pyx_v_buf;
  }
  __pyx_L3:;
  /* return result */
  __Pyx_XDECREF(((PyObject *)__pyx_r));
  __Pyx_INCREF(((PyObject *)__pyx_v_result));
  __pyx_r = __pyx_v_result;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_result);
  __Pyx_XGIVEREF((PyObject *)__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":277
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* Python wrapper for Enum.__init__: parses the single positional-or-keyword
 * argument `name` (switch cases fall through intentionally — generated
 * argument-count handling) and forwards to the implementation. */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
  PyObject *__pyx_v_name = 0;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
  {
    static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
    PyObject* values[1] = {0};
    if (unlikely(__pyx_kwds)) {
      Py_ssize_t kw_args;
      const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
      switch (pos_args) {
        case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
        case 0: break;
        default: goto __pyx_L5_argtuple_error;
      }
      kw_args = PyDict_Size(__pyx_kwds);
      switch (pos_args) {
        case 0:
        if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
        else goto __pyx_L5_argtuple_error;
      }
      if (unlikely(kw_args > 0)) {
        if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 277; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
      }
    } else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
      goto __pyx_L5_argtuple_error;
    } else {
      values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
    }
    __pyx_v_name = values[0];
  }
  goto __pyx_L4_argument_unpacking_done;
  __pyx_L5_argtuple_error:;
  __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 277; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
  __pyx_L3_error:;
  __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __Pyx_RefNannyFinishContext();
  return -1;
  __pyx_L4_argument_unpacking_done:;
  __pyx_r = __pyx_MemviewEnum_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Implementation of Enum.__init__ (View.MemoryView:277-278):
 * `self.name = name` — replaces the old reference with the new one.
 * Always returns 0. */
static int __pyx_MemviewEnum_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__init__", 0);
  /* self.name = name (incref new value, decref old) */
  __Pyx_INCREF(__pyx_v_name);
  __Pyx_GIVEREF(__pyx_v_name);
  __Pyx_GOTREF(__pyx_v_self->name);
  __Pyx_DECREF(__pyx_v_self->name);
  __pyx_v_self->name = __pyx_v_name;
  /* function exit code */
  __pyx_r = 0;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":279
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* Python wrapper for Enum.__repr__: forwards to the implementation. */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  __pyx_r = __pyx_MemviewEnum_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Implementation of Enum.__repr__ (View.MemoryView:279-280):
 * `return self.name`. Returns a new reference; cannot fail. */
static PyObject *__pyx_MemviewEnum_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__", 0);
  /* return self.name */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->name);
  __pyx_r = __pyx_v_self->name;
  goto __pyx_L0;
  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":294
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
/* align_pointer (View.MemoryView:294-305, nogil, cdivision):
 * round `memory` up to the next multiple of `alignment` and return it.
 * A pointer that is already aligned is returned unchanged. */
static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) {
  /* Work on the address as an integer. */
  Py_intptr_t addr = ((Py_intptr_t)__pyx_v_memory);
  /* Distance past the previous alignment boundary (cdivision semantics:
   * plain C `%` with the usual signed/unsigned conversions). */
  size_t remainder = (addr % __pyx_v_alignment);
  /* Bump forward to the next boundary only when misaligned. */
  if (remainder > 0) {
    addr = (addr + (__pyx_v_alignment - remainder));
  }
  return ((void *)addr);
}
/* "View.MemoryView":323
 * cdef __Pyx_TypeInfo *typeinfo
 *
 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
 * self.obj = obj
 * self.flags = flags
 */
/* Python wrapper */
/*
 * CPython-facing wrapper for memoryview.__cinit__(obj, flags, dtype_is_object=False).
 * Unpacks the positional argument tuple and keyword dict per the CPython
 * calling convention, applies the default (False -> 0) for dtype_is_object,
 * converts flags to a C int, and forwards to the C-level implementation.
 * Returns 0 on success, -1 with a Python exception set on error.
 */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_obj = 0;
int __pyx_v_flags;
int __pyx_v_dtype_is_object;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
/* Collect positional arguments; the case labels intentionally fall
   through so that e.g. 3 positional args populate values[2..0]. */
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
/* Fill any remaining slots from keywords; again deliberate fallthrough
   starting at the first slot not covered by positional args. */
switch (pos_args) {
case 0:
if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
case 1:
if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 323; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
case 2:
if (kw_args > 0) {
PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dtype_is_object);
if (value) { values[2] = value; kw_args--; }
}
}
/* Any keyword left over is either unknown or a duplicate of a positional. */
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 323; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
}
} else {
/* No keywords: only 2 or 3 positional arguments are acceptable. */
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_obj = values[0];
__pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 323; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
if (values[2]) {
__pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 323; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
} else {
/* Default for dtype_is_object is False. */
__pyx_v_dtype_is_object = ((int)0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 323; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_memoryview_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/*
 * C-level body of memoryview.__cinit__.
 * - Stores obj and flags on self.
 * - Acquires the underlying buffer via __Pyx_GetBuffer when self is exactly
 *   the memoryview type or obj is not None; if the exporter left view.obj
 *   NULL, substitutes Py_None (with a matching incref).
 * - Allocates the thread lock used for buffer acquisition accounting;
 *   raises MemoryError if allocation fails.
 * - Sets dtype_is_object from the buffer format string (== b'O') when
 *   PyBUF_FORMAT was requested, otherwise from the dtype_is_object argument.
 * - Computes an aligned pointer into self.acquisition_count and clears
 *   self.typeinfo.
 * Returns 0 on success, -1 with a Python exception set on error.
 */
static int __pyx_memoryview_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
/* "View.MemoryView":324
 *
 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
 * self.obj = obj # <<<<<<<<<<<<<<
 * self.flags = flags
 * if type(self) is memoryview or obj is not None:
 */
/* Replace any previous self->obj reference with the new one. */
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
__Pyx_GOTREF(__pyx_v_self->obj);
__Pyx_DECREF(__pyx_v_self->obj);
__pyx_v_self->obj = __pyx_v_obj;
/* "View.MemoryView":325
 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
 * self.obj = obj
 * self.flags = flags # <<<<<<<<<<<<<<
 * if type(self) is memoryview or obj is not None:
 * __Pyx_GetBuffer(obj, &self.view, flags)
 */
__pyx_v_self->flags = __pyx_v_flags;
/* "View.MemoryView":326
 * self.obj = obj
 * self.flags = flags
 * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
 * __Pyx_GetBuffer(obj, &self.view, flags)
 * if <PyObject *> self.view.obj == NULL:
 */
/* Short-circuit `or`: check exact type first, then obj is not None. */
__pyx_t_1 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)((PyObject *)__pyx_memoryview_type)));
if (!(__pyx_t_1 != 0)) {
__pyx_t_2 = (__pyx_v_obj != Py_None);
__pyx_t_3 = (__pyx_t_2 != 0);
} else {
__pyx_t_3 = (__pyx_t_1 != 0);
}
if (__pyx_t_3) {
/* "View.MemoryView":327
 * self.flags = flags
 * if type(self) is memoryview or obj is not None:
 * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<<
 * if <PyObject *> self.view.obj == NULL:
 * (<__pyx_buffer *> &self.view).obj = Py_None
 */
__pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 327; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":328
 * if type(self) is memoryview or obj is not None:
 * __Pyx_GetBuffer(obj, &self.view, flags)
 * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
 * (<__pyx_buffer *> &self.view).obj = Py_None
 * Py_INCREF(Py_None)
 */
/* Some exporters leave Py_buffer.obj NULL; normalize to Py_None so later
   release logic has a real object to work with. */
__pyx_t_3 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0);
if (__pyx_t_3) {
/* "View.MemoryView":329
 * __Pyx_GetBuffer(obj, &self.view, flags)
 * if <PyObject *> self.view.obj == NULL:
 * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<<
 * Py_INCREF(Py_None)
 *
 */
((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
/* "View.MemoryView":330
 * if <PyObject *> self.view.obj == NULL:
 * (<__pyx_buffer *> &self.view).obj = Py_None
 * Py_INCREF(Py_None) # <<<<<<<<<<<<<<
 *
 * self.lock = PyThread_allocate_lock()
 */
Py_INCREF(Py_None);
goto __pyx_L4;
}
__pyx_L4:;
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":332
 * Py_INCREF(Py_None)
 *
 * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<<
 * if self.lock == NULL:
 * raise MemoryError
 */
__pyx_v_self->lock = PyThread_allocate_lock();
/* "View.MemoryView":333
 *
 * self.lock = PyThread_allocate_lock()
 * if self.lock == NULL: # <<<<<<<<<<<<<<
 * raise MemoryError
 *
 */
__pyx_t_3 = ((__pyx_v_self->lock == NULL) != 0);
if (__pyx_t_3) {
/* "View.MemoryView":334
 * self.lock = PyThread_allocate_lock()
 * if self.lock == NULL:
 * raise MemoryError # <<<<<<<<<<<<<<
 *
 * if flags & PyBUF_FORMAT:
 */
PyErr_NoMemory(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 334; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":336
 * raise MemoryError
 *
 * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
 * self.dtype_is_object = self.view.format == b'O'
 * else:
 */
__pyx_t_3 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_3) {
/* "View.MemoryView":337
 *
 * if flags & PyBUF_FORMAT:
 * self.dtype_is_object = self.view.format == b'O' # <<<<<<<<<<<<<<
 * else:
 * self.dtype_is_object = dtype_is_object
 */
/* Compare the buffer's format string against b'O' (object pointers) at
   the Python level to match bytes comparison semantics. */
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 337; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = PyObject_RichCompare(__pyx_t_5, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 337; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 337; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_v_self->dtype_is_object = __pyx_t_3;
goto __pyx_L6;
}
/*else*/ {
/* "View.MemoryView":339
 * self.dtype_is_object = self.view.format == b'O'
 * else:
 * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<<
 *
 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
 */
__pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
}
__pyx_L6:;
/* "View.MemoryView":341
 * self.dtype_is_object = dtype_is_object
 *
 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<<
 * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
 * self.typeinfo = NULL
 */
/* Align the acquisition counter so atomic operations on it are valid. */
__pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));
/* "View.MemoryView":343
 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
 * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
 * self.typeinfo = NULL # <<<<<<<<<<<<<<
 *
 * def __dealloc__(memoryview self):
 */
__pyx_v_self->typeinfo = NULL;
/* "View.MemoryView":323
 * cdef __Pyx_TypeInfo *typeinfo
 *
 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
 * self.obj = obj
 * self.flags = flags
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":345
 * self.typeinfo = NULL
 *
 * def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
 * if self.obj is not None:
 * __Pyx_ReleaseBuffer(&self.view)
 */
/* Python wrapper */
/* Thin CPython slot wrapper for memoryview.__dealloc__: casts self to the
   concrete struct type and delegates to the C-level implementation. */
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryview_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/*
 * C-level body of memoryview.__dealloc__: releases the acquired Py_buffer
 * (only when self.obj is not None, i.e. __cinit__ actually acquired one)
 * and frees the thread lock if it was allocated.  Never raises.
 */
static void __pyx_memoryview_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":346
 *
 * def __dealloc__(memoryview self):
 * if self.obj is not None: # <<<<<<<<<<<<<<
 * __Pyx_ReleaseBuffer(&self.view)
 *
 */
__pyx_t_1 = (__pyx_v_self->obj != Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":347
 * def __dealloc__(memoryview self):
 * if self.obj is not None:
 * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<<
 *
 * if self.lock != NULL:
 */
__Pyx_ReleaseBuffer((&__pyx_v_self->view));
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":349
 * __Pyx_ReleaseBuffer(&self.view)
 *
 * if self.lock != NULL: # <<<<<<<<<<<<<<
 * PyThread_free_lock(self.lock)
 *
 */
/* Lock may be NULL if __cinit__ failed before/at allocation. */
__pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":350
 *
 * if self.lock != NULL:
 * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<<
 *
 * cdef char *get_item_pointer(memoryview self, object index) except NULL:
 */
PyThread_free_lock(__pyx_v_self->lock);
goto __pyx_L4;
}
__pyx_L4:;
/* "View.MemoryView":345
 * self.typeinfo = NULL
 *
 * def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
 * if self.obj is not None:
 * __Pyx_ReleaseBuffer(&self.view)
 */
/* function exit code */
__Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":352
 * PyThread_free_lock(self.lock)
 *
 * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
 * cdef Py_ssize_t dim
 * cdef char *itemp = <char *> self.view.buf
 */
/*
 * get_item_pointer: compute the address of the element selected by `index`,
 * an iterable of one integer per dimension.  Starts from view.buf and applies
 * __pyx_pybuffer_index() once per dimension (enumerate-style loop).
 * Returns NULL with a Python exception set on error (bad index, iteration
 * failure, or non-integer element).
 */
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
Py_ssize_t __pyx_v_dim;
char *__pyx_v_itemp;
PyObject *__pyx_v_idx = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t __pyx_t_3;
PyObject *(*__pyx_t_4)(PyObject *);
PyObject *__pyx_t_5 = NULL;
Py_ssize_t __pyx_t_6;
char *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_item_pointer", 0);
/* "View.MemoryView":354
 * cdef char *get_item_pointer(memoryview self, object index) except NULL:
 * cdef Py_ssize_t dim
 * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<<
 *
 * for dim, idx in enumerate(index):
 */
__pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
/* "View.MemoryView":356
 * cdef char *itemp = <char *> self.view.buf
 *
 * for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
 * itemp = pybuffer_index(&self.view, itemp, idx, dim)
 *
 */
/* Fast paths for list/tuple iteration; fall back to the generic
   iterator protocol (tp_iternext) for anything else. */
__pyx_t_1 = 0;
if (PyList_CheckExact(__pyx_v_index) || PyTuple_CheckExact(__pyx_v_index)) {
__pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
__pyx_t_4 = NULL;
} else {
__pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 356; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext;
}
for (;;) {
if (!__pyx_t_4 && PyList_CheckExact(__pyx_t_2)) {
if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 356; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 356; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
} else if (!__pyx_t_4 && PyTuple_CheckExact(__pyx_t_2)) {
if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 356; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 356; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
} else {
__pyx_t_5 = __pyx_t_4(__pyx_t_2);
if (unlikely(!__pyx_t_5)) {
/* StopIteration terminates the loop; any other exception propagates. */
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 356; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
break;
}
__Pyx_GOTREF(__pyx_t_5);
}
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
__pyx_t_5 = 0;
/* enumerate(): dim tracks the running index. */
__pyx_v_dim = __pyx_t_1;
__pyx_t_1 = (__pyx_t_1 + 1);
/* "View.MemoryView":357
 *
 * for dim, idx in enumerate(index):
 * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<<
 *
 * return itemp
 */
__pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 357; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 357; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_itemp = __pyx_t_7;
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":359
 * itemp = pybuffer_index(&self.view, itemp, idx, dim)
 *
 * return itemp # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_r = __pyx_v_itemp;
goto __pyx_L0;
/* "View.MemoryView":352
 * PyThread_free_lock(self.lock)
 *
 * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
 * cdef Py_ssize_t dim
 * cdef char *itemp = <char *> self.view.buf
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":362
 *
 *
 * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
 * if index is Ellipsis:
 * return self
 */
/* Python wrapper */
/* Thin CPython slot wrapper for memoryview.__getitem__: casts self to the
   concrete struct type and delegates to the C-level implementation. */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/*
 * C-level body of memoryview.__getitem__:
 * - `...` (Ellipsis) returns self unchanged (new reference).
 * - Otherwise _unellipsify() normalizes the index into the 2-tuple
 *   (have_slices, indices).
 * - Slice indices produce a new memoryview via memview_slice(); a fully
 *   integer index is resolved to an element pointer (get_item_pointer) and
 *   converted back to a Python object (convert_item_to_object).
 * Returns NULL with a Python exception set on error.
 */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_indices = NULL;
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
char *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":363
 *
 * def __getitem__(memoryview self, object index):
 * if index is Ellipsis: # <<<<<<<<<<<<<<
 * return self
 *
 */
__pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":364
 * def __getitem__(memoryview self, object index):
 * if index is Ellipsis:
 * return self # <<<<<<<<<<<<<<
 *
 * have_slices, indices = _unellipsify(index, self.view.ndim)
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__pyx_r = ((PyObject *)__pyx_v_self);
goto __pyx_L0;
}
/* "View.MemoryView":366
 * return self
 *
 * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
 *
 * cdef char *itemp
 */
__pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
/* Unpack the expected 2-tuple; raise on None or wrong arity. */
if (likely(__pyx_t_3 != Py_None)) {
PyObject* sequence = __pyx_t_3;
#if CYTHON_COMPILING_IN_CPYTHON
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
#else
__pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
#endif
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 366; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_v_have_slices = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_v_indices = __pyx_t_5;
__pyx_t_5 = 0;
/* "View.MemoryView":369
 *
 * cdef char *itemp
 * if have_slices: # <<<<<<<<<<<<<<
 * return memview_slice(self, indices)
 * else:
 */
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 369; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_t_2) {
/* "View.MemoryView":370
 * cdef char *itemp
 * if have_slices:
 * return memview_slice(self, indices) # <<<<<<<<<<<<<<
 * else:
 * itemp = self.get_item_pointer(indices)
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 370; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/*else*/ {
/* "View.MemoryView":372
 * return memview_slice(self, indices)
 * else:
 * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<<
 * return self.convert_item_to_object(itemp)
 *
 */
/* Virtual-table dispatch: subclasses may override these cdef methods. */
__pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 372; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_itemp = __pyx_t_6;
/* "View.MemoryView":373
 * else:
 * itemp = self.get_item_pointer(indices)
 * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<<
 *
 * def __setitem__(memoryview self, object index, object value):
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 373; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":362
 *
 *
 * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
 * if index is Ellipsis:
 * return self
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_indices);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":375
 * return self.convert_item_to_object(itemp)
 *
 * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
 * have_slices, index = _unellipsify(index, self.view.ndim)
 *
 */
/* Python wrapper */
/* Thin CPython slot wrapper for memoryview.__setitem__: casts self to the
   concrete struct type and delegates to the C-level implementation. */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/*
 * C-level body of memoryview.__setitem__:
 * - Normalizes the index via _unellipsify() into (have_slices, index).
 * - For slice assignment, is_slice(value) decides between assigning from
 *   another buffer-like object (setitem_slice_assignment) and broadcasting
 *   a scalar (setitem_slice_assign_scalar on the sliced self[index]).
 * - For a fully-indexed element, delegates to setitem_indexed.
 * Returns 0 on success, -1 with a Python exception set on error.
 */
static int __pyx_memoryview_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_obj = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
/* `index` is rebound below, so hold our own reference to it. */
__Pyx_INCREF(__pyx_v_index);
/* "View.MemoryView":376
 *
 * def __setitem__(memoryview self, object index, object value):
 * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
 *
 * if have_slices:
 */
__pyx_t_1 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
/* Unpack the expected 2-tuple; raise on None or wrong arity. */
if (likely(__pyx_t_1 != Py_None)) {
PyObject* sequence = __pyx_t_1;
#if CYTHON_COMPILING_IN_CPYTHON
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_2 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_3);
#else
__pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
#endif
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 376; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_v_have_slices = __pyx_t_2;
__pyx_t_2 = 0;
__Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":378
 * have_slices, index = _unellipsify(index, self.view.ndim)
 *
 * if have_slices: # <<<<<<<<<<<<<<
 * obj = self.is_slice(value)
 * if obj:
 */
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 378; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_t_4) {
/* "View.MemoryView":379
 *
 * if have_slices:
 * obj = self.is_slice(value) # <<<<<<<<<<<<<<
 * if obj:
 * self.setitem_slice_assignment(self[index], obj)
 */
/* is_slice() returns a memoryview wrapping `value`, or a falsy result
   when `value` is not buffer-like (scalar broadcast case). */
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 379; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_obj = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":380
 * if have_slices:
 * obj = self.is_slice(value)
 * if obj: # <<<<<<<<<<<<<<
 * self.setitem_slice_assignment(self[index], obj)
 * else:
 */
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_4 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 380; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__pyx_t_4) {
/* "View.MemoryView":381
 * obj = self.is_slice(value)
 * if obj:
 * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<<
 * else:
 * self.setitem_slice_assign_scalar(self[index], value)
 */
__pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 381; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_1, __pyx_v_obj); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 381; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L4;
}
/*else*/ {
/* "View.MemoryView":383
 * self.setitem_slice_assignment(self[index], obj)
 * else:
 * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<<
 * else:
 * self.setitem_indexed(index, value)
 */
/* self[index] must itself be a memoryview (or None) before the scalar
   broadcast; TypeTest enforces that invariant. */
__pyx_t_3 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 383; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 383; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_3), __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 383; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
}
__pyx_L4:;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":385
 * self.setitem_slice_assign_scalar(self[index], value)
 * else:
 * self.setitem_indexed(index, value) # <<<<<<<<<<<<<<
 *
 * cdef is_slice(self, obj):
 */
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 385; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
}
__pyx_L3:;
/* "View.MemoryView":375
 * return self.convert_item_to_object(itemp)
 *
 * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
 * have_slices, index = _unellipsify(index, self.view.ndim)
 *
 */
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":387
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
/*
 * Cython-generated C for `memoryview.is_slice` (View.MemoryView:387).
 * Coerces `obj` to a memoryview: if it is not already one, tries
 * memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, self.dtype_is_object);
 * a TypeError from that constructor is swallowed and None is returned.
 * Returns a NEW reference (the coerced memoryview or None), or NULL with
 * an exception set on any other failure.
 * NOTE(review): machine-generated code — do not hand-edit the logic;
 * regenerate from the .pyx with Cython instead.
 */
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_slice", 0);
/* `obj` may be rebound below (DECREF_SET), so take our own reference up front. */
__Pyx_INCREF(__pyx_v_obj);
/* "View.MemoryView":388
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, ((PyObject *)__pyx_memoryview_type));
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":389
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
{
/* Save the active exception state (t_3/t_4/t_5) so the except clause can
 * handle a TypeError and then restore the outer state via ExceptionReset. */
__Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_5);
/*try:*/ {
/* "View.MemoryView":390
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_6 = __Pyx_PyInt_From_int((__pyx_v_self->flags | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":391
* try:
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object) # <<<<<<<<<<<<<<
* except TypeError:
* return None
*/
__pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 391; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
__Pyx_GOTREF(__pyx_t_7);
/* "View.MemoryView":390
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
/* Build the 3-tuple (obj, flags, dtype_is_object) and call the memoryview type. */
__pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
__Pyx_GOTREF(__pyx_t_8);
__Pyx_INCREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
__Pyx_GIVEREF(__pyx_t_7);
__pyx_t_6 = 0;
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryview_type)), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 390; __pyx_clineno = __LINE__; goto __pyx_L4_error;}
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
/* Rebind the local `obj` to the freshly constructed memoryview. */
__Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
__pyx_t_7 = 0;
}
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
goto __pyx_L11_try_end;
__pyx_L4_error:;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":392
* obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
* except TypeError: # <<<<<<<<<<<<<<
* return None
*
*/
/* Only TypeError is handled; any other exception propagates via L6_except_error. */
__pyx_t_9 = PyErr_ExceptionMatches(__pyx_builtin_TypeError);
if (__pyx_t_9) {
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L6_except_error;}
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":393
* self.dtype_is_object)
* except TypeError:
* return None # <<<<<<<<<<<<<<
*
* return obj
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(Py_None);
__pyx_r = Py_None;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
goto __pyx_L7_except_return;
/* NOTE(review): the four statements below are unreachable — typical Cython
 * codegen fallthrough after an unconditional `return` inside an except clause. */
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
goto __pyx_L5_exception_handled;
}
goto __pyx_L6_except_error;
__pyx_L6_except_error:;
/* Unhandled exception: restore the saved outer exception state, then fail. */
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L1_error;
__pyx_L7_except_return:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L0;
__pyx_L5_exception_handled:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
__pyx_L11_try_end:;
}
goto __pyx_L3;
}
__pyx_L3:;
/* "View.MemoryView":395
* return None
*
* return obj # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assignment(self, dst, src):
*/
/* Success: return the (possibly rebound) obj as a new reference. */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_obj);
__pyx_r = __pyx_v_obj;
goto __pyx_L0;
/* "View.MemoryView":387
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
/* __pyx_r = 0 (NULL) signals "exception set" to the caller. */
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":397
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
/*
 * Cython-generated C for `memoryview.setitem_slice_assignment` (View.MemoryView:397).
 * Extracts C-level __Pyx_memviewslice views from the `src` and `dst` memoryview
 * objects and copies src's contents into dst via memoryview_copy_contents.
 * Returns None (new reference) on success, NULL with an exception set on error.
 * NOTE(review): machine-generated — regenerate from the .pyx rather than editing.
 */
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
__Pyx_memviewslice __pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_src_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assignment", 0);
/* "View.MemoryView":401
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
/* Runtime type checks: src/dst must be memoryview instances (or None, which
 * would fail later); raises TypeError on mismatch. */
if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 401; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":402
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<<
* src.ndim, dst.ndim, self.dtype_is_object)
*
*/
if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 402; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":403
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
*/
/* `ndim` is fetched through Python attribute lookup (src/dst are untyped in
 * the .pyx), then coerced to C int. */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 403; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":401
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
/* The actual element-wise copy (handles broadcasting/overlap in the helper);
 * -1 return means a Python exception has been set. */
__pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 401; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":397
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":405
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
/*
 * Cython-generated C for `memoryview.setitem_slice_assign_scalar`
 * (View.MemoryView:405). Broadcast-assigns the scalar `value` to every
 * element of `dst`: packs `value` into a single-item buffer (a 128-int
 * stack array, or malloc'd storage when itemsize exceeds it), then calls
 * slice_assign_scalar to replicate that item across the slice.
 * Returns None on success, NULL with an exception set on error.
 * NOTE(review): machine-generated — regenerate from the .pyx rather than editing.
 */
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
int __pyx_v_array[128];
void *__pyx_v_tmp;
void *__pyx_v_item;
__Pyx_memviewslice *__pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_tmp_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);
/* "View.MemoryView":407
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
* cdef int array[128]
* cdef void *tmp = NULL # <<<<<<<<<<<<<<
* cdef void *item
*
*/
__pyx_v_tmp = NULL;
/* "View.MemoryView":412
* cdef __Pyx_memviewslice *dst_slice
* cdef __Pyx_memviewslice tmp_slice
* dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<<
*
* if <size_t>self.view.itemsize > sizeof(array):
*/
__pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice));
/* "View.MemoryView":414
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = malloc(self.view.itemsize)
* if tmp == NULL:
*/
/* Items up to sizeof(array) = 512 bytes use the stack buffer; larger items
 * fall back to the heap. */
__pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":415
*
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = malloc(self.view.itemsize) # <<<<<<<<<<<<<<
* if tmp == NULL:
* raise MemoryError
*/
__pyx_v_tmp = malloc(__pyx_v_self->view.itemsize);
/* "View.MemoryView":416
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
__pyx_t_1 = ((__pyx_v_tmp == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":417
* tmp = malloc(self.view.itemsize)
* if tmp == NULL:
* raise MemoryError # <<<<<<<<<<<<<<
* item = tmp
* else:
*/
PyErr_NoMemory(); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 417; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":418
* if tmp == NULL:
* raise MemoryError
* item = tmp # <<<<<<<<<<<<<<
* else:
* item = <void *> array
*/
__pyx_v_item = __pyx_v_tmp;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":420
* item = tmp
* else:
* item = <void *> array # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_v_item = ((void *)__pyx_v_array);
}
__pyx_L3:;
/* "View.MemoryView":422
* item = <void *> array
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":423
*
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<<
* else:
* try:
*/
/* Object dtype: store a BORROWED pointer to `value` in the item buffer —
 * presumably slice_assign_scalar increfs per destination element when
 * dtype_is_object is set; confirm against the generated helper. */
(((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);
goto __pyx_L5;
}
/*else*/ {
/* "View.MemoryView":425
* (<PyObject **> item)[0] = <PyObject *> value
* else:
* try: # <<<<<<<<<<<<<<
* self.assign_item_from_object(<char *> item, value)
* except:
*/
{
/* try/except guards the conversion so the malloc'd buffer is freed even if
 * assign_item_from_object raises. */
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
/*try:*/ {
/* "View.MemoryView":426
* else:
* try:
* self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<<
* except:
* free(tmp)
*/
__pyx_t_5 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 426; __pyx_clineno = __LINE__; goto __pyx_L6_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
goto __pyx_L13_try_end;
__pyx_L6_error:;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
/* "View.MemoryView":427
* try:
* self.assign_item_from_object(<char *> item, value)
* except: # <<<<<<<<<<<<<<
* free(tmp)
* raise
*/
/*except:*/ {
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 427; __pyx_clineno = __LINE__; goto __pyx_L8_except_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "View.MemoryView":428
* self.assign_item_from_object(<char *> item, value)
* except:
* free(tmp) # <<<<<<<<<<<<<<
* raise
*
*/
/* free(NULL) is a no-op, so this is safe on the stack-buffer path too. */
free(__pyx_v_tmp);
/* "View.MemoryView":429
* except:
* free(tmp)
* raise # <<<<<<<<<<<<<<
*
*
*/
/* Bare `raise`: hand the fetched exception back to the runtime and re-raise. */
__Pyx_GIVEREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_6);
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_ErrRestore(__pyx_t_5, __pyx_t_6, __pyx_t_7);
__pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_7 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 429; __pyx_clineno = __LINE__; goto __pyx_L8_except_error;}
/* NOTE(review): unreachable generated fallthrough after the re-raise above. */
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
goto __pyx_L7_exception_handled;
}
__pyx_L8_except_error:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L1_error;
__pyx_L7_exception_handled:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
__pyx_L13_try_end:;
}
}
__pyx_L5:;
/* "View.MemoryView":433
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
/* Indirect (PIL-style) buffers are rejected before the raw memory fill. */
__pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":434
*
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<<
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
* item, self.dtype_is_object)
*/
__pyx_t_7 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 434; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
goto __pyx_L16;
}
__pyx_L16:;
/* "View.MemoryView":435
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<<
* item, self.dtype_is_object)
* free(tmp)
*/
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
/* "View.MemoryView":437
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
* item, self.dtype_is_object)
* free(tmp) # <<<<<<<<<<<<<<
*
* cdef setitem_indexed(self, index, value):
*/
/* tmp is NULL unless the heap path was taken; free(NULL) is a no-op. */
free(__pyx_v_tmp);
/* "View.MemoryView":405
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":439
* free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
/*
 * Cython-generated C for `memoryview.setitem_indexed` (View.MemoryView:439).
 * Resolves `index` to the element's raw address via the vtable's
 * get_item_pointer, then writes `value` there with assign_item_from_object.
 * Returns None on success, NULL with an exception set on error.
 * NOTE(review): machine-generated — regenerate from the .pyx rather than editing.
 */
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
char *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_indexed", 0);
/* "View.MemoryView":440
*
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<<
* self.assign_item_from_object(itemp, value)
*
*/
/* NULL from get_item_pointer means an exception (e.g. IndexError) is set. */
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 440; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_itemp = __pyx_t_1;
/* "View.MemoryView":441
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 441; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":439
* free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":443
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/*
 * Cython-generated C for `memoryview.convert_item_to_object` (View.MemoryView:443).
 * Fallback element→Python conversion: reads itemsize bytes at `itemp` and
 * decodes them with struct.unpack(self.view.format, ...). A struct.error is
 * converted to ValueError("Unable to convert item to object"). For a
 * single-character format the lone scalar is returned instead of the
 * 1-tuple struct.unpack produces. Returns a new reference, or NULL on error.
 * NOTE(review): machine-generated — regenerate from the .pyx rather than editing.
 */
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_v_struct = NULL;
PyObject *__pyx_v_bytesitem = 0;
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
size_t __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":446
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef bytes bytesitem
*
*/
/* Function-local import of the stdlib `struct` module (slow path by design). */
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 446; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":449
* cdef bytes bytesitem
*
* bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<<
* try:
* result = struct.unpack(self.view.format, bytesitem)
*/
/* Copy the raw element bytes into a Python bytes object for unpacking. */
__pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 449; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":450
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
{
/* try/except/else lowered to gotos: L3_error = except clause entry,
 * L6_except_return = return out of the else clause, L10_try_end = join. */
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
/*try:*/ {
/* "View.MemoryView":451
* bytesitem = itemp[:self.view.itemsize]
* try:
* result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<<
* except struct.error:
* raise ValueError("Unable to convert item to object")
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 451; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 451; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = PyTuple_New(2); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 451; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__Pyx_GOTREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v_bytesitem);
PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_v_bytesitem);
__Pyx_GIVEREF(__pyx_v_bytesitem);
__pyx_t_5 = 0;
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 451; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__pyx_v_result = __pyx_t_5;
__pyx_t_5 = 0;
}
/*else:*/ {
/* "View.MemoryView":455
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
/* view.format is a C string; len() compiles to strlen(). */
__pyx_t_7 = strlen(__pyx_v_self->view.format);
__pyx_t_8 = ((__pyx_t_7 == 1) != 0);
if (__pyx_t_8) {
/* "View.MemoryView":456
* else:
* if len(self.view.format) == 1:
* return result[0] # <<<<<<<<<<<<<<
* return result
*
*/
/* Single-element format: unwrap the 1-tuple from struct.unpack. */
__Pyx_XDECREF(__pyx_r);
__pyx_t_5 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(__pyx_t_5 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 456; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;};
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L6_except_return;
}
/* "View.MemoryView":457
* if len(self.view.format) == 1:
* return result[0]
* return result # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L6_except_return;
}
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
goto __pyx_L10_try_end;
__pyx_L3_error:;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
/* "View.MemoryView":452
* try:
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error: # <<<<<<<<<<<<<<
* raise ValueError("Unable to convert item to object")
* else:
*/
/* struct.error is looked up dynamically on the imported module each time. */
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 452; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_9 = PyErr_ExceptionMatches(__pyx_t_5);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
if (__pyx_t_9) {
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_1) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 452; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_1);
/* "View.MemoryView":453
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
/* Replace struct.error with a ValueError carrying a fixed message
 * (__pyx_tuple__10 holds the pre-built argument tuple). */
__pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_10)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 453; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
__Pyx_GOTREF(__pyx_t_10);
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 453; __pyx_clineno = __LINE__; goto __pyx_L5_except_error;}
/* NOTE(review): unreachable generated fallthrough after the raise above. */
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
goto __pyx_L4_exception_handled;
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L1_error;
__pyx_L6_except_return:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L0;
__pyx_L4_exception_handled:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
__pyx_L10_try_end:;
}
/* "View.MemoryView":443
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesitem);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":459
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* Cython-generated code (View.MemoryView:459) — do not edit by hand;
 * changes are lost when the module is regenerated from the .pyx source.
 *
 * Writes a Python `value` into the raw item buffer `itemp`:
 *   1. imports the Python `struct` module,
 *   2. serializes `value` with struct.pack(self.view.format, ...) —
 *      a tuple value is splatted into positional arguments, any other
 *      value is passed as a single argument,
 *   3. copies the resulting bytes object into `itemp` one byte at a time.
 * Returns Py_None on success; returns 0 (NULL) with a Python exception
 * set on failure (the __pyx_L1_error path). */
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_v_struct = NULL;
char __pyx_v_c;
PyObject *__pyx_v_bytesvalue = 0;
Py_ssize_t __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
Py_ssize_t __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
char *__pyx_t_9;
char *__pyx_t_10;
char *__pyx_t_11;
char *__pyx_t_12;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":462
 * """Only used if instantiated manually by the user, or if Cython doesn't
 * know how to convert the type"""
 * import struct # <<<<<<<<<<<<<<
 * cdef char c
 * cdef bytes bytesvalue
 */
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, -1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 462; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":467
 * cdef Py_ssize_t i
 *
 * if isinstance(value, tuple): # <<<<<<<<<<<<<<
 * bytesvalue = struct.pack(self.view.format, *value)
 * else:
 */
__pyx_t_2 = PyTuple_Check(__pyx_v_value);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "View.MemoryView":468
 *
 * if isinstance(value, tuple):
 * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<<
 * else:
 * bytesvalue = struct.pack(self.view.format, value)
 */
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 468; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 468; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 468; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_4 = 0;
/* Build (format,) + tuple(value) so pack() receives the format string
 * followed by each element of `value` as a separate argument. */
__pyx_t_4 = PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 468; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 468; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 468; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 468; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":470
 * bytesvalue = struct.pack(self.view.format, *value)
 * else:
 * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<<
 *
 * for i, c in enumerate(bytesvalue):
 */
__pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 470; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 470; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 470; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_6);
__Pyx_INCREF(__pyx_v_value);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_value);
__Pyx_GIVEREF(__pyx_v_value);
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_1, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 470; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_6))||((__pyx_t_6) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_6)->tp_name), 0))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 470; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_6);
__pyx_t_6 = 0;
}
__pyx_L3:;
/* "View.MemoryView":472
 * bytesvalue = struct.pack(self.view.format, value)
 *
 * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
 * itemp[i] = c
 *
 */
__pyx_t_7 = 0;
if (unlikely(__pyx_v_bytesvalue == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 472; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__Pyx_INCREF(__pyx_v_bytesvalue);
__pyx_t_8 = __pyx_v_bytesvalue;
/* Iterate directly over the bytes object's internal buffer; __pyx_t_7
 * tracks the enumerate() index used to address itemp. */
__pyx_t_10 = PyBytes_AS_STRING(__pyx_t_8);
__pyx_t_11 = (__pyx_t_10 + PyBytes_GET_SIZE(__pyx_t_8));
for (__pyx_t_12 = __pyx_t_10; __pyx_t_12 < __pyx_t_11; __pyx_t_12++) {
__pyx_t_9 = __pyx_t_12;
__pyx_v_c = (__pyx_t_9[0]);
/* "View.MemoryView":473
 *
 * for i, c in enumerate(bytesvalue):
 * itemp[i] = c # <<<<<<<<<<<<<<
 *
 * @cname('getbuffer')
 */
__pyx_v_i = __pyx_t_7;
/* "View.MemoryView":472
 * bytesvalue = struct.pack(self.view.format, value)
 *
 * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
 * itemp[i] = c
 *
 */
__pyx_t_7 = (__pyx_t_7 + 1);
/* "View.MemoryView":473
 *
 * for i, c in enumerate(bytesvalue):
 * itemp[i] = c # <<<<<<<<<<<<<<
 *
 * @cname('getbuffer')
 */
(__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
}
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
/* "View.MemoryView":459
 * return result
 *
 * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
 * """Only used if instantiated manually by the user, or if Cython doesn't
 * know how to convert the type"""
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesvalue);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":476
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_STRIDES:
* info.shape = self.view.shape
*/
/* Python wrapper */
/* Cython-generated Python-level wrapper for memoryview.__getbuffer__ —
 * do not edit by hand. Casts self to the concrete struct type and
 * forwards to the typed implementation. */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_memoryview_getbuffer_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated code (View.MemoryView:476) — do not edit by hand.
 *
 * bf_getbuffer implementation for the memoryview wrapper type: fills the
 * caller-supplied Py_buffer `info` from self.view. shape/strides/suboffsets/
 * format are only exported when the corresponding flag bit (PyBUF_STRIDES,
 * PyBUF_INDIRECT, PyBUF_FORMAT) is set; otherwise the field is NULL.
 * buf/ndim/itemsize/len are always copied, readonly is forced to 0, and
 * info->obj receives a new reference to self. Always returns 0 (success). */
static int __pyx_memoryview_getbuffer_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
char *__pyx_t_3;
void *__pyx_t_4;
int __pyx_t_5;
Py_ssize_t __pyx_t_6;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
/* Pre-set info->obj to None so the exit path can distinguish "never
 * assigned" from the real self reference installed below. */
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "View.MemoryView":477
 * @cname('getbuffer')
 * def __getbuffer__(self, Py_buffer *info, int flags):
 * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
 * info.shape = self.view.shape
 * else:
 */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":478
 * def __getbuffer__(self, Py_buffer *info, int flags):
 * if flags & PyBUF_STRIDES:
 * info.shape = self.view.shape # <<<<<<<<<<<<<<
 * else:
 * info.shape = NULL
 */
__pyx_t_2 = __pyx_v_self->view.shape;
__pyx_v_info->shape = __pyx_t_2;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":480
 * info.shape = self.view.shape
 * else:
 * info.shape = NULL # <<<<<<<<<<<<<<
 *
 * if flags & PyBUF_STRIDES:
 */
__pyx_v_info->shape = NULL;
}
__pyx_L3:;
/* "View.MemoryView":482
 * info.shape = NULL
 *
 * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
 * info.strides = self.view.strides
 * else:
 */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":483
 *
 * if flags & PyBUF_STRIDES:
 * info.strides = self.view.strides # <<<<<<<<<<<<<<
 * else:
 * info.strides = NULL
 */
__pyx_t_2 = __pyx_v_self->view.strides;
__pyx_v_info->strides = __pyx_t_2;
goto __pyx_L4;
}
/*else*/ {
/* "View.MemoryView":485
 * info.strides = self.view.strides
 * else:
 * info.strides = NULL # <<<<<<<<<<<<<<
 *
 * if flags & PyBUF_INDIRECT:
 */
__pyx_v_info->strides = NULL;
}
__pyx_L4:;
/* "View.MemoryView":487
 * info.strides = NULL
 *
 * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
 * info.suboffsets = self.view.suboffsets
 * else:
 */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":488
 *
 * if flags & PyBUF_INDIRECT:
 * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<<
 * else:
 * info.suboffsets = NULL
 */
__pyx_t_2 = __pyx_v_self->view.suboffsets;
__pyx_v_info->suboffsets = __pyx_t_2;
goto __pyx_L5;
}
/*else*/ {
/* "View.MemoryView":490
 * info.suboffsets = self.view.suboffsets
 * else:
 * info.suboffsets = NULL # <<<<<<<<<<<<<<
 *
 * if flags & PyBUF_FORMAT:
 */
__pyx_v_info->suboffsets = NULL;
}
__pyx_L5:;
/* "View.MemoryView":492
 * info.suboffsets = NULL
 *
 * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
 * info.format = self.view.format
 * else:
 */
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":493
 *
 * if flags & PyBUF_FORMAT:
 * info.format = self.view.format # <<<<<<<<<<<<<<
 * else:
 * info.format = NULL
 */
__pyx_t_3 = __pyx_v_self->view.format;
__pyx_v_info->format = __pyx_t_3;
goto __pyx_L6;
}
/*else*/ {
/* "View.MemoryView":495
 * info.format = self.view.format
 * else:
 * info.format = NULL # <<<<<<<<<<<<<<
 *
 * info.buf = self.view.buf
 */
__pyx_v_info->format = NULL;
}
__pyx_L6:;
/* "View.MemoryView":497
 * info.format = NULL
 *
 * info.buf = self.view.buf # <<<<<<<<<<<<<<
 * info.ndim = self.view.ndim
 * info.itemsize = self.view.itemsize
 */
__pyx_t_4 = __pyx_v_self->view.buf;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":498
 *
 * info.buf = self.view.buf
 * info.ndim = self.view.ndim # <<<<<<<<<<<<<<
 * info.itemsize = self.view.itemsize
 * info.len = self.view.len
 */
__pyx_t_5 = __pyx_v_self->view.ndim;
__pyx_v_info->ndim = __pyx_t_5;
/* "View.MemoryView":499
 * info.buf = self.view.buf
 * info.ndim = self.view.ndim
 * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<<
 * info.len = self.view.len
 * info.readonly = 0
 */
__pyx_t_6 = __pyx_v_self->view.itemsize;
__pyx_v_info->itemsize = __pyx_t_6;
/* "View.MemoryView":500
 * info.ndim = self.view.ndim
 * info.itemsize = self.view.itemsize
 * info.len = self.view.len # <<<<<<<<<<<<<<
 * info.readonly = 0
 * info.obj = self
 */
__pyx_t_6 = __pyx_v_self->view.len;
__pyx_v_info->len = __pyx_t_6;
/* "View.MemoryView":501
 * info.itemsize = self.view.itemsize
 * info.len = self.view.len
 * info.readonly = 0 # <<<<<<<<<<<<<<
 * info.obj = self
 *
 */
__pyx_v_info->readonly = 0;
/* "View.MemoryView":502
 * info.len = self.view.len
 * info.readonly = 0
 * info.obj = self # <<<<<<<<<<<<<<
 *
 * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
 */
/* Replace the placeholder None in info->obj with a new reference to self;
 * the buffer consumer releases this reference via PyBuffer_Release. */
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":476
 *
 * @cname('getbuffer')
 * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
 * if flags & PyBUF_STRIDES:
 * info.shape = self.view.shape
 */
/* function exit code */
__pyx_r = 0;
/* If info->obj is still the placeholder None (not reached in this body,
 * but kept by the generator for uniform exit handling), drop it. */
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":509
* property T:
* @cname('__pyx_memoryview_transpose')
* def __get__(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* Python wrapper */
/* Cython-generated Python-level wrapper for the memoryview.T property
 * getter — do not edit by hand. Forwards to the typed implementation. */
static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_transpose(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_memoryview_transpose_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated code (View.MemoryView:509) — do not edit by hand.
 *
 * memoryview.T property getter: copies the memoryview via
 * __pyx_memoryview_copy_object, transposes the copy's slice in place with
 * __pyx_memslice_transpose, and returns the transposed copy. Returns NULL
 * with a Python exception set on failure. */
static PyObject *__pyx_memoryview_transpose_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":510
 * @cname('__pyx_memoryview_transpose')
 * def __get__(self):
 * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<<
 * transpose_memslice(&result.from_slice)
 * return result
 */
__pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 510; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 510; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":511
 * def __get__(self):
 * cdef _memoryviewslice result = memoryview_copy(self)
 * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<<
 * return result
 *
 */
/* __pyx_memslice_transpose returns 0 on failure (with an exception set). */
__pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 511; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":512
 * cdef _memoryviewslice result = memoryview_copy(self)
 * transpose_memslice(&result.from_slice)
 * return result # <<<<<<<<<<<<<<
 *
 * property base:
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":509
 * property T:
 * @cname('__pyx_memoryview_transpose')
 * def __get__(self): # <<<<<<<<<<<<<<
 * cdef _memoryviewslice result = memoryview_copy(self)
 * transpose_memslice(&result.from_slice)
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":516
* property base:
* @cname('__pyx_memoryview__get__base')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* Python wrapper */
/* Cython-generated Python-level wrapper for the memoryview.base property
 * getter — do not edit by hand. Forwards to the typed implementation. */
static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview__get__base(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_memoryview__get__base_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated code (View.MemoryView:516) — do not edit by hand.
 *
 * memoryview.base property getter: returns a new reference to the
 * underlying exporting object self.obj. Cannot fail. */
static PyObject *__pyx_memoryview__get__base_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":517
 * @cname('__pyx_memoryview__get__base')
 * def __get__(self):
 * return self.obj # <<<<<<<<<<<<<<
 *
 * property shape:
 */
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->obj);
__pyx_r = __pyx_v_self->obj;
goto __pyx_L0;
/* "View.MemoryView":516
 * property base:
 * @cname('__pyx_memoryview__get__base')
 * def __get__(self): # <<<<<<<<<<<<<<
 * return self.obj
 *
 */
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":521
* property shape:
* @cname('__pyx_memoryview_get_shape')
* def __get__(self): # <<<<<<<<<<<<<<
* return tuple([self.view.shape[i] for i in xrange(self.view.ndim)])
*
*/
/* Python wrapper */
/* Cython-generated Python-level wrapper for the memoryview.shape property
 * getter — do not edit by hand. Forwards to the typed implementation. */
static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_shape(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_memoryview_get_shape_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated code (View.MemoryView:521) — do not edit by hand.
 *
 * memoryview.shape property getter: builds a Python tuple of
 * self.view.shape[0..ndim-1]. Returns NULL with an exception set on
 * allocation failure. */
static PyObject *__pyx_memoryview_get_shape_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
int __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":522
 * @cname('__pyx_memoryview_get_shape')
 * def __get__(self):
 * return tuple([self.view.shape[i] for i in xrange(self.view.ndim)]) # <<<<<<<<<<<<<<
 *
 * property strides:
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __pyx_v_self->view.ndim;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
__pyx_t_4 = PyInt_FromSsize_t((__pyx_v_self->view.shape[__pyx_v_i])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_4))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
}
__pyx_t_4 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 522; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L0;
/* "View.MemoryView":521
 * property shape:
 * @cname('__pyx_memoryview_get_shape')
 * def __get__(self): # <<<<<<<<<<<<<<
 * return tuple([self.view.shape[i] for i in xrange(self.view.ndim)])
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":526
* property strides:
* @cname('__pyx_memoryview_get_strides')
* def __get__(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* Python wrapper */
/* Cython-generated Python-level wrapper for the memoryview.strides
 * property getter — do not edit by hand. Forwards to the typed
 * implementation. */
static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_strides(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_memoryview_get_strides_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated code (View.MemoryView:526) — do not edit by hand.
 *
 * memoryview.strides property getter: returns a tuple of
 * self.view.strides[0..ndim-1]. Raises ValueError (returns NULL) when
 * the underlying buffer does not expose strides (strides == NULL). */
static PyObject *__pyx_memoryview_get_strides_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
int __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":527
 * @cname('__pyx_memoryview_get_strides')
 * def __get__(self):
 * if self.view.strides == NULL: # <<<<<<<<<<<<<<
 *
 * raise ValueError("Buffer view does not expose strides")
 */
__pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":529
 * if self.view.strides == NULL:
 *
 * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
 *
 * return tuple([self.view.strides[i] for i in xrange(self.view.ndim)])
 */
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 529; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 529; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":531
 * raise ValueError("Buffer view does not expose strides")
 *
 * return tuple([self.view.strides[i] for i in xrange(self.view.ndim)]) # <<<<<<<<<<<<<<
 *
 * property suboffsets:
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __pyx_v_self->view.ndim;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
__pyx_t_5 = PyInt_FromSsize_t((__pyx_v_self->view.strides[__pyx_v_i])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 531; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":526
 * property strides:
 * @cname('__pyx_memoryview_get_strides')
 * def __get__(self): # <<<<<<<<<<<<<<
 * if self.view.strides == NULL:
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":535
* property suboffsets:
* @cname('__pyx_memoryview_get_suboffsets')
* def __get__(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return [-1] * self.view.ndim
*/
/* Python wrapper */
/* Cython-generated Python-level wrapper for the memoryview.suboffsets
 * property getter — do not edit by hand. Forwards to the typed
 * implementation. */
static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_suboffsets(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_memoryview_get_suboffsets_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated code (View.MemoryView:535) — do not edit by hand.
 *
 * memoryview.suboffsets property getter: when the buffer exposes no
 * suboffsets (pointer is NULL) returns the list [-1] * ndim; otherwise
 * returns a tuple of self.view.suboffsets[0..ndim-1]. Returns NULL with
 * an exception set on allocation failure. */
static PyObject *__pyx_memoryview_get_suboffsets_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
int __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":536
 * @cname('__pyx_memoryview_get_suboffsets')
 * def __get__(self):
 * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
 * return [-1] * self.view.ndim
 *
 */
__pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":537
 * def __get__(self):
 * if self.view.suboffsets == NULL:
 * return [-1] * self.view.ndim # <<<<<<<<<<<<<<
 *
 * return tuple([self.view.suboffsets[i] for i in xrange(self.view.ndim)])
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = PyList_New(1 * ((__pyx_v_self->view.ndim<0) ? 0:__pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 537; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < __pyx_v_self->view.ndim; __pyx_temp++) {
__Pyx_INCREF(__pyx_int_neg_1);
PyList_SET_ITEM(__pyx_t_2, __pyx_temp, __pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_int_neg_1);
}
}
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":539
 * return [-1] * self.view.ndim
 *
 * return tuple([self.view.suboffsets[i] for i in xrange(self.view.ndim)]) # <<<<<<<<<<<<<<
 *
 * property ndim:
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 539; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __pyx_v_self->view.ndim;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
__pyx_t_5 = PyInt_FromSsize_t((__pyx_v_self->view.suboffsets[__pyx_v_i])); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 539; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_5))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 539; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 539; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":535
 * property suboffsets:
 * @cname('__pyx_memoryview_get_suboffsets')
 * def __get__(self): # <<<<<<<<<<<<<<
 * if self.view.suboffsets == NULL:
 * return [-1] * self.view.ndim
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":543
* property ndim:
* @cname('__pyx_memoryview_get_ndim')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* Python wrapper */
/* Cython-generated Python-level wrapper for the memoryview.ndim property
 * getter — do not edit by hand. Forwards to the typed implementation. */
static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_ndim(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_memoryview_get_ndim_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated code (View.MemoryView:543) — do not edit by hand.
 *
 * memoryview.ndim property getter: returns self.view.ndim as a Python
 * int. Returns NULL with an exception set on allocation failure. */
static PyObject *__pyx_memoryview_get_ndim_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":544
 * @cname('__pyx_memoryview_get_ndim')
 * def __get__(self):
 * return self.view.ndim # <<<<<<<<<<<<<<
 *
 * property itemsize:
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 544; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":543
 * property ndim:
 * @cname('__pyx_memoryview_get_ndim')
 * def __get__(self): # <<<<<<<<<<<<<<
 * return self.view.ndim
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":548
* property itemsize:
* @cname('__pyx_memoryview_get_itemsize')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* Python wrapper */
/* Cython-generated Python-level wrapper for the memoryview.itemsize
 * property getter — do not edit by hand. Forwards to the typed
 * implementation. */
static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_itemsize(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_memoryview_get_itemsize_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated code (View.MemoryView:548) — do not edit by hand.
 *
 * memoryview.itemsize property getter: returns self.view.itemsize as a
 * Python int. Returns NULL with an exception set on allocation failure. */
static PyObject *__pyx_memoryview_get_itemsize_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":549
 * @cname('__pyx_memoryview_get_itemsize')
 * def __get__(self):
 * return self.view.itemsize # <<<<<<<<<<<<<<
 *
 * property nbytes:
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 549; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":548
 * property itemsize:
 * @cname('__pyx_memoryview_get_itemsize')
 * def __get__(self): # <<<<<<<<<<<<<<
 * return self.view.itemsize
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":553
* property nbytes:
* @cname('__pyx_memoryview_get_nbytes')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* Python wrapper */
/* Cython-generated Python-level wrapper for the memoryview.nbytes
 * property getter — do not edit by hand. Forwards to the typed
 * implementation. */
static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_nbytes(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_memoryview_get_nbytes_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* Cython-generated code (View.MemoryView:553) — do not edit by hand.
 *
 * memoryview.nbytes property getter: computes self.size *
 * self.view.itemsize, fetching `size` through a Python attribute lookup
 * (so the `size` property getter runs) and multiplying with
 * PyNumber_Multiply. Returns NULL with an exception set on failure. */
static PyObject *__pyx_memoryview_get_nbytes_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":554
 * @cname('__pyx_memoryview_get_nbytes')
 * def __get__(self):
 * return self.size * self.view.itemsize # <<<<<<<<<<<<<<
 *
 * property size:
 */
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 554; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":553
 * property nbytes:
 * @cname('__pyx_memoryview_get_nbytes')
 * def __get__(self): # <<<<<<<<<<<<<<
 * return self.size * self.view.itemsize
 *
 */
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":558
* property size:
* @cname('__pyx_memoryview_get_size')
* def __get__(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* Python wrapper */
/* CPython-level getter for memoryview.size: narrows the receiver and
 * forwards to the typed implementation. */
static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview_get_size(PyObject *__pyx_v_self) {
  PyObject *retval = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  retval = __pyx_memoryview_get_size_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  __Pyx_RefNannyFinishContext();
  return retval;
}
/* Typed implementation of the `size` property: the total element
 * count, computed lazily as the product of the entries of self.shape
 * and cached in self._size (a Python object, initially None) so
 * subsequent accesses return the cached value without recomputing. */
static PyObject *__pyx_memoryview_get_size_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_v_result = NULL;
  PyObject *__pyx_v_length = NULL;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  Py_ssize_t __pyx_t_5;
  PyObject *(*__pyx_t_6)(PyObject *);
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__get__", 0);
  /* "View.MemoryView":559
 * @cname('__pyx_memoryview_get_size')
 * def __get__(self):
 * if self._size is None: # <<<<<<<<<<<<<<
 * result = 1
 *
 */
  /* Only compute on first access; `_size is None` means "not cached yet". */
  __pyx_t_1 = (__pyx_v_self->_size == Py_None);
  __pyx_t_2 = (__pyx_t_1 != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":560
 * def __get__(self):
 * if self._size is None:
 * result = 1 # <<<<<<<<<<<<<<
 *
 * for length in self.shape:
 */
    __Pyx_INCREF(__pyx_int_1);
    __pyx_v_result = __pyx_int_1;
    /* "View.MemoryView":562
 * result = 1
 *
 * for length in self.shape: # <<<<<<<<<<<<<<
 * result *= length
 *
 */
    __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_shape); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    /* Cython for-in iteration protocol: direct indexing fast paths for
     * exact list/tuple, generic tp_iternext otherwise. */
    if (PyList_CheckExact(__pyx_t_3) || PyTuple_CheckExact(__pyx_t_3)) {
      __pyx_t_4 = __pyx_t_3; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
      __pyx_t_6 = NULL;
    } else {
      __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_4);
      __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext;
    }
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    for (;;) {
      if (!__pyx_t_6 && PyList_CheckExact(__pyx_t_4)) {
        if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
        #if CYTHON_COMPILING_IN_CPYTHON
        __pyx_t_3 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_3); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        #else
        __pyx_t_3 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        #endif
      } else if (!__pyx_t_6 && PyTuple_CheckExact(__pyx_t_4)) {
        if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
        #if CYTHON_COMPILING_IN_CPYTHON
        __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_3); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        #else
        __pyx_t_3 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
        #endif
      } else {
        /* Generic iterator: StopIteration ends the loop, any other
         * exception propagates to the error label. */
        __pyx_t_3 = __pyx_t_6(__pyx_t_4);
        if (unlikely(!__pyx_t_3)) {
          PyObject* exc_type = PyErr_Occurred();
          if (exc_type) {
            if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
            else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 562; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
          }
          break;
        }
        __Pyx_GOTREF(__pyx_t_3);
      }
      __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_3);
      __pyx_t_3 = 0;
      /* "View.MemoryView":563
 *
 * for length in self.shape:
 * result *= length # <<<<<<<<<<<<<<
 *
 * self._size = result
 */
      __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 563; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_3);
      __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_3);
      __pyx_t_3 = 0;
    }
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    /* "View.MemoryView":565
 * result *= length
 *
 * self._size = result # <<<<<<<<<<<<<<
 *
 * return self._size
 */
    /* Store the computed product into the cache slot, releasing the
     * previous value (None on the first pass). */
    __Pyx_INCREF(__pyx_v_result);
    __Pyx_GIVEREF(__pyx_v_result);
    __Pyx_GOTREF(__pyx_v_self->_size);
    __Pyx_DECREF(__pyx_v_self->_size);
    __pyx_v_self->_size = __pyx_v_result;
    goto __pyx_L3;
  }
  __pyx_L3:;
  /* "View.MemoryView":567
 * self._size = result
 *
 * return self._size # <<<<<<<<<<<<<<
 *
 * def __len__(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->_size);
  __pyx_r = __pyx_v_self->_size;
  goto __pyx_L0;
  /* "View.MemoryView":558
 * property size:
 * @cname('__pyx_memoryview_get_size')
 * def __get__(self): # <<<<<<<<<<<<<<
 * if self._size is None:
 * result = 1
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XDECREF(__pyx_v_result);
  __Pyx_XDECREF(__pyx_v_length);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":569
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* Python wrapper */
/* CPython slot wrapper for len(memoryview): narrows the receiver and
 * forwards to the typed __len__ implementation. */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
  Py_ssize_t retval;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
  retval = __pyx_memoryview_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  __Pyx_RefNannyFinishContext();
  return retval;
}
static Py_ssize_t __pyx_memoryview_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":570
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
__pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":571
* def __len__(self):
* if self.view.ndim >= 1:
* return self.view.shape[0] # <<<<<<<<<<<<<<
*
* return 0
*/
__pyx_r = (__pyx_v_self->view.shape[0]);
goto __pyx_L0;
}
/* "View.MemoryView":573
* return self.view.shape[0]
*
* return 0 # <<<<<<<<<<<<<<
*
* def __repr__(self):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":569
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":575
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* Python wrapper */
/* CPython slot wrapper for repr(memoryview): narrows the receiver and
 * forwards to the typed __repr__ implementation. */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
  PyObject *retval = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
  retval = __pyx_memoryview_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  __Pyx_RefNannyFinishContext();
  return retval;
}
/* Typed implementation of memoryview.__repr__:
 *     return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, id(self))
 * Builds the two-element argument tuple and formats it with the
 * interned format string.  Note PyTuple_SET_ITEM steals references,
 * hence the __Pyx_GIVEREF calls and zeroing of the temporaries. */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__repr__", 0);
  /* "View.MemoryView":576
 *
 * def __repr__(self):
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
 * id(self))
 *
 */
  __Pyx_XDECREF(__pyx_r);
  /* self.base.__class__.__name__ — three chained attribute lookups. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* "View.MemoryView":577
 * def __repr__(self):
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
 * id(self)) # <<<<<<<<<<<<<<
 *
 * def __str__(self):
 */
  /* id(self): call the builtin with a 1-tuple argument. */
  __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_INCREF(((PyObject *)__pyx_v_self));
  PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_self));
  __Pyx_GIVEREF(((PyObject *)__pyx_v_self));
  __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_id, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* "View.MemoryView":576
 *
 * def __repr__(self):
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
 * id(self))
 *
 */
  /* Pack (class-name, id) and apply the %-format string. */
  __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_3);
  __pyx_t_1 = 0;
  __pyx_t_3 = 0;
  __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 576; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_3;
  __pyx_t_3 = 0;
  goto __pyx_L0;
  /* "View.MemoryView":575
 * return 0
 *
 * def __repr__(self): # <<<<<<<<<<<<<<
 * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
 * id(self))
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":579
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* Python wrapper */
/* CPython slot wrapper for str(memoryview): narrows the receiver and
 * forwards to the typed __str__ implementation. */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
  PyObject *retval = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
  retval = __pyx_memoryview_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
  __Pyx_RefNannyFinishContext();
  return retval;
}
/* Typed implementation of memoryview.__str__:
 *     return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
 * Like __repr__ but without the id(); formats the base object's class
 * name into the interned format string via a 1-tuple. */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("__str__", 0);
  /* "View.MemoryView":580
 *
 * def __str__(self):
 * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
 *
 *
 */
  __Pyx_XDECREF(__pyx_r);
  /* self.base.__class__.__name__ — chained attribute lookups. */
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  /* Build the 1-tuple format argument (SET_ITEM steals the reference). */
  __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  __pyx_t_1 = 0;
  __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 580; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* "View.MemoryView":579
 * id(self))
 *
 * def __str__(self): # <<<<<<<<<<<<<<
 * return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
 *
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":583
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
/* CPython-level wrapper for memoryview.is_c_contig(): narrows the
 * receiver and forwards to the typed implementation. */
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
  PyObject *retval = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
  retval = __pyx_memoryview_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
  __Pyx_RefNannyFinishContext();
  return retval;
}
static PyObject *__pyx_memoryview_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_c_contig", 0);
/* "View.MemoryView":586
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice, 'C', self.view.ndim)
*
*/
__pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp));
/* "View.MemoryView":587
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice, 'C', self.view.ndim) # <<<<<<<<<<<<<<
*
* def is_f_contig(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig(__pyx_v_mslice, 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 587; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":583
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":589
* return slice_is_contig(mslice, 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
/* CPython-level wrapper for memoryview.is_f_contig(): narrows the
 * receiver and forwards to the typed implementation. */
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
  PyObject *retval = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
  retval = __pyx_memoryview_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
  __Pyx_RefNannyFinishContext();
  return retval;
}
static PyObject *__pyx_memoryview_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_f_contig", 0);
/* "View.MemoryView":592
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice, 'F', self.view.ndim)
*
*/
__pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp));
/* "View.MemoryView":593
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice, 'F', self.view.ndim) # <<<<<<<<<<<<<<
*
* def copy(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig(__pyx_v_mslice, 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 593; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":589
* return slice_is_contig(mslice, 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":595
* return slice_is_contig(mslice, 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* Python wrapper */
/* CPython-level wrapper for memoryview.copy(): narrows the receiver
 * and forwards to the typed implementation. */
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
  PyObject *retval = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("copy (wrapper)", 0);
  retval = __pyx_memoryview_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));
  __Pyx_RefNannyFinishContext();
  return retval;
}
/* Typed implementation of memoryview.copy(): makes a new C-contiguous
 * copy of this view.  Clears the F-contiguous flag, forces the
 * C-contiguous flag, performs the contiguous slice copy, then wraps
 * the result slice in a new memoryview object. */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
  __Pyx_memviewslice __pyx_v_mslice;
  int __pyx_v_flags;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_memviewslice __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("copy", 0);
  /* "View.MemoryView":597
 * def copy(self):
 * cdef __Pyx_memviewslice mslice
 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<<
 *
 * slice_copy(self, &mslice)
 */
  /* Drop the Fortran-contiguity request; the copy will be C-ordered. */
  __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));
  /* "View.MemoryView":599
 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
 *
 * slice_copy(self, &mslice) # <<<<<<<<<<<<<<
 * mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
 * self.view.itemsize,
 */
  /* Fill mslice with this view's data pointer, shape and strides. */
  __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));
  /* "View.MemoryView":600
 *
 * slice_copy(self, &mslice)
 * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<<
 * self.view.itemsize,
 * flags|PyBUF_C_CONTIGUOUS,
 */
  /* Allocate and fill a C-contiguous ("c" order) copy; the helper
   * signals failure via the thread's exception state, hence the
   * PyErr_Occurred() check. */
  __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), __pyx_k_c, __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 600; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_v_mslice = __pyx_t_1;
  /* "View.MemoryView":605
 * self.dtype_is_object)
 *
 * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<<
 *
 * def copy_fortran(self):
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 605; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;
  /* "View.MemoryView":595
 * return slice_is_contig(mslice, 'F', self.view.ndim)
 *
 * def copy(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice mslice
 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":607
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* Python wrapper */
/* CPython-level wrapper for memoryview.copy_fortran(): narrows the
 * receiver and forwards to the typed implementation. */
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
  PyObject *retval = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
  retval = __pyx_memoryview_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));
  __Pyx_RefNannyFinishContext();
  return retval;
}
/* Typed implementation of memoryview.copy_fortran(): the Fortran-order
 * counterpart of copy().  Clears the C-contiguous flag, forces the
 * F-contiguous flag, copies the data in "fortran" order and wraps the
 * result slice in a new memoryview object. */
static PyObject *__pyx_memoryview_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
  __Pyx_memviewslice __pyx_v_src;
  __Pyx_memviewslice __pyx_v_dst;
  int __pyx_v_flags;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_memviewslice __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("copy_fortran", 0);
  /* "View.MemoryView":609
 * def copy_fortran(self):
 * cdef __Pyx_memviewslice src, dst
 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<<
 *
 * slice_copy(self, &src)
 */
  /* Drop the C-contiguity request; the copy will be Fortran-ordered. */
  __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));
  /* "View.MemoryView":611
 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
 *
 * slice_copy(self, &src) # <<<<<<<<<<<<<<
 * dst = slice_copy_contig(&src, "fortran", self.view.ndim,
 * self.view.itemsize,
 */
  /* Fill src with this view's data pointer, shape and strides. */
  __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));
  /* "View.MemoryView":612
 *
 * slice_copy(self, &src)
 * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<<
 * self.view.itemsize,
 * flags|PyBUF_F_CONTIGUOUS,
 */
  /* Allocate and fill a Fortran-contiguous copy; failure is signalled
   * through the thread's exception state (PyErr_Occurred check). */
  __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), __pyx_k_fortran, __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 612; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __pyx_v_dst = __pyx_t_1;
  /* "View.MemoryView":617
 * self.dtype_is_object)
 *
 * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<<
 *
 *
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 617; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_r = __pyx_t_2;
  __pyx_t_2 = 0;
  goto __pyx_L0;
  /* "View.MemoryView":607
 * return memoryview_copy_from_slice(self, &mslice)
 *
 * def copy_fortran(self): # <<<<<<<<<<<<<<
 * cdef __Pyx_memviewslice src, dst
 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":621
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
/* C-level factory (cname __pyx_memoryview_new) used throughout the
 * generated module to construct a Cython memoryview:
 *     result = memoryview(o, flags, dtype_is_object)
 *     result.typeinfo = typeinfo
 * The constructor is invoked through the normal Python call protocol
 * with a freshly built 3-tuple of boxed arguments, then the C-only
 * `typeinfo` field is patched in before returning a new reference. */
static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) {
  struct __pyx_memoryview_obj *__pyx_v_result = 0;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("memoryview_cwrapper", 0);
  /* "View.MemoryView":622
 * @cname('__pyx_memoryview_new')
 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
 * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<<
 * result.typeinfo = typeinfo
 * return result
 */
  /* Box the C ints into Python objects for the constructor call. */
  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 622; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 622; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  /* Assemble the (o, flags, dtype_is_object) argument tuple; SET_ITEM
   * steals the references that were just created/INCREF'd. */
  __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 622; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_INCREF(__pyx_v_o);
  PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
  __Pyx_GIVEREF(__pyx_v_o);
  PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __pyx_t_1 = 0;
  __pyx_t_2 = 0;
  __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryview_type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 622; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2);
  __pyx_t_2 = 0;
  /* "View.MemoryView":623
 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
 * cdef memoryview result = memoryview(o, flags, dtype_is_object)
 * result.typeinfo = typeinfo # <<<<<<<<<<<<<<
 * return result
 *
 */
  /* typeinfo is a plain C pointer field, not reachable from Python. */
  __pyx_v_result->typeinfo = __pyx_v_typeinfo;
  /* "View.MemoryView":624
 * cdef memoryview result = memoryview(o, flags, dtype_is_object)
 * result.typeinfo = typeinfo
 * return result # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_check')
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(((PyObject *)__pyx_v_result));
  __pyx_r = ((PyObject *)__pyx_v_result);
  goto __pyx_L0;
  /* "View.MemoryView":621
 *
 * @cname('__pyx_memoryview_new')
 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
 * cdef memoryview result = memoryview(o, flags, dtype_is_object)
 * result.typeinfo = typeinfo
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":627
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
/* C-level `isinstance(o, memoryview)` check against the Cython
 * memoryview extension type (cname __pyx_memoryview_check).
 * Cannot fail; returns a plain C truth value. */
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
  int is_memview;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("memoryview_check", 0);
  is_memview = __Pyx_TypeCheck(__pyx_v_o, ((PyObject *)__pyx_memoryview_type));
  __Pyx_RefNannyFinishContext();
  return is_memview;
}
/* "View.MemoryView":630
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
/*
 * _unellipsify: normalize an indexing object for an `ndim`-dimensional
 * memoryview.  A non-tuple index is wrapped into a 1-tuple; the first
 * Ellipsis found is expanded into `ndim - len(tup) + 1` full slices
 * (subsequent ellipses become a single full slice each); every other
 * item must be a slice or an integer index, otherwise TypeError is
 * raised.  The result is padded with trailing full slices up to `ndim`
 * entries.  Returns a new reference to the 2-tuple
 * (have_slices or nslices, tuple(result)), or NULL with an exception
 * set on error.  Generated by Cython from View.MemoryView:630.
 */
static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
PyObject *__pyx_v_tup = NULL;
PyObject *__pyx_v_result = NULL;
int __pyx_v_have_slices;
int __pyx_v_seen_ellipsis;
CYTHON_UNUSED PyObject *__pyx_v_idx = NULL;
PyObject *__pyx_v_item = NULL;
Py_ssize_t __pyx_v_nslices;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
PyObject *(*__pyx_t_6)(PyObject *);
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
int __pyx_t_10;
int __pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("_unellipsify", 0);
/* "View.MemoryView":635
 * full slices.
 * """
 * if not isinstance(index, tuple): # <<<<<<<<<<<<<<
 * tup = (index,)
 * else:
 */
__pyx_t_1 = PyTuple_Check(__pyx_v_index);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":636
 * """
 * if not isinstance(index, tuple):
 * tup = (index,) # <<<<<<<<<<<<<<
 * else:
 * tup = index
 */
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 636; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_index);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
__Pyx_GIVEREF(__pyx_v_index);
__pyx_v_tup = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":638
 * tup = (index,)
 * else:
 * tup = index # <<<<<<<<<<<<<<
 *
 * result = []
 */
__Pyx_INCREF(__pyx_v_index);
__pyx_v_tup = __pyx_v_index;
}
__pyx_L3:;
/* "View.MemoryView":640
 * tup = index
 *
 * result = [] # <<<<<<<<<<<<<<
 * have_slices = False
 * seen_ellipsis = False
 */
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 640; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_result = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":641
 *
 * result = []
 * have_slices = False # <<<<<<<<<<<<<<
 * seen_ellipsis = False
 * for idx, item in enumerate(tup):
 */
__pyx_v_have_slices = 0;
/* "View.MemoryView":642
 * result = []
 * have_slices = False
 * seen_ellipsis = False # <<<<<<<<<<<<<<
 * for idx, item in enumerate(tup):
 * if item is Ellipsis:
 */
__pyx_v_seen_ellipsis = 0;
/* "View.MemoryView":643
 * have_slices = False
 * seen_ellipsis = False
 * for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
 * if item is Ellipsis:
 * if not seen_ellipsis:
 */
/* Manual enumerate(): __pyx_t_3 carries the running index object,
 * incremented at the top of each iteration; the fast paths below
 * iterate lists/tuples by position, others via tp_iternext. */
__Pyx_INCREF(__pyx_int_0);
__pyx_t_3 = __pyx_int_0;
if (PyList_CheckExact(__pyx_v_tup) || PyTuple_CheckExact(__pyx_v_tup)) {
__pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
__pyx_t_6 = NULL;
} else {
__pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext;
}
for (;;) {
if (!__pyx_t_6 && PyList_CheckExact(__pyx_t_4)) {
if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
} else if (!__pyx_t_6 && PyTuple_CheckExact(__pyx_t_4)) {
if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
} else {
__pyx_t_7 = __pyx_t_6(__pyx_t_4);
if (unlikely(!__pyx_t_7)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
break;
}
__Pyx_GOTREF(__pyx_t_7);
}
__Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7);
__pyx_t_7 = 0;
__Pyx_INCREF(__pyx_t_3);
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3);
__pyx_t_7 = PyNumber_Add(__pyx_t_3, __pyx_int_1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 643; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3);
__pyx_t_3 = __pyx_t_7;
__pyx_t_7 = 0;
/* "View.MemoryView":644
 * seen_ellipsis = False
 * for idx, item in enumerate(tup):
 * if item is Ellipsis: # <<<<<<<<<<<<<<
 * if not seen_ellipsis:
 * result.extend([slice(None)] * (ndim - len(tup) + 1))
 */
__pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":645
 * for idx, item in enumerate(tup):
 * if item is Ellipsis:
 * if not seen_ellipsis: # <<<<<<<<<<<<<<
 * result.extend([slice(None)] * (ndim - len(tup) + 1))
 * seen_ellipsis = True
 */
__pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":646
 * if item is Ellipsis:
 * if not seen_ellipsis:
 * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
 * seen_ellipsis = True
 * else:
 */
/* Build [slice(None)] * (ndim - len(tup) + 1) in-place: one slice(None)
 * object shared by every list slot (negative counts clamp to 0). */
__pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)(&PySlice_Type))), __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_9 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) {
__Pyx_INCREF(__pyx_t_7);
PyList_SET_ITEM(__pyx_t_9, __pyx_temp, __pyx_t_7);
__Pyx_GIVEREF(__pyx_t_7);
}
}
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__pyx_t_10 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_9); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "View.MemoryView":647
 * if not seen_ellipsis:
 * result.extend([slice(None)] * (ndim - len(tup) + 1))
 * seen_ellipsis = True # <<<<<<<<<<<<<<
 * else:
 * result.append(slice(None))
 */
__pyx_v_seen_ellipsis = 1;
goto __pyx_L7;
}
/*else*/ {
/* "View.MemoryView":649
 * seen_ellipsis = True
 * else:
 * result.append(slice(None)) # <<<<<<<<<<<<<<
 * have_slices = True
 * else:
 */
__pyx_t_9 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)(&PySlice_Type))), __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 649; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_10 = __Pyx_PyList_Append(__pyx_v_result, __pyx_t_9); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 649; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__pyx_L7:;
/* "View.MemoryView":650
 * else:
 * result.append(slice(None))
 * have_slices = True # <<<<<<<<<<<<<<
 * else:
 * if not isinstance(item, slice) and not PyIndex_Check(item):
 */
__pyx_v_have_slices = 1;
goto __pyx_L6;
}
/*else*/ {
/* "View.MemoryView":652
 * have_slices = True
 * else:
 * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
 * raise TypeError("Cannot index with type '%s'" % type(item))
 *
 */
__pyx_t_1 = PySlice_Check(__pyx_v_item);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
__pyx_t_1 = ((!(__Pyx_PyIndex_Check(__pyx_v_item) != 0)) != 0);
__pyx_t_11 = __pyx_t_1;
} else {
__pyx_t_11 = __pyx_t_2;
}
if (__pyx_t_11) {
/* "View.MemoryView":653
 * else:
 * if not isinstance(item, slice) and not PyIndex_Check(item):
 * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<<
 *
 * have_slices = have_slices or isinstance(item, slice)
 */
__pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 653; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_7 = PyTuple_New(1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 653; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_9);
__Pyx_GIVEREF(__pyx_t_9);
__pyx_t_9 = 0;
__pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_t_7, NULL); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 653; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_Raise(__pyx_t_9, 0, 0, 0);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 653; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "View.MemoryView":655
 * raise TypeError("Cannot index with type '%s'" % type(item))
 *
 * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<<
 * result.append(item)
 *
 */
if (!__pyx_v_have_slices) {
__pyx_t_11 = PySlice_Check(__pyx_v_item);
__pyx_t_2 = __pyx_t_11;
} else {
__pyx_t_2 = __pyx_v_have_slices;
}
__pyx_v_have_slices = __pyx_t_2;
/* "View.MemoryView":656
 *
 * have_slices = have_slices or isinstance(item, slice)
 * result.append(item) # <<<<<<<<<<<<<<
 *
 * nslices = ndim - len(result)
 */
__pyx_t_10 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 656; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L6:;
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":658
 * result.append(item)
 *
 * nslices = ndim - len(result) # <<<<<<<<<<<<<<
 * if nslices:
 * result.extend([slice(None)] * nslices)
 */
__pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 658; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5);
/* "View.MemoryView":659
 *
 * nslices = ndim - len(result)
 * if nslices: # <<<<<<<<<<<<<<
 * result.extend([slice(None)] * nslices)
 *
 */
__pyx_t_2 = (__pyx_v_nslices != 0);
if (__pyx_t_2) {
/* "View.MemoryView":660
 * nslices = ndim - len(result)
 * if nslices:
 * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
 *
 * return have_slices or nslices, tuple(result)
 */
__pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)((PyObject*)(&PySlice_Type))), __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) {
__Pyx_INCREF(__pyx_t_3);
PyList_SET_ITEM(__pyx_t_4, __pyx_temp, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
}
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_10 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_4); if (unlikely(__pyx_t_10 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
goto __pyx_L9;
}
__pyx_L9:;
/* "View.MemoryView":662
 * result.extend([slice(None)] * nslices)
 *
 * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<<
 *
 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
 */
/* First tuple element is `have_slices or nslices`: the bool if truthy,
 * otherwise nslices converted to a Python int. */
__Pyx_XDECREF(__pyx_r);
__pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 662; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_2 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 662; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (!__pyx_t_2) {
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 662; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_9 = __pyx_t_3;
__pyx_t_3 = 0;
} else {
__pyx_t_9 = __pyx_t_4;
__pyx_t_4 = 0;
}
__pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 662; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 662; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_9);
__Pyx_GIVEREF(__pyx_t_9);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_4);
__pyx_t_9 = 0;
__pyx_t_4 = 0;
__pyx_r = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":630
 * return isinstance(o, memoryview)
 *
 * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
 * """
 * Replace all ellipses with full slices and fill incomplete indices with
 */
/* function exit code */
/* Error path: release temporaries, record the traceback, and fall
 * through to the common cleanup at __pyx_L0 with __pyx_r == NULL. */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_tup);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_XDECREF(__pyx_v_item);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":664
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* cdef int i
* for i in range(ndim):
*/
/*
 * assert_direct_dimensions: raise ValueError("Indirect dimensions not
 * supported") if any of the first `ndim` entries of `suboffsets` is
 * >= 0, i.e. the buffer uses PEP 3118 indirect (pointer-chasing)
 * dimensions, which this memoryview implementation cannot slice.
 * Returns a new reference to None on success, NULL with an exception
 * set on failure.  Generated by Cython from View.MemoryView:664.
 */
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
int __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
/* "View.MemoryView":666
 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
 * cdef int i
 * for i in range(ndim): # <<<<<<<<<<<<<<
 * if suboffsets[i] >= 0:
 * raise ValueError("Indirect dimensions not supported")
 */
__pyx_t_1 = __pyx_v_ndim;
for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
__pyx_v_i = __pyx_t_2;
/* "View.MemoryView":667
 * cdef int i
 * for i in range(ndim):
 * if suboffsets[i] >= 0: # <<<<<<<<<<<<<<
 * raise ValueError("Indirect dimensions not supported")
 *
 */
/* A suboffset >= 0 marks an indirect dimension (negative means direct). */
__pyx_t_3 = (((__pyx_v_suboffsets[__pyx_v_i]) >= 0) != 0);
if (__pyx_t_3) {
/* "View.MemoryView":668
 * for i in range(ndim):
 * if suboffsets[i] >= 0:
 * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 668; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 668; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
}
/* "View.MemoryView":664
 * return have_slices or nslices, tuple(result)
 *
 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
 * cdef int i
 * for i in range(ndim):
 */
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":675
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
/*
 * memview_slice (exported as __pyx_memview_slice): build a new memoryview
 * object viewing `memview` sliced by `indices` (an index tuple already
 * normalized, e.g. by _unellipsify).  Per index: an integer collapses
 * that source dimension, None inserts a new length-1 dimension, and a
 * slice object keeps a (possibly strided) dimension; the per-dimension
 * work is delegated to slice_memviewslice.  Returns a new reference to
 * the resulting memoryview, or NULL with an exception set.
 * Generated by Cython from View.MemoryView:675.
 */
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
int __pyx_v_new_ndim;
int __pyx_v_suboffset_dim;
int __pyx_v_dim;
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
__Pyx_memviewslice *__pyx_v_p_src;
struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
__Pyx_memviewslice *__pyx_v_p_dst;
int *__pyx_v_p_suboffset_dim;
Py_ssize_t __pyx_v_start;
Py_ssize_t __pyx_v_stop;
Py_ssize_t __pyx_v_step;
int __pyx_v_have_start;
int __pyx_v_have_stop;
int __pyx_v_have_step;
PyObject *__pyx_v_index = NULL;
struct __pyx_memoryview_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
struct __pyx_memoryview_obj *__pyx_t_4;
char *__pyx_t_5;
int __pyx_t_6;
Py_ssize_t __pyx_t_7;
PyObject *(*__pyx_t_8)(PyObject *);
PyObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
PyObject *__pyx_t_12 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memview_slice", 0);
/* "View.MemoryView":676
 * @cname('__pyx_memview_slice')
 * cdef memoryview memview_slice(memoryview memview, object indices):
 * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<<
 * cdef bint negative_step
 * cdef __Pyx_memviewslice src, dst
 */
__pyx_v_new_ndim = 0;
__pyx_v_suboffset_dim = -1;
/* "View.MemoryView":683
 *
 *
 * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<<
 *
 * cdef _memoryviewslice memviewsliceobj
 */
memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)));
/* "View.MemoryView":687
 * cdef _memoryviewslice memviewsliceobj
 *
 * assert memview.view.ndim > 0 # <<<<<<<<<<<<<<
 *
 * if isinstance(memview, _memoryviewslice):
 */
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
{__pyx_filename = __pyx_f[2]; __pyx_lineno = 687; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
}
#endif
/* "View.MemoryView":689
 * assert memview.view.ndim > 0
 *
 * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
 * memviewsliceobj = memview
 * p_src = &memviewsliceobj.from_slice
 */
/* A _memoryviewslice already carries a __Pyx_memviewslice; reuse it
 * directly instead of re-deriving one from the buffer view. */
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type));
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":690
 *
 * if isinstance(memview, _memoryviewslice):
 * memviewsliceobj = memview # <<<<<<<<<<<<<<
 * p_src = &memviewsliceobj.from_slice
 * else:
 */
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 690; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":691
 * if isinstance(memview, _memoryviewslice):
 * memviewsliceobj = memview
 * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<<
 * else:
 * slice_copy(memview, &src)
 */
__pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
goto __pyx_L3;
}
/*else*/ {
/* "View.MemoryView":693
 * p_src = &memviewsliceobj.from_slice
 * else:
 * slice_copy(memview, &src) # <<<<<<<<<<<<<<
 * p_src = &src
 *
 */
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
/* "View.MemoryView":694
 * else:
 * slice_copy(memview, &src)
 * p_src = &src # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_v_p_src = (&__pyx_v_src);
}
__pyx_L3:;
/* "View.MemoryView":700
 *
 *
 * dst.memview = p_src.memview # <<<<<<<<<<<<<<
 * dst.data = p_src.data
 *
 */
__pyx_t_4 = __pyx_v_p_src->memview;
__pyx_v_dst.memview = __pyx_t_4;
/* "View.MemoryView":701
 *
 * dst.memview = p_src.memview
 * dst.data = p_src.data # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_5 = __pyx_v_p_src->data;
__pyx_v_dst.data = __pyx_t_5;
/* "View.MemoryView":706
 *
 *
 * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<<
 * cdef int *p_suboffset_dim = &suboffset_dim
 * cdef Py_ssize_t start, stop, step
 */
__pyx_v_p_dst = (&__pyx_v_dst);
/* "View.MemoryView":707
 *
 * cdef __Pyx_memviewslice *p_dst = &dst
 * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<<
 * cdef Py_ssize_t start, stop, step
 * cdef bint have_start, have_stop, have_step
 */
__pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
/* "View.MemoryView":711
 * cdef bint have_start, have_stop, have_step
 *
 * for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
 * if PyIndex_Check(index):
 * slice_memviewslice(
 */
/* Manual enumerate(): __pyx_t_6 is the running source-dimension counter. */
__pyx_t_6 = 0;
if (PyList_CheckExact(__pyx_v_indices) || PyTuple_CheckExact(__pyx_v_indices)) {
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0;
__pyx_t_8 = NULL;
} else {
__pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext;
}
for (;;) {
if (!__pyx_t_8 && PyList_CheckExact(__pyx_t_3)) {
if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
} else if (!__pyx_t_8 && PyTuple_CheckExact(__pyx_t_3)) {
if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
} else {
__pyx_t_9 = __pyx_t_8(__pyx_t_3);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(exc_type == PyExc_StopIteration || PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else {__pyx_filename = __pyx_f[2]; __pyx_lineno = 711; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_v_dim = __pyx_t_6;
__pyx_t_6 = (__pyx_t_6 + 1);
/* "View.MemoryView":712
 *
 * for dim, index in enumerate(indices):
 * if PyIndex_Check(index): # <<<<<<<<<<<<<<
 * slice_memviewslice(
 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
 */
__pyx_t_2 = (__Pyx_PyIndex_Check(__pyx_v_index) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":716
 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
 * dim, new_ndim, p_suboffset_dim,
 * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<<
 * 0, 0, 0, # have_{start,stop,step}
 * False)
 */
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 716; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":713
 * for dim, index in enumerate(indices):
 * if PyIndex_Check(index):
 * slice_memviewslice( # <<<<<<<<<<<<<<
 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
 * dim, new_ndim, p_suboffset_dim,
 */
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 713; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
goto __pyx_L6;
}
/* "View.MemoryView":719
 * 0, 0, 0, # have_{start,stop,step}
 * False)
 * elif index is None: # <<<<<<<<<<<<<<
 * p_dst.shape[new_ndim] = 1
 * p_dst.strides[new_ndim] = 0
 */
/* None (np.newaxis-style): insert a broadcastable length-1 dimension. */
__pyx_t_2 = (__pyx_v_index == Py_None);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":720
 * False)
 * elif index is None:
 * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<<
 * p_dst.strides[new_ndim] = 0
 * p_dst.suboffsets[new_ndim] = -1
 */
(__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
/* "View.MemoryView":721
 * elif index is None:
 * p_dst.shape[new_ndim] = 1
 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<<
 * p_dst.suboffsets[new_ndim] = -1
 * new_ndim += 1
 */
(__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
/* "View.MemoryView":722
 * p_dst.shape[new_ndim] = 1
 * p_dst.strides[new_ndim] = 0
 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<<
 * new_ndim += 1
 * else:
 */
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1;
/* "View.MemoryView":723
 * p_dst.strides[new_ndim] = 0
 * p_dst.suboffsets[new_ndim] = -1
 * new_ndim += 1 # <<<<<<<<<<<<<<
 * else:
 * start = index.start or 0
 */
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
goto __pyx_L6;
}
/*else*/ {
/* "View.MemoryView":725
 * new_ndim += 1
 * else:
 * start = index.start or 0 # <<<<<<<<<<<<<<
 * stop = index.stop or 0
 * step = index.step or 0
 */
/* Slice object: extract start/stop/step (0 when absent) plus
 * have_* flags so slice_memviewslice can distinguish 0 from "not given". */
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_INCREF(__pyx_int_0);
__pyx_t_12 = __pyx_int_0;
} else {
__pyx_t_12 = __pyx_t_9;
__pyx_t_9 = 0;
}
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_t_12); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 725; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__pyx_v_start = __pyx_t_10;
/* "View.MemoryView":726
 * else:
 * start = index.start or 0
 * stop = index.stop or 0 # <<<<<<<<<<<<<<
 * step = index.step or 0
 *
 */
__pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 726; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_12);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_12); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 726; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__Pyx_INCREF(__pyx_int_0);
__pyx_t_9 = __pyx_int_0;
} else {
__pyx_t_9 = __pyx_t_12;
__pyx_t_12 = 0;
}
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 726; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_stop = __pyx_t_10;
/* "View.MemoryView":727
 * start = index.start or 0
 * stop = index.stop or 0
 * step = index.step or 0 # <<<<<<<<<<<<<<
 *
 * have_start = index.start is not None
 */
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 727; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 727; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__Pyx_INCREF(__pyx_int_0);
__pyx_t_12 = __pyx_int_0;
} else {
__pyx_t_12 = __pyx_t_9;
__pyx_t_9 = 0;
}
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_t_12); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 727; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__pyx_v_step = __pyx_t_10;
/* "View.MemoryView":729
 * step = index.step or 0
 *
 * have_start = index.start is not None # <<<<<<<<<<<<<<
 * have_stop = index.stop is not None
 * have_step = index.step is not None
 */
__pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 729; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_12);
__pyx_t_1 = (__pyx_t_12 != Py_None);
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__pyx_v_have_start = __pyx_t_1;
/* "View.MemoryView":730
 *
 * have_start = index.start is not None
 * have_stop = index.stop is not None # <<<<<<<<<<<<<<
 * have_step = index.step is not None
 *
 */
__pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 730; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_12);
__pyx_t_1 = (__pyx_t_12 != Py_None);
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__pyx_v_have_stop = __pyx_t_1;
/* "View.MemoryView":731
 * have_start = index.start is not None
 * have_stop = index.stop is not None
 * have_step = index.step is not None # <<<<<<<<<<<<<<
 *
 * slice_memviewslice(
 */
__pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_12)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 731; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_12);
__pyx_t_1 = (__pyx_t_12 != Py_None);
__Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
__pyx_v_have_step = __pyx_t_1;
/* "View.MemoryView":733
 * have_step = index.step is not None
 *
 * slice_memviewslice( # <<<<<<<<<<<<<<
 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
 * dim, new_ndim, p_suboffset_dim,
 */
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 733; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/* "View.MemoryView":739
 * have_start, have_stop, have_step,
 * True)
 * new_ndim += 1 # <<<<<<<<<<<<<<
 *
 * if isinstance(memview, _memoryviewslice):
 */
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
}
__pyx_L6:;
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":741
 * new_ndim += 1
 *
 * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
 * return memoryview_fromslice(dst, new_ndim,
 * memviewsliceobj.to_object_func,
 */
/* Propagate the source's object/dtype conversion callbacks when the
 * source itself was a _memoryviewslice; otherwise pass NULLs. */
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type));
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":742
 *
 * if isinstance(memview, _memoryviewslice):
 * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
 * memviewsliceobj.to_object_func,
 * memviewsliceobj.to_dtype_func,
 */
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":743
 * if isinstance(memview, _memoryviewslice):
 * return memoryview_fromslice(dst, new_ndim,
 * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<<
 * memviewsliceobj.to_dtype_func,
 * memview.dtype_is_object)
 */
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 743; __pyx_clineno = __LINE__; goto __pyx_L1_error;} }
/* "View.MemoryView":744
 * return memoryview_fromslice(dst, new_ndim,
 * memviewsliceobj.to_object_func,
 * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<<
 * memview.dtype_is_object)
 * else:
 */
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); {__pyx_filename = __pyx_f[2]; __pyx_lineno = 744; __pyx_clineno = __LINE__; goto __pyx_L1_error;} }
/* "View.MemoryView":742
 *
 * if isinstance(memview, _memoryviewslice):
 * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
 * memviewsliceobj.to_object_func,
 * memviewsliceobj.to_dtype_func,
 */
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 742; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 742; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/*else*/ {
/* "View.MemoryView":747
 * memview.dtype_is_object)
 * else:
 * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
 * memview.dtype_is_object)
 *
 */
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":748
 * else:
 * return memoryview_fromslice(dst, new_ndim, NULL, NULL,
 * memview.dtype_is_object) # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 747; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
/* "View.MemoryView":747
 * memview.dtype_is_object)
 * else:
 * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
 * memview.dtype_is_object)
 *
 */
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 747; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":675
 *
 * @cname('__pyx_memview_slice')
 * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
 * cdef int new_ndim = 0, suboffset_dim = -1, dim
 * cdef bint negative_step
 */
/* function exit code */
/* Error path: release temporaries, record the traceback, fall through
 * to the common cleanup at __pyx_L0 with __pyx_r == NULL. */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_XDECREF(__pyx_t_12);
__Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":772
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
/* [review] Cython-generated helper (do not hand-edit logic; regenerate from the .pyx).
 * Normalizes ONE dimension of a slice/index into an existing __Pyx_memviewslice.
 *
 * Parameters (mirroring the Cython signature at "View.MemoryView":772):
 *   dst            - destination slice whose shape/strides/suboffsets/data are filled in
 *   shape/stride/suboffset - source dimension's extent, stride (bytes), suboffset
 *   dim            - source dimension index (used only in error messages)
 *   new_ndim       - destination dimension index being written
 *   suboffset_dim  - in/out: last seen indirect dimension (<0 means "none yet")
 *   start/stop/step + have_* flags - slice components and whether each was given
 *   is_slice       - nonzero for a slice (a:b:c), zero for a plain integer index
 *
 * Returns 0 on success, -1 on error (exception set via _err_dim; the GIL is
 * acquired in the error path because this function runs nogil). */
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
  Py_ssize_t __pyx_v_new_shape;
  int __pyx_v_negative_step;
  int __pyx_r;
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
/* [review] Integer-index path: wrap a negative index and bounds-check it.
 * No new destination dimension is produced for a plain index. */
  /* "View.MemoryView":792
 * cdef bint negative_step
 * 
 * if not is_slice:             # <<<<<<<<<<<<<<
 * 
 *         if start < 0:
 */
  __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0);
  if (__pyx_t_1) {
    /* "View.MemoryView":794
 * if not is_slice:
 * 
 *         if start < 0:             # <<<<<<<<<<<<<<
 *             start += shape
 *         if not 0 <= start < shape:
 */
    __pyx_t_1 = ((__pyx_v_start < 0) != 0);
    if (__pyx_t_1) {
      /* "View.MemoryView":795
 * 
 *         if start < 0:
 *             start += shape             # <<<<<<<<<<<<<<
 *         if not 0 <= start < shape:
 *             _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
 */
      __pyx_v_start = (__pyx_v_start + __pyx_v_shape);
      goto __pyx_L4;
    }
    __pyx_L4:;
    /* "View.MemoryView":796
 *         if start < 0:
 *             start += shape
 *         if not 0 <= start < shape:             # <<<<<<<<<<<<<<
 *             _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
 *     else:
 */
    __pyx_t_1 = (0 <= __pyx_v_start);
    if (__pyx_t_1) {
      __pyx_t_1 = (__pyx_v_start < __pyx_v_shape);
    }
    __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
    if (__pyx_t_2) {
      /* "View.MemoryView":797
 *             start += shape
 *         if not 0 <= start < shape:
 *             _err_dim(IndexError, "Index out of bounds (axis %d)", dim)             # <<<<<<<<<<<<<<
 *     else:
 * 
 */
      __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, __pyx_k_Index_out_of_bounds_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      goto __pyx_L5;
    }
    __pyx_L5:;
    goto __pyx_L3;
  }
  /*else*/ {
/* [review] Slice path: clamp start/stop to [0, shape] (Python slice semantics),
 * with defaults depending on the sign of step, then derive the new extent. */
    /* "View.MemoryView":800
 *             _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
 *     else:
 * 
 *         negative_step = have_step != 0 and step < 0             # <<<<<<<<<<<<<<
 * 
 *         if have_step and step == 0:
 */
    __pyx_t_2 = (__pyx_v_have_step != 0);
    if (__pyx_t_2) {
      __pyx_t_1 = (__pyx_v_step < 0);
      __pyx_t_4 = __pyx_t_1;
    } else {
      __pyx_t_4 = __pyx_t_2;
    }
    __pyx_v_negative_step = __pyx_t_4;
    /* "View.MemoryView":802
 *         negative_step = have_step != 0 and step < 0
 * 
 *         if have_step and step == 0:             # <<<<<<<<<<<<<<
 *             _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
 * 
 */
    if ((__pyx_v_have_step != 0)) {
      __pyx_t_4 = (__pyx_v_step == 0);
      __pyx_t_2 = __pyx_t_4;
    } else {
      __pyx_t_2 = (__pyx_v_have_step != 0);
    }
    if (__pyx_t_2) {
      /* "View.MemoryView":803
 * 
 *         if have_step and step == 0:
 *             _err_dim(ValueError, "Step may not be zero (axis %d)", dim)             # <<<<<<<<<<<<<<
 * 
 * 
 */
      __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, __pyx_k_Step_may_not_be_zero_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      goto __pyx_L6;
    }
    __pyx_L6:;
    /* "View.MemoryView":806
 * 
 * 
 *         if have_start:             # <<<<<<<<<<<<<<
 *             if start < 0:
 *                 start += shape
 */
    __pyx_t_2 = (__pyx_v_have_start != 0);
    if (__pyx_t_2) {
      /* "View.MemoryView":807
 * 
 *         if have_start:
 *             if start < 0:             # <<<<<<<<<<<<<<
 *                 start += shape
 *                 if start < 0:
 */
      __pyx_t_2 = ((__pyx_v_start < 0) != 0);
      if (__pyx_t_2) {
        /* "View.MemoryView":808
 *         if have_start:
 *             if start < 0:
 *                 start += shape             # <<<<<<<<<<<<<<
 *                 if start < 0:
 *                     start = 0
 */
        __pyx_v_start = (__pyx_v_start + __pyx_v_shape);
        /* "View.MemoryView":809
 *             if start < 0:
 *                 start += shape
 *                 if start < 0:             # <<<<<<<<<<<<<<
 *                     start = 0
 *             elif start >= shape:
 */
        __pyx_t_2 = ((__pyx_v_start < 0) != 0);
        if (__pyx_t_2) {
          /* "View.MemoryView":810
 *                 start += shape
 *                 if start < 0:
 *                     start = 0             # <<<<<<<<<<<<<<
 *             elif start >= shape:
 *                 if negative_step:
 */
          __pyx_v_start = 0;
          goto __pyx_L9;
        }
        __pyx_L9:;
        goto __pyx_L8;
      }
      /* "View.MemoryView":811
 *                 if start < 0:
 *                     start = 0
 *             elif start >= shape:             # <<<<<<<<<<<<<<
 *                 if negative_step:
 *                     start = shape - 1
 */
      __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0);
      if (__pyx_t_2) {
        /* "View.MemoryView":812
 *                     start = 0
 *             elif start >= shape:
 *                 if negative_step:             # <<<<<<<<<<<<<<
 *                     start = shape - 1
 *                 else:
 */
        __pyx_t_2 = (__pyx_v_negative_step != 0);
        if (__pyx_t_2) {
          /* "View.MemoryView":813
 *             elif start >= shape:
 *                 if negative_step:
 *                     start = shape - 1             # <<<<<<<<<<<<<<
 *                 else:
 *                     start = shape
 */
          __pyx_v_start = (__pyx_v_shape - 1);
          goto __pyx_L10;
        }
        /*else*/ {
          /* "View.MemoryView":815
 *                     start = shape - 1
 *                 else:
 *                     start = shape             # <<<<<<<<<<<<<<
 *         else:
 *             if negative_step:
 */
          __pyx_v_start = __pyx_v_shape;
        }
        __pyx_L10:;
        goto __pyx_L8;
      }
      __pyx_L8:;
      goto __pyx_L7;
    }
    /*else*/ {
      /* "View.MemoryView":817
 *                     start = shape
 *         else:
 *             if negative_step:             # <<<<<<<<<<<<<<
 *                 start = shape - 1
 *             else:
 */
      __pyx_t_2 = (__pyx_v_negative_step != 0);
      if (__pyx_t_2) {
        /* "View.MemoryView":818
 *         else:
 *             if negative_step:
 *                 start = shape - 1             # <<<<<<<<<<<<<<
 *             else:
 *                 start = 0
 */
        __pyx_v_start = (__pyx_v_shape - 1);
        goto __pyx_L11;
      }
      /*else*/ {
        /* "View.MemoryView":820
 *                 start = shape - 1
 *             else:
 *                 start = 0             # <<<<<<<<<<<<<<
 * 
 *         if have_stop:
 */
        __pyx_v_start = 0;
      }
      __pyx_L11:;
    }
    __pyx_L7:;
    /* "View.MemoryView":822
 *                 start = 0
 * 
 *         if have_stop:             # <<<<<<<<<<<<<<
 *             if stop < 0:
 *                 stop += shape
 */
    __pyx_t_2 = (__pyx_v_have_stop != 0);
    if (__pyx_t_2) {
      /* "View.MemoryView":823
 * 
 *         if have_stop:
 *             if stop < 0:             # <<<<<<<<<<<<<<
 *                 stop += shape
 *                 if stop < 0:
 */
      __pyx_t_2 = ((__pyx_v_stop < 0) != 0);
      if (__pyx_t_2) {
        /* "View.MemoryView":824
 *         if have_stop:
 *             if stop < 0:
 *                 stop += shape             # <<<<<<<<<<<<<<
 *                 if stop < 0:
 *                     stop = 0
 */
        __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape);
        /* "View.MemoryView":825
 *             if stop < 0:
 *                 stop += shape
 *                 if stop < 0:             # <<<<<<<<<<<<<<
 *                     stop = 0
 *             elif stop > shape:
 */
        __pyx_t_2 = ((__pyx_v_stop < 0) != 0);
        if (__pyx_t_2) {
          /* "View.MemoryView":826
 *                 stop += shape
 *                 if stop < 0:
 *                     stop = 0             # <<<<<<<<<<<<<<
 *             elif stop > shape:
 *                 stop = shape
 */
          __pyx_v_stop = 0;
          goto __pyx_L14;
        }
        __pyx_L14:;
        goto __pyx_L13;
      }
      /* "View.MemoryView":827
 *                 if stop < 0:
 *                     stop = 0
 *             elif stop > shape:             # <<<<<<<<<<<<<<
 *                 stop = shape
 *         else:
 */
      __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0);
      if (__pyx_t_2) {
        /* "View.MemoryView":828
 *                     stop = 0
 *             elif stop > shape:
 *                 stop = shape             # <<<<<<<<<<<<<<
 *         else:
 *             if negative_step:
 */
        __pyx_v_stop = __pyx_v_shape;
        goto __pyx_L13;
      }
      __pyx_L13:;
      goto __pyx_L12;
    }
    /*else*/ {
      /* "View.MemoryView":830
 *                 stop = shape
 *         else:
 *             if negative_step:             # <<<<<<<<<<<<<<
 *                 stop = -1
 *             else:
 */
      __pyx_t_2 = (__pyx_v_negative_step != 0);
      if (__pyx_t_2) {
        /* "View.MemoryView":831
 *         else:
 *             if negative_step:
 *                 stop = -1             # <<<<<<<<<<<<<<
 *             else:
 *                 stop = shape
 */
        __pyx_v_stop = -1;
        goto __pyx_L15;
      }
      /*else*/ {
        /* "View.MemoryView":833
 *                 stop = -1
 *             else:
 *                 stop = shape             # <<<<<<<<<<<<<<
 * 
 *         if not have_step:
 */
        __pyx_v_stop = __pyx_v_shape;
      }
      __pyx_L15:;
    }
    __pyx_L12:;
    /* "View.MemoryView":835
 *                 stop = shape
 * 
 *         if not have_step:             # <<<<<<<<<<<<<<
 *             step = 1
 * 
 */
    __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0);
    if (__pyx_t_2) {
      /* "View.MemoryView":836
 * 
 *         if not have_step:
 *             step = 1             # <<<<<<<<<<<<<<
 * 
 * 
 */
      __pyx_v_step = 1;
      goto __pyx_L16;
    }
    __pyx_L16:;
/* [review] new_shape = ceil((stop - start) / step): C division truncates toward
 * zero (the Cython source enables cdivision here), so a nonzero remainder means
 * one more element fits; negative results are clamped to an empty extent. */
    /* "View.MemoryView":840
 * 
 *         with cython.cdivision(True):
 *             new_shape = (stop - start) // step             # <<<<<<<<<<<<<<
 * 
 *             if (stop - start) - step * new_shape:
 */
    __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
    /* "View.MemoryView":842
 *             new_shape = (stop - start) // step
 * 
 *             if (stop - start) - step * new_shape:             # <<<<<<<<<<<<<<
 *                 new_shape += 1
 * 
 */
    __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0);
    if (__pyx_t_2) {
      /* "View.MemoryView":843
 * 
 *             if (stop - start) - step * new_shape:
 *                 new_shape += 1             # <<<<<<<<<<<<<<
 * 
 *         if new_shape < 0:
 */
      __pyx_v_new_shape = (__pyx_v_new_shape + 1);
      goto __pyx_L17;
    }
    __pyx_L17:;
    /* "View.MemoryView":845
 *                 new_shape += 1
 * 
 *         if new_shape < 0:             # <<<<<<<<<<<<<<
 *             new_shape = 0
 * 
 */
    __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0);
    if (__pyx_t_2) {
      /* "View.MemoryView":846
 * 
 *         if new_shape < 0:
 *             new_shape = 0             # <<<<<<<<<<<<<<
 * 
 * 
 */
      __pyx_v_new_shape = 0;
      goto __pyx_L18;
    }
    __pyx_L18:;
/* [review] A slice keeps the dimension: publish the new extent, the scaled
 * stride, and the (propagated) suboffset in destination dimension new_ndim. */
    /* "View.MemoryView":849
 * 
 * 
 *         dst.strides[new_ndim] = stride * step             # <<<<<<<<<<<<<<
 *         dst.shape[new_ndim] = new_shape
 *         dst.suboffsets[new_ndim] = suboffset
 */
    (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
    /* "View.MemoryView":850
 * 
 *         dst.strides[new_ndim] = stride * step
 *         dst.shape[new_ndim] = new_shape             # <<<<<<<<<<<<<<
 *         dst.suboffsets[new_ndim] = suboffset
 * 
 */
    (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
    /* "View.MemoryView":851
 *         dst.strides[new_ndim] = stride * step
 *         dst.shape[new_ndim] = new_shape
 *         dst.suboffsets[new_ndim] = suboffset             # <<<<<<<<<<<<<<
 * 
 * 
 */
    (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
  }
  __pyx_L3:;
/* [review] Apply the start offset: directly to the data pointer while no
 * indirect (suboffset) dimension has been crossed, otherwise to the suboffset
 * of the last indirect dimension so dereferencing happens at access time. */
  /* "View.MemoryView":854
 * 
 * 
 *     if suboffset_dim[0] < 0:             # <<<<<<<<<<<<<<
 *         dst.data += start * stride
 *     else:
 */
  __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":855
 * 
 *     if suboffset_dim[0] < 0:
 *         dst.data += start * stride             # <<<<<<<<<<<<<<
 *     else:
 *         dst.suboffsets[suboffset_dim[0]] += start * stride
 */
    __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
    goto __pyx_L19;
  }
  /*else*/ {
    /* "View.MemoryView":857
 *         dst.data += start * stride
 *     else:
 *         dst.suboffsets[suboffset_dim[0]] += start * stride             # <<<<<<<<<<<<<<
 * 
 *     if suboffset >= 0:
 */
    __pyx_t_3 = (__pyx_v_suboffset_dim[0]);
    (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
  }
  __pyx_L19:;
/* [review] Indirect dimension bookkeeping: an integer index through an indirect
 * dimension is only legal at the outermost level (new_ndim == 0), where the
 * pointer can be chased immediately; a slice records it in suboffset_dim. */
  /* "View.MemoryView":859
 *         dst.suboffsets[suboffset_dim[0]] += start * stride
 * 
 *     if suboffset >= 0:             # <<<<<<<<<<<<<<
 *         if not is_slice:
 *             if new_ndim == 0:
 */
  __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":860
 * 
 *     if suboffset >= 0:
 *         if not is_slice:             # <<<<<<<<<<<<<<
 *             if new_ndim == 0:
 *                 dst.data = (<char **> dst.data)[0] + suboffset
 */
    __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0);
    if (__pyx_t_2) {
      /* "View.MemoryView":861
 *     if suboffset >= 0:
 *         if not is_slice:
 *             if new_ndim == 0:             # <<<<<<<<<<<<<<
 *                 dst.data = (<char **> dst.data)[0] + suboffset
 *             else:
 */
      __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0);
      if (__pyx_t_2) {
        /* "View.MemoryView":862
 *         if not is_slice:
 *             if new_ndim == 0:
 *                 dst.data = (<char **> dst.data)[0] + suboffset             # <<<<<<<<<<<<<<
 *             else:
 *                 _err_dim(IndexError, "All dimensions preceding dimension %d "
 */
        __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
        goto __pyx_L22;
      }
      /*else*/ {
        /* "View.MemoryView":864
 *                 dst.data = (<char **> dst.data)[0] + suboffset
 *             else:
 *                 _err_dim(IndexError, "All dimensions preceding dimension %d "             # <<<<<<<<<<<<<<
 *                          "must be indexed and not sliced", dim)
 *         else:
 */
        __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, __pyx_k_All_dimensions_preceding_dimensi, __pyx_v_dim); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 864; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      }
      __pyx_L22:;
      goto __pyx_L21;
    }
    /*else*/ {
      /* "View.MemoryView":867
 *                          "must be indexed and not sliced", dim)
 *         else:
 *             suboffset_dim[0] = new_ndim             # <<<<<<<<<<<<<<
 * 
 *     return 0
 */
      (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
    }
    __pyx_L21:;
    goto __pyx_L20;
  }
  __pyx_L20:;
  /* "View.MemoryView":869
 *             suboffset_dim[0] = new_ndim
 * 
 *     return 0             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = 0;
  goto __pyx_L0;
  /* "View.MemoryView":772
 * 
 * @cname('__pyx_memoryview_slice_memviewslice')
 * cdef int slice_memviewslice(             # <<<<<<<<<<<<<<
 *         __Pyx_memviewslice *dst,
 *         Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
 */
  /* function exit code */
  __pyx_L1_error:;
/* [review] Error path of a nogil function: the GIL must be (re)acquired before
 * touching the traceback machinery, hence the PyGILState_Ensure/Release pair. */
  {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
  }
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}
/* "View.MemoryView":875
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
/* [review] Cython-generated helper (do not hand-edit logic; regenerate from the .pyx).
 * Computes the address of element `index` along dimension `dim` of a Py_buffer,
 * starting from base pointer `bufp`. Handles negative (wrap-around) indices,
 * bounds checking, and PIL-style indirect buffers (suboffsets).
 * Returns the element pointer, or NULL with an IndexError set (except-NULL
 * convention from the Cython declaration at "View.MemoryView":875). */
static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
  Py_ssize_t __pyx_v_shape;
  Py_ssize_t __pyx_v_stride;
  Py_ssize_t __pyx_v_suboffset;
  Py_ssize_t __pyx_v_itemsize;
  char *__pyx_v_resultp;
  char *__pyx_r;
  __Pyx_RefNannyDeclarations
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("pybuffer_index", 0);
  /* "View.MemoryView":877
 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
 *                           Py_ssize_t dim) except NULL:
 *     cdef Py_ssize_t shape, stride, suboffset = -1             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t itemsize = view.itemsize
 *     cdef char *resultp
 */
  __pyx_v_suboffset = -1;
  /* "View.MemoryView":878
 *                           Py_ssize_t dim) except NULL:
 *     cdef Py_ssize_t shape, stride, suboffset = -1
 *     cdef Py_ssize_t itemsize = view.itemsize             # <<<<<<<<<<<<<<
 *     cdef char *resultp
 * 
 */
  __pyx_t_1 = __pyx_v_view->itemsize;
  __pyx_v_itemsize = __pyx_t_1;
/* [review] 0-dim buffers carry no shape/strides arrays; synthesize a flat
 * 1-D view (shape = len/itemsize, stride = itemsize) so indexing works. */
  /* "View.MemoryView":881
 *     cdef char *resultp
 * 
 *     if view.ndim == 0:             # <<<<<<<<<<<<<<
 *         shape = view.len / itemsize
 *         stride = itemsize
 */
  __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":882
 * 
 *     if view.ndim == 0:
 *         shape = view.len / itemsize             # <<<<<<<<<<<<<<
 *         stride = itemsize
 *     else:
 */
/* [review] Cython-emitted Python-semantics division guards: zero divisor and
 * the PY_SSIZE_T_MIN / -1 overflow case both raise instead of invoking UB.
 * The GIL is taken around PyErr_SetString because callers may run nogil. */
    if (unlikely(__pyx_v_itemsize == 0)) {
      #ifdef WITH_THREAD
      PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
      #endif
      PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
      #ifdef WITH_THREAD
      PyGILState_Release(__pyx_gilstate_save);
      #endif
      {__pyx_filename = __pyx_f[2]; __pyx_lineno = 882; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    }
    else if (sizeof(Py_ssize_t) == sizeof(long) && unlikely(__pyx_v_itemsize == -1)  && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
      #ifdef WITH_THREAD
      PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
      #endif
      PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
      #ifdef WITH_THREAD
      PyGILState_Release(__pyx_gilstate_save);
      #endif
      {__pyx_filename = __pyx_f[2]; __pyx_lineno = 882; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    }
    __pyx_v_shape = (__pyx_v_view->len / __pyx_v_itemsize);
    /* "View.MemoryView":883
 *     if view.ndim == 0:
 *         shape = view.len / itemsize
 *         stride = itemsize             # <<<<<<<<<<<<<<
 *     else:
 *         shape = view.shape[dim]
 */
    __pyx_v_stride = __pyx_v_itemsize;
    goto __pyx_L3;
  }
  /*else*/ {
    /* "View.MemoryView":885
 *         stride = itemsize
 *     else:
 *         shape = view.shape[dim]             # <<<<<<<<<<<<<<
 *         stride = view.strides[dim]
 *         if view.suboffsets != NULL:
 */
    __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
    /* "View.MemoryView":886
 *     else:
 *         shape = view.shape[dim]
 *         stride = view.strides[dim]             # <<<<<<<<<<<<<<
 *         if view.suboffsets != NULL:
 *             suboffset = view.suboffsets[dim]
 */
    __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
    /* "View.MemoryView":887
 *         shape = view.shape[dim]
 *         stride = view.strides[dim]
 *         if view.suboffsets != NULL:             # <<<<<<<<<<<<<<
 *             suboffset = view.suboffsets[dim]
 * 
 */
    __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0);
    if (__pyx_t_2) {
      /* "View.MemoryView":888
 *         stride = view.strides[dim]
 *         if view.suboffsets != NULL:
 *             suboffset = view.suboffsets[dim]             # <<<<<<<<<<<<<<
 * 
 *     if index < 0:
 */
      __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
      goto __pyx_L4;
    }
    __pyx_L4:;
  }
  __pyx_L3:;
/* [review] Python negative-index wrap, then bounds check on both sides;
 * out-of-range raises IndexError naming the offending axis. */
  /* "View.MemoryView":890
 *             suboffset = view.suboffsets[dim]
 * 
 *     if index < 0:             # <<<<<<<<<<<<<<
 *         index += view.shape[dim]
 *         if index < 0:
 */
  __pyx_t_2 = ((__pyx_v_index < 0) != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":891
 * 
 *     if index < 0:
 *         index += view.shape[dim]             # <<<<<<<<<<<<<<
 *         if index < 0:
 *             raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 */
    __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
    /* "View.MemoryView":892
 *     if index < 0:
 *         index += view.shape[dim]
 *         if index < 0:             # <<<<<<<<<<<<<<
 *             raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 * 
 */
    __pyx_t_2 = ((__pyx_v_index < 0) != 0);
    if (__pyx_t_2) {
      /* "View.MemoryView":893
 *         index += view.shape[dim]
 *         if index < 0:
 *             raise IndexError("Out of bounds on buffer access (axis %d)" % dim)             # <<<<<<<<<<<<<<
 * 
 *     if index >= shape:
 */
      __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 893; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_3);
      __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 893; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_4);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 893; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_3);
      PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
      __Pyx_GIVEREF(__pyx_t_4);
      __pyx_t_4 = 0;
      __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 893; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      __Pyx_GOTREF(__pyx_t_4);
      __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
      __Pyx_Raise(__pyx_t_4, 0, 0, 0);
      __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
      {__pyx_filename = __pyx_f[2]; __pyx_lineno = 893; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    }
    goto __pyx_L5;
  }
  __pyx_L5:;
  /* "View.MemoryView":895
 *             raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 * 
 *     if index >= shape:             # <<<<<<<<<<<<<<
 *         raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 * 
 */
  __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":896
 * 
 *     if index >= shape:
 *         raise IndexError("Out of bounds on buffer access (axis %d)" % dim)             # <<<<<<<<<<<<<<
 * 
 *     resultp = bufp + index * stride
 */
    __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 896; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_4);
    __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 896; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 896; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_4);
    PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
    __Pyx_GIVEREF(__pyx_t_3);
    __pyx_t_3 = 0;
    __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 896; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
    __Pyx_Raise(__pyx_t_3, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    {__pyx_filename = __pyx_f[2]; __pyx_lineno = 896; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
/* [review] Strided address computation; for an indirect dimension
 * (suboffset >= 0) the slot holds a pointer that must be dereferenced and
 * offset, per the PEP 3118 suboffsets convention. */
  /* "View.MemoryView":898
 *         raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
 * 
 *     resultp = bufp + index * stride             # <<<<<<<<<<<<<<
 *     if suboffset >= 0:
 *         resultp = (<char **> resultp)[0] + suboffset
 */
  __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
  /* "View.MemoryView":899
 * 
 *     resultp = bufp + index * stride
 *     if suboffset >= 0:             # <<<<<<<<<<<<<<
 *         resultp = (<char **> resultp)[0] + suboffset
 * 
 */
  __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":900
 *     resultp = bufp + index * stride
 *     if suboffset >= 0:
 *         resultp = (<char **> resultp)[0] + suboffset             # <<<<<<<<<<<<<<
 * 
 *     return resultp
 */
    __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
    goto __pyx_L8;
  }
  __pyx_L8:;
  /* "View.MemoryView":902
 *         resultp = (<char **> resultp)[0] + suboffset
 * 
 *     return resultp             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = __pyx_v_resultp;
  goto __pyx_L0;
  /* "View.MemoryView":875
 * 
 * @cname('__pyx_pybuffer_index')
 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,             # <<<<<<<<<<<<<<
 *                           Py_ssize_t dim) except NULL:
 *     cdef Py_ssize_t shape, stride, suboffset = -1
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = NULL;
  __pyx_L0:;
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":908
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
/* [review] Cython-generated helper (do not hand-edit logic; regenerate from the .pyx).
 * Transposes a memoryview slice IN PLACE by reversing its shape and strides
 * arrays (equivalent to numpy's .T). Runs nogil; declared `except 0`, so it
 * returns 1 on success and 0 on error. Fails with ValueError if any swapped
 * dimension is indirect (suboffset >= 0), since pointer-chased dimensions
 * cannot simply be reordered. */
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
  int __pyx_v_ndim;
  Py_ssize_t *__pyx_v_shape;
  Py_ssize_t *__pyx_v_strides;
  int __pyx_v_i;
  int __pyx_v_j;
  int __pyx_r;
  int __pyx_t_1;
  Py_ssize_t *__pyx_t_2;
  long __pyx_t_3;
  Py_ssize_t __pyx_t_4;
  Py_ssize_t __pyx_t_5;
  int __pyx_t_6;
  int __pyx_t_7;
  int __pyx_t_8;
  int __pyx_t_9;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  /* "View.MemoryView":909
 * @cname('__pyx_memslice_transpose')
 * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:
 *     cdef int ndim = memslice.memview.view.ndim             # <<<<<<<<<<<<<<
 * 
 *     cdef Py_ssize_t *shape = memslice.shape
 */
  __pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
  __pyx_v_ndim = __pyx_t_1;
  /* "View.MemoryView":911
 *     cdef int ndim = memslice.memview.view.ndim
 * 
 *     cdef Py_ssize_t *shape = memslice.shape             # <<<<<<<<<<<<<<
 *     cdef Py_ssize_t *strides = memslice.strides
 * 
 */
  __pyx_t_2 = __pyx_v_memslice->shape;
  __pyx_v_shape = __pyx_t_2;
  /* "View.MemoryView":912
 * 
 *     cdef Py_ssize_t *shape = memslice.shape
 *     cdef Py_ssize_t *strides = memslice.strides             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_t_2 = __pyx_v_memslice->strides;
  __pyx_v_strides = __pyx_t_2;
/* [review] Swap entries pairwise from both ends toward the middle; ndim/2
 * iterations suffice (the middle element of an odd ndim stays in place). */
  /* "View.MemoryView":916
 * 
 *     cdef int i, j
 *     for i in range(ndim / 2):             # <<<<<<<<<<<<<<
 *         j = ndim - 1 - i
 *         strides[i], strides[j] = strides[j], strides[i]
 */
  __pyx_t_3 = (__pyx_v_ndim / 2);
  for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_3; __pyx_t_1+=1) {
    __pyx_v_i = __pyx_t_1;
    /* "View.MemoryView":917
 *     cdef int i, j
 *     for i in range(ndim / 2):
 *         j = ndim - 1 - i             # <<<<<<<<<<<<<<
 *         strides[i], strides[j] = strides[j], strides[i]
 *         shape[i], shape[j] = shape[j], shape[i]
 */
    __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);
    /* "View.MemoryView":918
 *     for i in range(ndim / 2):
 *         j = ndim - 1 - i
 *         strides[i], strides[j] = strides[j], strides[i]             # <<<<<<<<<<<<<<
 *         shape[i], shape[j] = shape[j], shape[i]
 * 
 */
    __pyx_t_4 = (__pyx_v_strides[__pyx_v_j]);
    __pyx_t_5 = (__pyx_v_strides[__pyx_v_i]);
    (__pyx_v_strides[__pyx_v_i]) = __pyx_t_4;
    (__pyx_v_strides[__pyx_v_j]) = __pyx_t_5;
    /* "View.MemoryView":919
 *         j = ndim - 1 - i
 *         strides[i], strides[j] = strides[j], strides[i]
 *         shape[i], shape[j] = shape[j], shape[i]             # <<<<<<<<<<<<<<
 * 
 *         if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
 */
    __pyx_t_5 = (__pyx_v_shape[__pyx_v_j]);
    __pyx_t_4 = (__pyx_v_shape[__pyx_v_i]);
    (__pyx_v_shape[__pyx_v_i]) = __pyx_t_5;
    (__pyx_v_shape[__pyx_v_j]) = __pyx_t_4;
    /* "View.MemoryView":921
 *         shape[i], shape[j] = shape[j], shape[i]
 * 
 *         if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:             # <<<<<<<<<<<<<<
 *             _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
 * 
 */
    __pyx_t_6 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0);
    if (!__pyx_t_6) {
      __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0);
      __pyx_t_8 = __pyx_t_7;
    } else {
      __pyx_t_8 = __pyx_t_6;
    }
    if (__pyx_t_8) {
      /* "View.MemoryView":922
 * 
 *         if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
 *             _err(ValueError, "Cannot transpose memoryview with indirect dimensions")             # <<<<<<<<<<<<<<
 * 
 *     return 1
 */
      __pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, __pyx_k_Cannot_transpose_memoryview_with); if (unlikely(__pyx_t_9 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 922; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      goto __pyx_L5;
    }
    __pyx_L5:;
  }
  /* "View.MemoryView":924
 *             _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
 * 
 *     return 1             # <<<<<<<<<<<<<<
 * 
 * 
 */
  __pyx_r = 1;
  goto __pyx_L0;
  /* "View.MemoryView":908
 * 
 * @cname('__pyx_memslice_transpose')
 * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:             # <<<<<<<<<<<<<<
 *     cdef int ndim = memslice.memview.view.ndim
 * 
 */
  /* function exit code */
  __pyx_L1_error:;
/* [review] nogil error path: acquire the GIL before recording the traceback. */
  {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
  }
  __pyx_r = 0;
  __pyx_L0:;
  return __pyx_r;
}
/* "View.MemoryView":941
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* Python wrapper */
/* [review] Generated Python-level wrapper for _memoryviewslice.__dealloc__:
 * downcasts the PyObject* and forwards to the typed implementation below. */
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
  __pyx_memoryviewslice_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}
/* [review] Typed __dealloc__ body: releases the memoryview slice this object
 * wraps (decrements the owning memview's refcount via the XDEC macro; the
 * trailing 1 argument is the "have GIL" flag). */
static void __pyx_memoryviewslice_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__dealloc__", 0);
  /* "View.MemoryView":942
 * 
 *     def __dealloc__(self):
 *         __PYX_XDEC_MEMVIEW(&self.from_slice, 1)             # <<<<<<<<<<<<<<
 * 
 *     cdef convert_item_to_object(self, char *itemp):
 */
  __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1);
  /* "View.MemoryView":941
 *     cdef int (*to_dtype_func)(char *, object) except 0
 * 
 *     def __dealloc__(self):             # <<<<<<<<<<<<<<
 *         __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
 * 
 */
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":944
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
/* [review] _memoryviewslice.convert_item_to_object override: converts a raw
 * item pointer to a Python object using the slice's dtype-specific
 * to_object_func when one was registered, otherwise delegates to the base
 * memoryview implementation through the vtable. Returns a new reference, or
 * NULL (__pyx_r = 0) with an exception set. */
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("convert_item_to_object", 0);
  /* "View.MemoryView":945
 * 
 *     cdef convert_item_to_object(self, char *itemp):
 *         if self.to_object_func != NULL:             # <<<<<<<<<<<<<<
 *             return self.to_object_func(itemp)
 *         else:
 */
  __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0);
  if (__pyx_t_1) {
    /* "View.MemoryView":946
 *     cdef convert_item_to_object(self, char *itemp):
 *         if self.to_object_func != NULL:
 *             return self.to_object_func(itemp)             # <<<<<<<<<<<<<<
 *         else:
 *             return memoryview.convert_item_to_object(self, itemp)
 */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 946; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_r = __pyx_t_2;
    __pyx_t_2 = 0;
    goto __pyx_L0;
  }
  /*else*/ {
    /* "View.MemoryView":948
 *             return self.to_object_func(itemp)
 *         else:
 *             return memoryview.convert_item_to_object(self, itemp)             # <<<<<<<<<<<<<<
 * 
 *     cdef assign_item_from_object(self, char *itemp, object value):
 */
/* [review] Unbound base-class call compiled to a direct vtable dispatch. */
    __Pyx_XDECREF(__pyx_r);
    __pyx_t_2 = __pyx_vtabptr_memoryview->convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 948; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_r = __pyx_t_2;
    __pyx_t_2 = 0;
    goto __pyx_L0;
  }
  /* "View.MemoryView":944
 *         __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
 * 
 *     cdef convert_item_to_object(self, char *itemp):             # <<<<<<<<<<<<<<
 *         if self.to_object_func != NULL:
 *             return self.to_object_func(itemp)
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":950
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
/* [review] _memoryviewslice.assign_item_from_object override: stores a Python
 * value into the raw item pointer using the dtype-specific to_dtype_func when
 * registered (declared `except 0`, so a 0 return signals an error), otherwise
 * delegates to the base memoryview implementation via the vtable. Returns
 * None on success, NULL (__pyx_r = 0) on error. */
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("assign_item_from_object", 0);
  /* "View.MemoryView":951
 * 
 *     cdef assign_item_from_object(self, char *itemp, object value):
 *         if self.to_dtype_func != NULL:             # <<<<<<<<<<<<<<
 *             self.to_dtype_func(itemp, value)
 *         else:
 */
  __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0);
  if (__pyx_t_1) {
    /* "View.MemoryView":952
 *     cdef assign_item_from_object(self, char *itemp, object value):
 *         if self.to_dtype_func != NULL:
 *             self.to_dtype_func(itemp, value)             # <<<<<<<<<<<<<<
 *         else:
 *             memoryview.assign_item_from_object(self, itemp, value)
 */
    __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 952; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    goto __pyx_L3;
  }
  /*else*/ {
    /* "View.MemoryView":954
 *             self.to_dtype_func(itemp, value)
 *         else:
 *             memoryview.assign_item_from_object(self, itemp, value)             # <<<<<<<<<<<<<<
 * 
 *     property base:
 */
/* [review] Base-class dispatch; the returned None is discarded immediately. */
    __pyx_t_3 = __pyx_vtabptr_memoryview->assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 954; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  }
  __pyx_L3:;
  /* "View.MemoryView":950
 *             return memoryview.convert_item_to_object(self, itemp)
 * 
 *     cdef assign_item_from_object(self, char *itemp, object value):             # <<<<<<<<<<<<<<
 *         if self.to_dtype_func != NULL:
 *             self.to_dtype_func(itemp, value)
 */
  /* function exit code */
  __pyx_r = Py_None; __Pyx_INCREF(Py_None);
  goto __pyx_L0;
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":958
* property base:
* @cname('__pyx_memoryviewslice__get__base')
* def __get__(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* Python wrapper */
/* [review] Generated getter wrapper for the `base` property of
 * _memoryviewslice: downcasts and forwards to the typed implementation. */
static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryviewslice__get__base(PyObject *__pyx_v_self) {
  PyObject *__pyx_r = 0;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
  __pyx_r = __pyx_memoryviewslice__get__base_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* Implementation of `_memoryviewslice.base.__get__`: returns the original
 * Python object the slice was created from (`self.from_object`), with a new
 * reference taken for the caller. Cannot fail. */
static PyObject *__pyx_memoryviewslice__get__base_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  __Pyx_RefNannySetupContext("__get__", 0);
  /* "View.MemoryView":959
 * @cname('__pyx_memoryviewslice__get__base')
 * def __get__(self):
 * return self.from_object # <<<<<<<<<<<<<<
 *
 * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
 */
  /* INCREF before handing the stored object out as the return value. */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(__pyx_v_self->from_object);
  __pyx_r = __pyx_v_self->from_object;
  goto __pyx_L0;
  /* "View.MemoryView":958
 * property base:
 * @cname('__pyx_memoryviewslice__get__base')
 * def __get__(self): # <<<<<<<<<<<<<<
 * return self.from_object
 *
 */
  /* function exit code */
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":965
 *
 * @cname('__pyx_memoryview_fromslice')
 * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
 * int ndim,
 * object (*to_object_func)(char *),
 */
/* Build a new `_memoryviewslice` Python object that wraps an existing C-level
 * `__Pyx_memviewslice`. The new object:
 *   - stores a copy of the slice struct (`from_slice`) and takes an extra
 *     reference on the slice's backing memoryview,
 *   - re-points its Py_buffer `view` at the slice's own data/shape/strides
 *     arrays (so the buffer describes the slice, not the original buffer),
 *   - records the optional dtype conversion callbacks used by
 *     convert_item_to_object / assign_item_from_object.
 * Returns None if the slice has no backing memoryview; returns NULL (0) with
 * an exception set on allocation/attribute failure. */
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
  struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
  int __pyx_v_i;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  __Pyx_TypeInfo *__pyx_t_4;
  Py_buffer __pyx_t_5;
  Py_ssize_t __pyx_t_6;
  int __pyx_t_7;
  int __pyx_t_8;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("memoryview_fromslice", 0);
  /* "View.MemoryView":974
 * cdef int i
 *
 * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
 * return None
 *
 */
  /* A slice whose backing memoryview is None has nothing to wrap. */
  __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0);
  if (__pyx_t_1) {
    /* "View.MemoryView":975
 *
 * if <PyObject *> memviewslice.memview == Py_None:
 * return None # <<<<<<<<<<<<<<
 *
 *
 */
    __Pyx_XDECREF(__pyx_r);
    __Pyx_INCREF(Py_None);
    __pyx_r = Py_None;
    goto __pyx_L0;
  }
  /* "View.MemoryView":980
 *
 *
 * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<<
 *
 * result.from_slice = memviewslice
 */
  /* Allocate the wrapper by calling _memoryviewslice(None, 0, dtype_is_object);
   * its fields are filled in manually below. */
  __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 980; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 980; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_INCREF(Py_None);
  PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
  __Pyx_GIVEREF(Py_None);
  __Pyx_INCREF(__pyx_int_0);
  PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
  __Pyx_GIVEREF(__pyx_int_0);
  PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __pyx_t_2 = 0;
  __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_memoryviewslice_type)), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 980; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
  __pyx_t_2 = 0;
  /* "View.MemoryView":982
 * result = _memoryviewslice(None, 0, dtype_is_object)
 *
 * result.from_slice = memviewslice # <<<<<<<<<<<<<<
 * __PYX_INC_MEMVIEW(&memviewslice, 1)
 *
 */
  /* Struct copy of the whole slice (data pointer, shape/strides/suboffsets). */
  __pyx_v_result->from_slice = __pyx_v_memviewslice;
  /* "View.MemoryView":983
 *
 * result.from_slice = memviewslice
 * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<<
 *
 * result.from_object = (<memoryview> memviewslice.memview).base
 */
  /* Acquire an additional reference on the backing memoryview, since the
   * result now holds a copy of the slice pointing at it. */
  __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);
  /* "View.MemoryView":985
 * __PYX_INC_MEMVIEW(&memviewslice, 1)
 *
 * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<<
 * result.typeinfo = memviewslice.memview.typeinfo
 *
 */
  /* Propagate the original exporting object via the memoryview's `base`. */
  __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 985; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  __Pyx_GOTREF(__pyx_v_result->from_object);
  __Pyx_DECREF(__pyx_v_result->from_object);
  __pyx_v_result->from_object = __pyx_t_2;
  __pyx_t_2 = 0;
  /* "View.MemoryView":986
 *
 * result.from_object = (<memoryview> memviewslice.memview).base
 * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<<
 *
 * result.view = memviewslice.memview.view
 */
  __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
  __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;
  /* "View.MemoryView":988
 * result.typeinfo = memviewslice.memview.typeinfo
 *
 * result.view = memviewslice.memview.view # <<<<<<<<<<<<<<
 * result.view.buf = <void *> memviewslice.data
 * result.view.ndim = ndim
 */
  /* Start from the source view (itemsize, format, flags...), then override
   * the fields that must describe the slice rather than the original. */
  __pyx_t_5 = __pyx_v_memviewslice.memview->view;
  __pyx_v_result->__pyx_base.view = __pyx_t_5;
  /* "View.MemoryView":989
 *
 * result.view = memviewslice.memview.view
 * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<<
 * result.view.ndim = ndim
 * (<__pyx_buffer *> &result.view).obj = Py_None
 */
  __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);
  /* "View.MemoryView":990
 * result.view = memviewslice.memview.view
 * result.view.buf = <void *> memviewslice.data
 * result.view.ndim = ndim # <<<<<<<<<<<<<<
 * (<__pyx_buffer *> &result.view).obj = Py_None
 * Py_INCREF(Py_None)
 */
  __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;
  /* "View.MemoryView":991
 * result.view.buf = <void *> memviewslice.data
 * result.view.ndim = ndim
 * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<<
 * Py_INCREF(Py_None)
 *
 */
  /* The buffer's `obj` field owns a reference; set it to None and INCREF
   * accordingly so a later release of the view is balanced. */
  ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None;
  /* "View.MemoryView":992
 * result.view.ndim = ndim
 * (<__pyx_buffer *> &result.view).obj = Py_None
 * Py_INCREF(Py_None) # <<<<<<<<<<<<<<
 *
 * result.flags = PyBUF_RECORDS
 */
  Py_INCREF(Py_None);
  /* "View.MemoryView":994
 * Py_INCREF(Py_None)
 *
 * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<<
 *
 * result.view.shape = <Py_ssize_t *> result.from_slice.shape
 */
  __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;
  /* "View.MemoryView":996
 * result.flags = PyBUF_RECORDS
 *
 * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<<
 * result.view.strides = <Py_ssize_t *> result.from_slice.strides
 * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
 */
  /* Point the view's metadata arrays into result's own embedded slice copy
   * (NOT the local `memviewslice`), so their lifetime matches the object. */
  __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);
  /* "View.MemoryView":997
 *
 * result.view.shape = <Py_ssize_t *> result.from_slice.shape
 * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<<
 * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
 *
 */
  __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);
  /* "View.MemoryView":998
 * result.view.shape = <Py_ssize_t *> result.from_slice.shape
 * result.view.strides = <Py_ssize_t *> result.from_slice.strides
 * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<<
 *
 * result.view.len = result.view.itemsize
 */
  __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);
  /* "View.MemoryView":1000
 * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
 *
 * result.view.len = result.view.itemsize # <<<<<<<<<<<<<<
 * for i in range(ndim):
 * result.view.len *= result.view.shape[i]
 */
  /* view.len = itemsize * product(shape): total byte length of the slice. */
  __pyx_t_6 = __pyx_v_result->__pyx_base.view.itemsize;
  __pyx_v_result->__pyx_base.view.len = __pyx_t_6;
  /* "View.MemoryView":1001
 *
 * result.view.len = result.view.itemsize
 * for i in range(ndim): # <<<<<<<<<<<<<<
 * result.view.len *= result.view.shape[i]
 *
 */
  __pyx_t_7 = __pyx_v_ndim;
  for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_7; __pyx_t_8+=1) {
    __pyx_v_i = __pyx_t_8;
    /* "View.MemoryView":1002
 * result.view.len = result.view.itemsize
 * for i in range(ndim):
 * result.view.len *= result.view.shape[i] # <<<<<<<<<<<<<<
 *
 * result.to_object_func = to_object_func
 */
    __pyx_v_result->__pyx_base.view.len = (__pyx_v_result->__pyx_base.view.len * (__pyx_v_result->__pyx_base.view.shape[__pyx_v_i]));
  }
  /* "View.MemoryView":1004
 * result.view.len *= result.view.shape[i]
 *
 * result.to_object_func = to_object_func # <<<<<<<<<<<<<<
 * result.to_dtype_func = to_dtype_func
 *
 */
  /* Install the element <-> Python-object conversion callbacks (may be NULL,
   * in which case the generic memoryview conversion path is used). */
  __pyx_v_result->to_object_func = __pyx_v_to_object_func;
  /* "View.MemoryView":1005
 *
 * result.to_object_func = to_object_func
 * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<<
 *
 * return result
 */
  __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func;
  /* "View.MemoryView":1007
 * result.to_dtype_func = to_dtype_func
 *
 * return result # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_get_slice_from_memoryview')
 */
  __Pyx_XDECREF(__pyx_r);
  __Pyx_INCREF(((PyObject *)__pyx_v_result));
  __pyx_r = ((PyObject *)__pyx_v_result);
  goto __pyx_L0;
  /* "View.MemoryView":965
 *
 * @cname('__pyx_memoryview_fromslice')
 * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
 * int ndim,
 * object (*to_object_func)(char *),
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_result);
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":1010
 *
 * @cname('__pyx_memoryview_get_slice_from_memoryview')
 * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
 * __Pyx_memviewslice *mslice):
 * cdef _memoryviewslice obj
 */
/* Obtain a `__Pyx_memviewslice *` describing `memview`:
 *   - if `memview` is a `_memoryviewslice`, return a pointer to its embedded
 *     `from_slice` (caller-provided `mslice` is unused);
 *   - otherwise, fill the caller-provided `mslice` via slice_copy() and
 *     return it.
 * Because the return type is not a Python object, errors are reported via
 * __Pyx_WriteUnraisable and NULL is returned. */
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) {
  struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0;
  __Pyx_memviewslice *__pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("get_slice_from_memview", 0);
  /* "View.MemoryView":1013
 * __Pyx_memviewslice *mslice):
 * cdef _memoryviewslice obj
 * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
 * obj = memview
 * return &obj.from_slice
 */
  __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type));
  __pyx_t_2 = (__pyx_t_1 != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":1014
 * cdef _memoryviewslice obj
 * if isinstance(memview, _memoryviewslice):
 * obj = memview # <<<<<<<<<<<<<<
 * return &obj.from_slice
 * else:
 */
    /* Checked downcast to _memoryviewslice (raises on type mismatch). */
    if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1014; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __pyx_t_3 = ((PyObject *)__pyx_v_memview);
    __Pyx_INCREF(__pyx_t_3);
    __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
    __pyx_t_3 = 0;
    /* "View.MemoryView":1015
 * if isinstance(memview, _memoryviewslice):
 * obj = memview
 * return &obj.from_slice # <<<<<<<<<<<<<<
 * else:
 * slice_copy(memview, mslice)
 */
    /* NOTE(review): the returned pointer aliases storage inside the object;
     * it stays valid only while `memview` is alive. */
    __pyx_r = (&__pyx_v_obj->from_slice);
    goto __pyx_L0;
  }
  /*else*/ {
    /* "View.MemoryView":1017
 * return &obj.from_slice
 * else:
 * slice_copy(memview, mslice) # <<<<<<<<<<<<<<
 * return mslice
 *
 */
    __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice);
    /* "View.MemoryView":1018
 * else:
 * slice_copy(memview, mslice)
 * return mslice # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_slice_copy')
 */
    __pyx_r = __pyx_v_mslice;
    goto __pyx_L0;
  }
  /* "View.MemoryView":1010
 *
 * @cname('__pyx_memoryview_get_slice_from_memoryview')
 * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
 * __Pyx_memviewslice *mslice):
 * cdef _memoryviewslice obj
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 0);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XDECREF((PyObject *)__pyx_v_obj);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":1021
 *
 * @cname('__pyx_memoryview_slice_copy')
 * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
 * cdef int dim
 * cdef (Py_ssize_t*) shape, strides, suboffsets
 */
/* Fill `*dst` with a slice description of `memview`: copies the buffer's
 * data pointer plus per-dimension shape/strides/suboffsets out of the
 * Py_buffer view. A NULL suboffsets array is normalized to -1 in every
 * dimension (meaning "no indirection"). Note: `dst->memview` is stored as a
 * plain pointer; no reference count is taken here. */
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) {
  int __pyx_v_dim;
  Py_ssize_t *__pyx_v_shape;
  Py_ssize_t *__pyx_v_strides;
  Py_ssize_t *__pyx_v_suboffsets;
  __Pyx_RefNannyDeclarations
  Py_ssize_t *__pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  __Pyx_RefNannySetupContext("slice_copy", 0);
  /* "View.MemoryView":1025
 * cdef (Py_ssize_t*) shape, strides, suboffsets
 *
 * shape = memview.view.shape # <<<<<<<<<<<<<<
 * strides = memview.view.strides
 * suboffsets = memview.view.suboffsets
 */
  __pyx_t_1 = __pyx_v_memview->view.shape;
  __pyx_v_shape = __pyx_t_1;
  /* "View.MemoryView":1026
 *
 * shape = memview.view.shape
 * strides = memview.view.strides # <<<<<<<<<<<<<<
 * suboffsets = memview.view.suboffsets
 *
 */
  __pyx_t_1 = __pyx_v_memview->view.strides;
  __pyx_v_strides = __pyx_t_1;
  /* "View.MemoryView":1027
 * shape = memview.view.shape
 * strides = memview.view.strides
 * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<<
 *
 * dst.memview = <__pyx_memoryview *> memview
 */
  __pyx_t_1 = __pyx_v_memview->view.suboffsets;
  __pyx_v_suboffsets = __pyx_t_1;
  /* "View.MemoryView":1029
 * suboffsets = memview.view.suboffsets
 *
 * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<<
 * dst.data = <char *> memview.view.buf
 *
 */
  __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview);
  /* "View.MemoryView":1030
 *
 * dst.memview = <__pyx_memoryview *> memview
 * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<<
 *
 * for dim in range(memview.view.ndim):
 */
  __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf);
  /* "View.MemoryView":1032
 * dst.data = <char *> memview.view.buf
 *
 * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<<
 * dst.shape[dim] = shape[dim]
 * dst.strides[dim] = strides[dim]
 */
  /* Copy per-dimension metadata into the slice's fixed-size arrays. */
  __pyx_t_2 = __pyx_v_memview->view.ndim;
  for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
    __pyx_v_dim = __pyx_t_3;
    /* "View.MemoryView":1033
 *
 * for dim in range(memview.view.ndim):
 * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<<
 * dst.strides[dim] = strides[dim]
 * if suboffsets == NULL:
 */
    (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]);
    /* "View.MemoryView":1034
 * for dim in range(memview.view.ndim):
 * dst.shape[dim] = shape[dim]
 * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<<
 * if suboffsets == NULL:
 * dst.suboffsets[dim] = -1
 */
    (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]);
    /* "View.MemoryView":1035
 * dst.shape[dim] = shape[dim]
 * dst.strides[dim] = strides[dim]
 * if suboffsets == NULL: # <<<<<<<<<<<<<<
 * dst.suboffsets[dim] = -1
 * else:
 */
    /* Buffer protocol allows suboffsets == NULL; -1 is the "no suboffset"
     * sentinel used by the slice representation. */
    __pyx_t_4 = ((__pyx_v_suboffsets == NULL) != 0);
    if (__pyx_t_4) {
      /* "View.MemoryView":1036
 * dst.strides[dim] = strides[dim]
 * if suboffsets == NULL:
 * dst.suboffsets[dim] = -1 # <<<<<<<<<<<<<<
 * else:
 * dst.suboffsets[dim] = suboffsets[dim]
 */
      (__pyx_v_dst->suboffsets[__pyx_v_dim]) = -1;
      goto __pyx_L5;
    }
    /*else*/ {
      /* "View.MemoryView":1038
 * dst.suboffsets[dim] = -1
 * else:
 * dst.suboffsets[dim] = suboffsets[dim] # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_copy_object')
 */
      (__pyx_v_dst->suboffsets[__pyx_v_dim]) = (__pyx_v_suboffsets[__pyx_v_dim]);
    }
    __pyx_L5:;
  }
  /* "View.MemoryView":1021
 *
 * @cname('__pyx_memoryview_slice_copy')
 * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
 * cdef int dim
 * cdef (Py_ssize_t*) shape, strides, suboffsets
 */
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":1041
 *
 * @cname('__pyx_memoryview_copy_object')
 * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
 * "Create a new memoryview object"
 * cdef __Pyx_memviewslice memviewslice
 */
/* Create a new memoryview object from `memview`: snapshot its buffer
 * metadata into a stack slice with slice_copy(), then delegate to
 * memoryview_copy_from_slice(). Returns NULL (0) with an exception set on
 * failure. */
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
  __Pyx_memviewslice __pyx_v_memviewslice;
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("memoryview_copy", 0);
  /* "View.MemoryView":1044
 * "Create a new memoryview object"
 * cdef __Pyx_memviewslice memviewslice
 * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<<
 * return memoryview_copy_from_slice(memview, &memviewslice)
 *
 */
  __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));
  /* "View.MemoryView":1045
 * cdef __Pyx_memviewslice memviewslice
 * slice_copy(memview, &memviewslice)
 * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<<
 *
 * @cname('__pyx_memoryview_copy_object_from_slice')
 */
  __Pyx_XDECREF(__pyx_r);
  __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1045; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_r = __pyx_t_1;
  __pyx_t_1 = 0;
  goto __pyx_L0;
  /* "View.MemoryView":1041
 *
 * @cname('__pyx_memoryview_copy_object')
 * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
 * "Create a new memoryview object"
 * cdef __Pyx_memviewslice memviewslice
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":1048
 *
 * @cname('__pyx_memoryview_copy_object_from_slice')
 * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
 * """
 * Create a new memoryview object from a given memoryview object and slice.
 */
/* Create a new memoryview object from an existing memoryview plus a slice
 * struct. If `memview` is itself a `_memoryviewslice`, its dtype conversion
 * callbacks are propagated to the copy; otherwise NULL callbacks are used.
 * Delegates construction to memoryview_fromslice(). Returns NULL (0) with an
 * exception set on failure. */
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
  PyObject *(*__pyx_v_to_object_func)(char *);
  int (*__pyx_v_to_dtype_func)(char *, PyObject *);
  PyObject *__pyx_r = NULL;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  int __pyx_t_2;
  PyObject *(*__pyx_t_3)(char *);
  int (*__pyx_t_4)(char *, PyObject *);
  PyObject *__pyx_t_5 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);
  /* "View.MemoryView":1055
 * cdef int (*to_dtype_func)(char *, object) except 0
 *
 * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
 * to_object_func = (<_memoryviewslice> memview).to_object_func
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 */
  __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), ((PyObject *)__pyx_memoryviewslice_type));
  __pyx_t_2 = (__pyx_t_1 != 0);
  if (__pyx_t_2) {
    /* "View.MemoryView":1056
 *
 * if isinstance(memview, _memoryviewslice):
 * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<<
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 * else:
 */
    /* Source already carries conversion callbacks: reuse them. */
    __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
    __pyx_v_to_object_func = __pyx_t_3;
    /* "View.MemoryView":1057
 * if isinstance(memview, _memoryviewslice):
 * to_object_func = (<_memoryviewslice> memview).to_object_func
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<<
 * else:
 * to_object_func = NULL
 */
    __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
    __pyx_v_to_dtype_func = __pyx_t_4;
    goto __pyx_L3;
  }
  /*else*/ {
    /* "View.MemoryView":1059
 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
 * else:
 * to_object_func = NULL # <<<<<<<<<<<<<<
 * to_dtype_func = NULL
 *
 */
    __pyx_v_to_object_func = NULL;
    /* "View.MemoryView":1060
 * else:
 * to_object_func = NULL
 * to_dtype_func = NULL # <<<<<<<<<<<<<<
 *
 * return memoryview_fromslice(memviewslice[0], memview.view.ndim,
 */
    __pyx_v_to_dtype_func = NULL;
  }
  __pyx_L3:;
  /* "View.MemoryView":1062
 * to_dtype_func = NULL
 *
 * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<<
 * to_object_func, to_dtype_func,
 * memview.dtype_is_object)
 */
  __Pyx_XDECREF(__pyx_r);
  /* "View.MemoryView":1064
 * return memoryview_fromslice(memviewslice[0], memview.view.ndim,
 * to_object_func, to_dtype_func,
 * memview.dtype_is_object) # <<<<<<<<<<<<<<
 *
 *
 */
  /* The slice struct is passed by value; memoryview_fromslice makes its own
   * copy and manages the backing memview reference itself. */
  __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1062; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_5);
  __pyx_r = __pyx_t_5;
  __pyx_t_5 = 0;
  goto __pyx_L0;
  /* "View.MemoryView":1048
 *
 * @cname('__pyx_memoryview_copy_object_from_slice')
 * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
 * """
 * Create a new memoryview object from a given memoryview object and slice.
 */
  /* function exit code */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_5);
  __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = 0;
  __pyx_L0:;
  __Pyx_XGIVEREF(__pyx_r);
  __Pyx_RefNannyFinishContext();
  return __pyx_r;
}
/* "View.MemoryView":1070 — generated from:
 *   cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil
 *
 * Absolute value for Py_ssize_t. Same contract as the original if/else
 * version: negative inputs are negated, everything else is returned as-is. */
static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
  Py_ssize_t __pyx_r;
  /* Conditional expression replaces the original if/else ladder. */
  __pyx_r = (__pyx_v_arg < 0) ? (-__pyx_v_arg) : __pyx_v_arg;
  return __pyx_r;
}
/* "View.MemoryView":1077 — generated from:
 *   cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil
 *
 * Decide the best memory access order for a slice.
 *
 * Candidate strides are taken from non-degenerate dimensions only
 * (shape > 1): the C-order candidate is the stride of the LAST such
 * dimension, the Fortran-order candidate the stride of the FIRST. If no
 * dimension qualifies, both candidates stay 0 and 'C' wins the tie.
 * Returns 'C' when |c_stride| <= |f_stride|, 'F' otherwise. */
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
  Py_ssize_t c_candidate = 0;
  Py_ssize_t f_candidate = 0;
  int dim;

  /* Scan backwards for the innermost non-degenerate dimension. */
  for (dim = __pyx_v_ndim - 1; dim >= 0; --dim) {
    if (__pyx_v_mslice->shape[dim] > 1) {
      c_candidate = __pyx_v_mslice->strides[dim];
      break;
    }
  }

  /* Scan forwards for the outermost non-degenerate dimension. */
  for (dim = 0; dim < __pyx_v_ndim; ++dim) {
    if (__pyx_v_mslice->shape[dim] > 1) {
      f_candidate = __pyx_v_mslice->strides[dim];
      break;
    }
  }

  return (abs_py_ssize_t(c_candidate) <= abs_py_ssize_t(f_candidate)) ? 'C' : 'F';
}
/* "View.MemoryView":1101
 *
 * @cython.cdivision(True)
 * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
 * char *dst_data, Py_ssize_t *dst_strides,
 * Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
 */
/* Recursive element-wise copy between two strided buffers of equal shape.
 * Base case (ndim == 1): if both strides are positive and equal to itemsize,
 * the whole row is copied with a single memcpy; otherwise elements are
 * copied one memcpy of `itemsize` bytes at a time, advancing each pointer by
 * its own stride. Recursive case: peel off the leading dimension and recurse
 * with the remaining strides/shapes. Iteration counts use dst_extent
 * (dst_shape[0]); shapes are presumed already validated as matching by the
 * caller. */
static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
  CYTHON_UNUSED Py_ssize_t __pyx_v_i;
  CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent;
  Py_ssize_t __pyx_v_dst_extent;
  Py_ssize_t __pyx_v_src_stride;
  Py_ssize_t __pyx_v_dst_stride;
  int __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  Py_ssize_t __pyx_t_5;
  Py_ssize_t __pyx_t_6;
  /* "View.MemoryView":1108
 *
 * cdef Py_ssize_t i
 * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<<
 * cdef Py_ssize_t dst_extent = dst_shape[0]
 * cdef Py_ssize_t src_stride = src_strides[0]
 */
  __pyx_v_src_extent = (__pyx_v_src_shape[0]);
  /* "View.MemoryView":1109
 * cdef Py_ssize_t i
 * cdef Py_ssize_t src_extent = src_shape[0]
 * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<<
 * cdef Py_ssize_t src_stride = src_strides[0]
 * cdef Py_ssize_t dst_stride = dst_strides[0]
 */
  __pyx_v_dst_extent = (__pyx_v_dst_shape[0]);
  /* "View.MemoryView":1110
 * cdef Py_ssize_t src_extent = src_shape[0]
 * cdef Py_ssize_t dst_extent = dst_shape[0]
 * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<<
 * cdef Py_ssize_t dst_stride = dst_strides[0]
 *
 */
  __pyx_v_src_stride = (__pyx_v_src_strides[0]);
  /* "View.MemoryView":1111
 * cdef Py_ssize_t dst_extent = dst_shape[0]
 * cdef Py_ssize_t src_stride = src_strides[0]
 * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<<
 *
 * if ndim == 1:
 */
  __pyx_v_dst_stride = (__pyx_v_dst_strides[0]);
  /* "View.MemoryView":1113
 * cdef Py_ssize_t dst_stride = dst_strides[0]
 *
 * if ndim == 1: # <<<<<<<<<<<<<<
 * if (src_stride > 0 and dst_stride > 0 and
 * <size_t> src_stride == itemsize == <size_t> dst_stride):
 */
  __pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
  if (__pyx_t_1) {
    /* "View.MemoryView":1114
 *
 * if ndim == 1:
 * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
 * <size_t> src_stride == itemsize == <size_t> dst_stride):
 * memcpy(dst_data, src_data, itemsize * dst_extent)
 */
    /* Expanded short-circuit evaluation of:
     *   src_stride > 0 and dst_stride > 0 and
     *   <size_t>src_stride == itemsize == <size_t>dst_stride
     * i.e. the fast path requires both rows to be contiguous. */
    __pyx_t_1 = ((__pyx_v_src_stride > 0) != 0);
    if (__pyx_t_1) {
      __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0);
      if (__pyx_t_2) {
        /* "View.MemoryView":1115
 * if ndim == 1:
 * if (src_stride > 0 and dst_stride > 0 and
 * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<<
 * memcpy(dst_data, src_data, itemsize * dst_extent)
 * else:
 */
        __pyx_t_3 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize);
        if (__pyx_t_3) {
          __pyx_t_3 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride));
        }
        __pyx_t_4 = (__pyx_t_3 != 0);
      } else {
        __pyx_t_4 = __pyx_t_2;
      }
      __pyx_t_2 = __pyx_t_4;
    } else {
      __pyx_t_2 = __pyx_t_1;
    }
    if (__pyx_t_2) {
      /* "View.MemoryView":1116
 * if (src_stride > 0 and dst_stride > 0 and
 * <size_t> src_stride == itemsize == <size_t> dst_stride):
 * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<<
 * else:
 * for i in range(dst_extent):
 */
      /* Contiguous fast path: one bulk copy for the whole row. */
      memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent));
      goto __pyx_L4;
    }
    /*else*/ {
      /* "View.MemoryView":1118
 * memcpy(dst_data, src_data, itemsize * dst_extent)
 * else:
 * for i in range(dst_extent): # <<<<<<<<<<<<<<
 * memcpy(dst_data, src_data, itemsize)
 * src_data += src_stride
 */
      /* Strided slow path: copy one element at a time. */
      __pyx_t_5 = __pyx_v_dst_extent;
      for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
        __pyx_v_i = __pyx_t_6;
        /* "View.MemoryView":1119
 * else:
 * for i in range(dst_extent):
 * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<<
 * src_data += src_stride
 * dst_data += dst_stride
 */
        memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize);
        /* "View.MemoryView":1120
 * for i in range(dst_extent):
 * memcpy(dst_data, src_data, itemsize)
 * src_data += src_stride # <<<<<<<<<<<<<<
 * dst_data += dst_stride
 * else:
 */
        __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
        /* "View.MemoryView":1121
 * memcpy(dst_data, src_data, itemsize)
 * src_data += src_stride
 * dst_data += dst_stride # <<<<<<<<<<<<<<
 * else:
 * for i in range(dst_extent):
 */
        __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
      }
    }
    __pyx_L4:;
    goto __pyx_L3;
  }
  /*else*/ {
    /* "View.MemoryView":1123
 * dst_data += dst_stride
 * else:
 * for i in range(dst_extent): # <<<<<<<<<<<<<<
 * _copy_strided_to_strided(src_data, src_strides + 1,
 * dst_data, dst_strides + 1,
 */
    /* Recurse over the leading dimension with rank ndim - 1. */
    __pyx_t_5 = __pyx_v_dst_extent;
    for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
      __pyx_v_i = __pyx_t_6;
      /* "View.MemoryView":1124
 * else:
 * for i in range(dst_extent):
 * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<<
 * dst_data, dst_strides + 1,
 * src_shape + 1, dst_shape + 1,
 */
      _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize);
      /* "View.MemoryView":1128
 * src_shape + 1, dst_shape + 1,
 * ndim - 1, itemsize)
 * src_data += src_stride # <<<<<<<<<<<<<<
 * dst_data += dst_stride
 *
 */
      __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
      /* "View.MemoryView":1129
 * ndim - 1, itemsize)
 * src_data += src_stride
 * dst_data += dst_stride # <<<<<<<<<<<<<<
 *
 * cdef void copy_strided_to_strided(__Pyx_memviewslice *src,
 */
      __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
    }
  }
  __pyx_L3:;
  /* "View.MemoryView":1101
 *
 * @cython.cdivision(True)
 * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
 * char *dst_data, Py_ssize_t *dst_strides,
 * Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
 */
  /* function exit code */
}
/* "View.MemoryView":1131
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
/* Forwarding shim (generated from View.MemoryView:1131): unpack the two
 * memoryview slice structs and delegate the actual element-wise copy to the
 * recursive worker _copy_strided_to_strided().  No error can occur here. */
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
  _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides,
                           __pyx_v_dst->data, __pyx_v_dst->strides,
                           __pyx_v_src->shape, __pyx_v_dst->shape,
                           __pyx_v_ndim, __pyx_v_itemsize);
}
/* "View.MemoryView":1138
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef int i
*/
/* Return the size of the memory occupied by the slice, in bytes: the item
 * size of the underlying buffer multiplied by every extent of the first
 * `ndim` dimensions.  (Generated from View.MemoryView:1138.) */
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
  Py_ssize_t nbytes;
  int dim;

  /* Start from the per-element size reported by the buffer view. */
  nbytes = __pyx_v_src->memview->view.itemsize;

  /* Multiply in each extent; a zero-length dimension yields size 0. */
  for (dim = 0; dim < __pyx_v_ndim; ++dim) {
    nbytes *= __pyx_v_src->shape[dim];
  }

  return nbytes;
}
/* "View.MemoryView":1149
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
/* Fill `strides` with the strides of a contiguous array of the given `shape`,
 * starting from an initial element stride of `stride` (normally the itemsize).
 * `order` selects the layout: 'F' (Fortran) makes the first dimension vary
 * fastest, anything else is treated as C order (last dimension fastest).
 * Returns the accumulated stride, i.e. the total extent in the same units as
 * the input `stride`.  (Generated from View.MemoryView:1149.) */
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
  int dim;

  if (__pyx_v_order == 'F') {
    /* Fortran order: walk dimensions front-to-back. */
    for (dim = 0; dim < __pyx_v_ndim; ++dim) {
      __pyx_v_strides[dim] = __pyx_v_stride;
      __pyx_v_stride *= __pyx_v_shape[dim];
    }
  } else {
    /* C order: walk dimensions back-to-front. */
    for (dim = __pyx_v_ndim - 1; dim >= 0; --dim) {
      __pyx_v_strides[dim] = __pyx_v_stride;
      __pyx_v_stride *= __pyx_v_shape[dim];
    }
  }

  return __pyx_v_stride;
}
/* "View.MemoryView":1170
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
/* Copy the data of `src` into a freshly malloc()ed buffer that is contiguous
 * in the requested `order` ('C' or 'F'), and initialize `tmpslice` to describe
 * that buffer (same shape as `src`, contiguous strides, no suboffsets).
 * Returns the malloc()ed pointer — which the CALLER must free() — or NULL
 * after raising MemoryError (the GIL is acquired only to record the
 * traceback).  Runs without the GIL otherwise.
 * (Generated from View.MemoryView:1170, cdef copy_data_to_temp.) */
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
  int __pyx_v_i;
  void *__pyx_v_result;
  size_t __pyx_v_itemsize;
  size_t __pyx_v_size;
  void *__pyx_r;
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  struct __pyx_memoryview_obj *__pyx_t_4;
  int __pyx_t_5;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  /* "View.MemoryView":1181 — itemsize comes from the underlying buffer view */
  __pyx_t_1 = __pyx_v_src->memview->view.itemsize;
  __pyx_v_itemsize = __pyx_t_1;
  /* "View.MemoryView":1182 — total byte size of the slice to duplicate */
  __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
  /* "View.MemoryView":1184 */
  __pyx_v_result = malloc(__pyx_v_size);
  /* "View.MemoryView":1185-1186 — on allocation failure raise MemoryError
   * via _err() (which acquires the GIL) and bail out through the error label */
  __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
  if (__pyx_t_2) {
    __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1186; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    goto __pyx_L3;
  }
  __pyx_L3:;
  /* "View.MemoryView":1189-1193 — tmpslice mirrors src's shape over the new
   * buffer; suboffsets are set to -1 (every dimension of the copy is direct) */
  __pyx_v_tmpslice->data = ((char *)__pyx_v_result);
  __pyx_v_tmpslice->memview = __pyx_v_src->memview, __pyx_t_4 = __pyx_v_tmpslice->memview; /* NOTE: see original below */
  __pyx_v_tmpslice->memview = __pyx_t_4;
  __pyx_t_3 = __pyx_v_ndim;
  for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) {
    __pyx_v_i = __pyx_t_5;
    (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
    (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1;
  }
  /* "View.MemoryView":1195 — compute contiguous strides for the requested
   * order; the Py_ssize_t return value is intentionally ignored here */
  __pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order);
  /* "View.MemoryView":1199-1201 — zero the stride of length-1 dimensions so
   * the copy also works for broadcast slices */
  __pyx_t_3 = __pyx_v_ndim;
  for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) {
    __pyx_v_i = __pyx_t_5;
    __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
    if (__pyx_t_2) {
      (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
      goto __pyx_L8;
    }
    __pyx_L8:;
  }
  /* "View.MemoryView":1203-1206 — a source already contiguous in `order`
   * can be duplicated with one memcpy; otherwise fall back to the generic
   * strided copy into tmpslice */
  __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0);
  if (__pyx_t_2) {
    memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size);
    goto __pyx_L9;
  }
  /*else*/ {
    copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
  }
  __pyx_L9:;
  /* "View.MemoryView":1208 — hand ownership of the buffer to the caller */
  __pyx_r = __pyx_v_result;
  goto __pyx_L0;
  /* function exit code — error path: acquire the GIL just long enough to
   * record the traceback, then return NULL */
  __pyx_L1_error:;
  {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
  }
  __pyx_r = NULL;
  __pyx_L0:;
  return __pyx_r;
}
/* "View.MemoryView":1213
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
/* Error helper ("with gil"): acquire the GIL, raise
 * ValueError("got differing extents in dimension %d (got %d and %d)")
 * for dimension `i` with the two mismatched extents, and return -1 so the
 * nogil caller can propagate the failure.  This function ALWAYS raises —
 * the straight-line code falls through into the error label by design.
 * (Generated from View.MemoryView:1213, cdef _err_extents.) */
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  PyObject *__pyx_t_4 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  /* Callers run without the GIL; take it for the duration of the raise. */
  #ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
  #endif
  __Pyx_RefNannySetupContext("_err_extents", 0);
  /* "View.MemoryView":1216 — box (i, extent1, extent2) into a 3-tuple */
  __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1216; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_4);
  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
  __Pyx_GIVEREF(__pyx_t_1);
  PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
  __Pyx_GIVEREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_3);
  __pyx_t_1 = 0;
  __pyx_t_2 = 0;
  __pyx_t_3 = 0;
  /* "View.MemoryView":1215 — %-format the message, build and raise the
   * ValueError, then jump to the error label (the function always fails) */
  __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_4);
  PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_3);
  __pyx_t_3 = 0;
  __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
  __Pyx_Raise(__pyx_t_3, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  /* function exit code — sole exit: drop temporaries, record traceback,
   * release the GIL, and report failure with -1 */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_XDECREF(__pyx_t_4);
  __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_RefNannyFinishContext();
  #ifdef WITH_THREAD
  PyGILState_Release(__pyx_gilstate_save);
  #endif
  return __pyx_r;
}
/* "View.MemoryView":1219
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
/* Error helper ("with gil"): acquire the GIL and raise
 * `error(msg.decode('ascii') % dim)` — i.e. the ASCII-decoded `msg` template
 * %-formatted with the dimension index — then return -1 for the nogil caller.
 * Like _err_extents this ALWAYS raises; the straight-line code ends in an
 * unconditional jump to the error label.
 * (Generated from View.MemoryView:1219, cdef _err_dim.) */
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  PyObject *__pyx_t_1 = NULL;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  #ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
  #endif
  __Pyx_RefNannySetupContext("_err_dim", 0);
  /* `error` arrives borrowed from a nogil context; own it for the call. */
  __Pyx_INCREF(__pyx_v_error);
  /* "View.MemoryView":1220 — decode msg as ASCII, %-format with dim,
   * call error(...) to build the exception instance, and raise it */
  __pyx_t_1 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_1);
  __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  __pyx_t_3 = PyUnicode_Format(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_2);
  PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3);
  __Pyx_GIVEREF(__pyx_t_3);
  __pyx_t_3 = 0;
  __pyx_t_3 = __Pyx_PyObject_Call(__pyx_v_error, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  __Pyx_GOTREF(__pyx_t_3);
  __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
  __Pyx_Raise(__pyx_t_3, 0, 0, 0);
  __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
  {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1220; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  /* function exit code — sole exit: clean up temporaries and the INCREF'd
   * error object, record traceback, release the GIL, report -1 */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_1);
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_XDECREF(__pyx_v_error);
  __Pyx_RefNannyFinishContext();
  #ifdef WITH_THREAD
  PyGILState_Release(__pyx_gilstate_save);
  #endif
  return __pyx_r;
}
/* "View.MemoryView":1223
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
/* Error helper ("with gil"): acquire the GIL and raise.  With a non-NULL
 * `msg`, raises `error(msg.decode('ascii'))` (a new exception instance);
 * with msg == NULL, re-raises `error` itself (e.g. the MemoryError type in
 * copy_data_to_temp).  Always returns -1 so nogil callers can propagate.
 * (Generated from View.MemoryView:1223, cdef _err.) */
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
  int __pyx_r;
  __Pyx_RefNannyDeclarations
  int __pyx_t_1;
  PyObject *__pyx_t_2 = NULL;
  PyObject *__pyx_t_3 = NULL;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  #ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
  #endif
  __Pyx_RefNannySetupContext("_err", 0);
  /* `error` arrives borrowed from a nogil context; own it for the call. */
  __Pyx_INCREF(__pyx_v_error);
  /* "View.MemoryView":1224 */
  __pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
  if (__pyx_t_1) {
    /* "View.MemoryView":1225 — decode msg as ASCII and raise error(msg) */
    __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1225; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_2);
    __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1225; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_3);
    PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_2);
    __Pyx_GIVEREF(__pyx_t_2);
    __pyx_t_2 = 0;
    __pyx_t_2 = __Pyx_PyObject_Call(__pyx_v_error, __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1225; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __Pyx_GOTREF(__pyx_t_2);
    __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
    __Pyx_Raise(__pyx_t_2, 0, 0, 0);
    __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
    {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1225; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  /*else*/ {
    /* "View.MemoryView":1227 — no message: raise the error object as-is */
    __Pyx_Raise(__pyx_v_error, 0, 0, 0);
    {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1227; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
  }
  /* function exit code — sole exit (both branches raise): clean up,
   * record traceback, release the GIL, report -1 */
  __pyx_L1_error:;
  __Pyx_XDECREF(__pyx_t_2);
  __Pyx_XDECREF(__pyx_t_3);
  __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
  __pyx_r = -1;
  __Pyx_XDECREF(__pyx_v_error);
  __Pyx_RefNannyFinishContext();
  #ifdef WITH_THREAD
  PyGILState_Release(__pyx_gilstate_save);
  #endif
  return __pyx_r;
}
/* "View.MemoryView":1230
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
/* Copy the contents of memoryview slice `src` into `dst`, broadcasting
 * leading dimensions when the ranks differ and length-1 dimensions of `src`
 * elsewhere.  Verifies matching extents (raising ValueError on mismatch or
 * on any indirect dimension), copies overlapping sources through a malloc()ed
 * temporary, uses a single memcpy when both slices share C or F contiguity,
 * and otherwise performs a generic strided copy (transposing both slices
 * first when Fortran order is optimal for both).  `dtype_is_object` enables
 * refcount fixups around the raw byte copy.  Both slices are taken by value,
 * so the broadcast/stride mutations never touch the caller's structs.
 * Returns 0 on success, -1 with a Python exception set on failure (the GIL
 * is acquired only on the error path).
 * (Generated from View.MemoryView:1230, cdef memoryview_copy_contents.)
 *
 * Fix vs. generated code: the error path now free()s `tmpdata`.  Previously
 * a failure after copy_data_to_temp() succeeded (e.g. transpose_memslice()
 * returning 0) leaked the temporary buffer; `tmpdata` is NULL until that
 * allocation and free(NULL) is a no-op, so the unconditional free is safe. */
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
  void *__pyx_v_tmpdata;
  size_t __pyx_v_itemsize;
  int __pyx_v_i;
  char __pyx_v_order;
  int __pyx_v_broadcasting;
  int __pyx_v_direct_copy;
  __Pyx_memviewslice __pyx_v_tmp;
  int __pyx_v_ndim;
  int __pyx_r;
  Py_ssize_t __pyx_t_1;
  int __pyx_t_2;
  int __pyx_t_3;
  int __pyx_t_4;
  int __pyx_t_5;
  void *__pyx_t_6;
  int __pyx_t_7;
  int __pyx_lineno = 0;
  const char *__pyx_filename = NULL;
  int __pyx_clineno = 0;
  /* "View.MemoryView":1238-1243 — locals: no temp buffer yet, itemsize from
   * the buffer view, best memory order of the source, flags cleared */
  __pyx_v_tmpdata = NULL;
  __pyx_t_1 = __pyx_v_src.memview->view.itemsize;
  __pyx_v_itemsize = __pyx_t_1;
  __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);
  __pyx_v_broadcasting = 0;
  __pyx_v_direct_copy = 0;
  /* "View.MemoryView":1246-1249 — align ranks: prepend broadcast dimensions
   * to whichever slice has fewer */
  __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0);
  if (__pyx_t_2) {
    __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);
    goto __pyx_L3;
  }
  __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0);
  if (__pyx_t_2) {
    __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);
    goto __pyx_L3;
  }
  __pyx_L3:;
  /* "View.MemoryView":1251 — ndim = max(src_ndim, dst_ndim) */
  __pyx_t_3 = __pyx_v_dst_ndim;
  __pyx_t_4 = __pyx_v_src_ndim;
  if (((__pyx_t_3 > __pyx_t_4) != 0)) {
    __pyx_t_5 = __pyx_t_3;
  } else {
    __pyx_t_5 = __pyx_t_4;
  }
  __pyx_v_ndim = __pyx_t_5;
  /* "View.MemoryView":1253-1262 — per dimension: extents must match unless
   * src's extent is 1 (then broadcast it with stride 0); any remaining
   * suboffset means an indirect dimension, which is not supported here */
  __pyx_t_5 = __pyx_v_ndim;
  for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_5; __pyx_t_3+=1) {
    __pyx_v_i = __pyx_t_3;
    __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0);
    if (__pyx_t_2) {
      __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0);
      if (__pyx_t_2) {
        __pyx_v_broadcasting = 1;
        (__pyx_v_src.strides[__pyx_v_i]) = 0;
        goto __pyx_L7;
      }
      /*else*/ {
        __pyx_t_4 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1259; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      }
      __pyx_L7:;
      goto __pyx_L6;
    }
    __pyx_L6:;
    __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0);
    if (__pyx_t_2) {
      __pyx_t_4 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, __pyx_k_Dimension_d_is_not_direct, __pyx_v_i); if (unlikely(__pyx_t_4 == -1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1262; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
      goto __pyx_L8;
    }
    __pyx_L8:;
  }
  /* "View.MemoryView":1264-1270 — if source and destination memory overlap,
   * detour the source through a contiguous temporary buffer (owned by
   * tmpdata, freed below) so the copy cannot read already-written bytes */
  __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0);
  if (__pyx_t_2) {
    __pyx_t_2 = ((!(__pyx_memviewslice_is_contig((&__pyx_v_src), __pyx_v_order, __pyx_v_ndim) != 0)) != 0);
    if (__pyx_t_2) {
      __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim);
      goto __pyx_L10;
    }
    __pyx_L10:;
    __pyx_t_6 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_6 == NULL)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1269; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __pyx_v_tmpdata = __pyx_t_6;
    __pyx_v_src = __pyx_v_tmp;
    goto __pyx_L9;
  }
  __pyx_L9:;
  /* "View.MemoryView":1272-1285 — fast path: when not broadcasting and both
   * slices are contiguous in the same order, one memcpy suffices (object
   * refcounts adjusted around the raw byte copy) */
  __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0);
  if (__pyx_t_2) {
    __pyx_t_2 = (__pyx_memviewslice_is_contig((&__pyx_v_src), 'C', __pyx_v_ndim) != 0);
    if (__pyx_t_2) {
      __pyx_v_direct_copy = __pyx_memviewslice_is_contig((&__pyx_v_dst), 'C', __pyx_v_ndim);
      goto __pyx_L12;
    }
    __pyx_t_2 = (__pyx_memviewslice_is_contig((&__pyx_v_src), 'F', __pyx_v_ndim) != 0);
    if (__pyx_t_2) {
      __pyx_v_direct_copy = __pyx_memviewslice_is_contig((&__pyx_v_dst), 'F', __pyx_v_ndim);
      goto __pyx_L12;
    }
    __pyx_L12:;
    __pyx_t_2 = (__pyx_v_direct_copy != 0);
    if (__pyx_t_2) {
      __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
      memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim));
      __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
      /* NOTE: tmpdata is necessarily NULL here — the overlap path produces a
       * contiguous non-broadcast src, which was handled by memcpy above only
       * when dst matches; no allocation precedes broadcasting == 0 checks
       * other than that same path, and free(NULL) in the original flow was
       * skipped by this early return.  Behavior unchanged. */
      __pyx_r = 0;
      goto __pyx_L0;
    }
    goto __pyx_L11;
  }
  __pyx_L11:;
  /* "View.MemoryView":1287-1291 — if both slices are best traversed in
   * Fortran order, transpose both so the generic copy runs cache-friendly */
  __pyx_t_2 = (__pyx_v_order == 'F');
  if (__pyx_t_2) {
    __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim));
  }
  __pyx_t_7 = (__pyx_t_2 != 0);
  if (__pyx_t_7) {
    __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1290; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == 0)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 1291; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
    goto __pyx_L14;
  }
  __pyx_L14:;
  /* "View.MemoryView":1293-1298 — generic strided copy, then release the
   * temporary (if any) and report success */
  __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
  copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);
  __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
  free(__pyx_v_tmpdata);
  __pyx_r = 0;
  goto __pyx_L0;
  /* function exit code — error path: release the temporary buffer (leaked in
   * the original generated code; free(NULL) is a no-op when none was made),
   * then record the traceback under the GIL and report -1 */
  __pyx_L1_error:;
  free(__pyx_v_tmpdata);
  {
    #ifdef WITH_THREAD
    PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
    #endif
    __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename);
    #ifdef WITH_THREAD
    PyGILState_Release(__pyx_gilstate_save);
    #endif
  }
  __pyx_r = -1;
  __pyx_L0:;
  return __pyx_r;
}
/* "View.MemoryView":1301
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *slice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
/* Generated from View.MemoryView:1301 (broadcast_leading).
 * Prepend `ndim_other - ndim` broadcast dimensions of extent 1 to a
 * memoryview slice: existing shape/strides/suboffsets entries are shifted
 * toward the end of their (fixed-size) arrays and the freed leading slots
 * are filled with broadcastable values (shape 1, suboffset -1). */
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_slice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
  int i;
  /* Number of new leading dimensions to insert. */
  const int shift = __pyx_v_ndim_other - __pyx_v_ndim;

  /* Move the existing dimensions to the tail.  Iterate backwards so
   * overlapping source/destination slots are never clobbered. */
  for (i = __pyx_v_ndim - 1; i >= 0; --i) {
    __pyx_v_slice->shape[i + shift] = __pyx_v_slice->shape[i];
    __pyx_v_slice->strides[i + shift] = __pyx_v_slice->strides[i];
    __pyx_v_slice->suboffsets[i + shift] = __pyx_v_slice->suboffsets[i];
  }

  /* Fill the new leading dimensions with extent-1 broadcast entries.
   * The stride value is arbitrary for an extent-1 dim; reuse strides[0]
   * exactly as the original generated code does. */
  for (i = 0; i < shift; ++i) {
    __pyx_v_slice->shape[i] = 1;
    __pyx_v_slice->strides[i] = __pyx_v_slice->strides[0];
    __pyx_v_slice->suboffsets[i] = -1;
  }
}
/* "View.MemoryView":1323
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
/* Generated from View.MemoryView:1323 (refcount_copying).
 * Before/after a raw memory copy into `dst`, adjust refcounts of the
 * PyObject* elements it holds: increment when `inc` is non-zero,
 * decrement otherwise.  A no-op for non-object dtypes. */
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
  if (!__pyx_v_dtype_is_object)
    return;  /* plain C dtype: nothing to refcount */
  /* Object elements need the GIL for INCREF/DECREF; delegate to the
   * GIL-acquiring walker. */
  __pyx_memoryview_refcount_objects_in_slice_with_gil(
      __pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides,
      __pyx_v_ndim, __pyx_v_inc);
}
/* "View.MemoryView":1332
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
/* Generated from View.MemoryView:1332 (refcount_objects_in_slice_with_gil).
 * GIL-acquiring wrapper: ensures the GIL is held, then walks the slice
 * adjusting element refcounts via refcount_objects_in_slice. */
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
  __Pyx_RefNannyDeclarations
  #ifdef WITH_THREAD
  PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();  /* take the GIL (caller may not hold it) */
  #endif
  __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0);
  /* Delegate the actual INCREF/DECREF walk now that the GIL is held. */
  __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc);
  /* function exit code */
  __Pyx_RefNannyFinishContext();
  #ifdef WITH_THREAD
  PyGILState_Release(__pyx_gilstate_save);  /* restore prior GIL state */
  #endif
}
/* "View.MemoryView":1338
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
/* Generated from View.MemoryView:1338 (refcount_objects_in_slice).
 * Recursively walk an ndim-dimensional strided buffer whose elements are
 * PyObject* and Py_INCREF (inc != 0) or Py_DECREF (inc == 0) each one.
 * Must run with the GIL held (uses Py_INCREF/Py_DECREF); callers without
 * the GIL go through the _with_gil wrapper. */
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
  CYTHON_UNUSED Py_ssize_t __pyx_v_i;
  __Pyx_RefNannyDeclarations
  Py_ssize_t __pyx_t_1;
  Py_ssize_t __pyx_t_2;
  int __pyx_t_3;
  __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0);
  /* Iterate over the leading dimension: shape[0] elements (or sub-slices). */
  __pyx_t_1 = (__pyx_v_shape[0]);
  for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) {
    __pyx_v_i = __pyx_t_2;
    /* Base case: in a 1-D slice, `data` points directly at a PyObject*. */
    __pyx_t_3 = ((__pyx_v_ndim == 1) != 0);
    if (__pyx_t_3) {
      __pyx_t_3 = (__pyx_v_inc != 0);
      if (__pyx_t_3) {
        Py_INCREF((((PyObject **)__pyx_v_data)[0]));
        goto __pyx_L6;
      }
      /*else*/ {
        Py_DECREF((((PyObject **)__pyx_v_data)[0]));
      }
      __pyx_L6:;
      goto __pyx_L5;
    }
    /*else*/ {
      /* Recurse into the sub-slice, dropping the leading dimension. */
      __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc);
    }
    __pyx_L5:;
    /* Advance to the next element/sub-slice along dimension 0. */
    __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0]));
  }
  /* function exit code */
  __Pyx_RefNannyFinishContext();
}
/* "View.MemoryView":1358
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
/* Generated from View.MemoryView:1358 (slice_assign_scalar).
 * Assign the `itemsize`-byte scalar at `item` to every element of `dst`.
 * For object dtypes, old elements are DECREF'd before being overwritten
 * and the new copies are INCREF'd afterwards. */
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
  /* Release references to the elements that are about to be clobbered. */
  __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object,
                                    __pyx_v_ndim, 0);
  /* Raw byte-wise fill of the whole strided slice. */
  __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape,
                                        __pyx_v_dst->strides, __pyx_v_ndim,
                                        __pyx_v_itemsize, __pyx_v_item);
  /* Take references on the freshly written copies of `item`. */
  __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object,
                                    __pyx_v_ndim, 1);
}
/* "View.MemoryView":1368
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
/* Generated from View.MemoryView:1368 (_slice_assign_scalar).
 * Recursively memcpy the `itemsize`-byte value at `item` into every
 * element of an ndim-dimensional strided buffer.  Recursion drops one
 * dimension per level; the 1-D base case does the actual stores. */
static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
  char *cursor = __pyx_v_data;
  const Py_ssize_t step = __pyx_v_strides[0];   /* byte distance between elements of dim 0 */
  Py_ssize_t remaining = __pyx_v_shape[0];      /* extent of dim 0 */

  if (__pyx_v_ndim == 1) {
    /* Base case: write the scalar into each element slot. */
    for (; remaining > 0; --remaining, cursor += step) {
      memcpy(cursor, __pyx_v_item, __pyx_v_itemsize);
    }
  } else {
    /* Recurse into each sub-slice along the leading dimension. */
    for (; remaining > 0; --remaining, cursor += step) {
      __pyx_memoryview__slice_assign_scalar(cursor,
                                            __pyx_v_shape + 1,
                                            __pyx_v_strides + 1,
                                            __pyx_v_ndim - 1,
                                            __pyx_v_itemsize,
                                            __pyx_v_item);
    }
  }
}
/* tp_new slot for the internal `array` type: allocate the instance,
 * initialise its object slots to None, then run __cinit__ with the
 * original args/kwargs.  Returns NULL (0) with an exception set on
 * failure. */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
  struct __pyx_array_obj *p;
  PyObject *o;
  if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
    o = (*t->tp_alloc)(t, 0);
  } else {
    /* Abstract subtype: fall back to object.__new__. */
    o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
  }
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_array_obj *)o);
  /* Pre-set object slots so dealloc is safe even if __cinit__ fails. */
  p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
  p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
  if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) {
    Py_DECREF(o); o = 0;
  }
  return o;
}
/* tp_dealloc slot for the internal `array` type: optionally run
 * tp_finalize (PEP 442, Python >= 3.4), invoke the Cython-level
 * __dealloc__, clear object slots, then free the memory. */
static void __pyx_tp_dealloc_array(PyObject *o) {
  struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
  #if PY_VERSION_HEX >= 0x030400a1
  if (unlikely(Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
    /* tp_finalize may resurrect the object; if so, abandon dealloc. */
    if (PyObject_CallFinalizerFromDealloc(o)) return;
  }
  #endif
  {
    /* Save any pending exception and temporarily bump the refcount so
     * __dealloc__ can safely touch the object without re-triggering
     * deallocation. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    ++Py_REFCNT(o);
    __pyx_array___dealloc__(o);
    --Py_REFCNT(o);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->mode);
  Py_CLEAR(p->_format);
  (*Py_TYPE(o)->tp_free)(o);
}
/* sq_item slot for `array`: implement a[i] by boxing the index and
 * routing through the mapping protocol's mp_subscript, so sequence-style
 * access shares one code path with a[key]. */
static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
  PyObject *result;
  PyObject *index = PyInt_FromSsize_t(i);
  if (!index)
    return 0;  /* failed to box the index */
  result = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, index);
  Py_DECREF(index);
  return result;
}
/* mp_ass_subscript slot for `array`: a[i] = v delegates to __setitem__;
 * `del a[i]` (v == NULL) is rejected with NotImplementedError. */
static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
  if (!v) {
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
    return -1;
  }
  return __pyx_array___setitem__(o, i, v);
}
/* tp_getattro slot for `array`: try normal attribute lookup first and,
 * only on AttributeError, fall back to the class's __getattr__. */
static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
  PyObject *attr = PyObject_GenericGetAttr(o, n);
  if (attr)
    return attr;
  if (!PyErr_ExceptionMatches(PyExc_AttributeError))
    return attr;  /* propagate any non-AttributeError failure (attr is NULL) */
  PyErr_Clear();
  return __pyx_array___getattr__(o, n);
}
/* Getter thunk for the `array.memview` property. */
static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
  return get_memview(o);
}
/* Method table for `array` (only the __getattr__ fallback). */
static PyMethodDef __pyx_methods_array[] = {
  {__Pyx_NAMESTR("__getattr__"), (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, __Pyx_DOCSTR(0)},
  {0, 0, 0, 0}
};
/* Property table for `array` (read-only `memview`). */
static struct PyGetSetDef __pyx_getsets_array[] = {
  {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
/* Sequence protocol: only sq_item (a[i]) is provided. */
static PySequenceMethods __pyx_tp_as_sequence_array = {
  0, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_array, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};
/* Mapping protocol: subscript get/set. */
static PyMappingMethods __pyx_tp_as_mapping_array = {
  0, /*mp_length*/
  __pyx_array___getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
};
/* Buffer protocol: export only (no release hook, no legacy Py2 buffers). */
static PyBufferProcs __pyx_tp_as_buffer_array = {
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getreadbuffer*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getwritebuffer*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getsegcount*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getcharbuffer*/
  #endif
  #if PY_VERSION_HEX >= 0x02060000
  __pyx_array_getbuffer, /*bf_getbuffer*/
  #endif
  #if PY_VERSION_HEX >= 0x02060000
  0, /*bf_releasebuffer*/
  #endif
};
/* Static type object for the internal Cython `array` class (not GC-tracked;
 * supports sequence/mapping subscripting and the buffer protocol). */
static PyTypeObject __pyx_type___pyx_array = {
  PyVarObject_HEAD_INIT(0, 0)
  __Pyx_NAMESTR("radiotool.algorithms.par_build_table.array"), /*tp_name*/
  sizeof(struct __pyx_array_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_array, /*tp_dealloc*/
  0, /*tp_print*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #else
  0, /*reserved*/
  #endif
  0, /*tp_repr*/
  0, /*tp_as_number*/
  &__pyx_tp_as_sequence_array, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_array, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  __pyx_tp_getattro_array, /*tp_getattro*/
  0, /*tp_setattro*/
  &__pyx_tp_as_buffer_array, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
  0, /*tp_doc*/
  0, /*tp_traverse*/
  0, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_array, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_array, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  0, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_array, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  #if PY_VERSION_HEX >= 0x02060000
  0, /*tp_version_tag*/
  #endif
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
};
/* tp_new slot for the internal `Enum` sentinel type: allocate and set the
 * `name` slot to None (args/kwargs are ignored here; __init__ fills name). */
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
  struct __pyx_MemviewEnum_obj *p;
  PyObject *o;
  if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
    o = (*t->tp_alloc)(t, 0);
  } else {
    /* Abstract subtype: fall back to object.__new__. */
    o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
  }
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_MemviewEnum_obj *)o);
  p->name = Py_None; Py_INCREF(Py_None);
  return o;
}
/* tp_dealloc slot for `Enum`: optionally run tp_finalize (PEP 442), untrack
 * from GC, drop the `name` reference, free the memory. */
static void __pyx_tp_dealloc_Enum(PyObject *o) {
  struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
  #if PY_VERSION_HEX >= 0x030400a1
  if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
    /* tp_finalize may resurrect the object; if so, abandon dealloc. */
    if (PyObject_CallFinalizerFromDealloc(o)) return;
  }
  #endif
  PyObject_GC_UnTrack(o);
  Py_CLEAR(p->name);
  (*Py_TYPE(o)->tp_free)(o);
}
/* tp_traverse slot for `Enum`: the only GC-visible reference an Enum
 * holds is its `name`, so visit just that. */
static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) {
  struct __pyx_MemviewEnum_obj *self = (struct __pyx_MemviewEnum_obj *)o;
  if (self->name) {
    const int status = (*v)(self->name, a);
    if (status)
      return status;  /* propagate visitor's non-zero stop code */
  }
  return 0;
}
/* tp_clear slot for `Enum`: replace `name` with None before dropping the
 * old reference, so the slot is never left dangling mid-clear. */
static int __pyx_tp_clear_Enum(PyObject *o) {
  struct __pyx_MemviewEnum_obj *self = (struct __pyx_MemviewEnum_obj *)o;
  PyObject *old_name = ((PyObject*)self->name);
  self->name = Py_None;
  Py_INCREF(Py_None);
  Py_XDECREF(old_name);
  return 0;
}
/* Method table for `Enum` (empty — behavior comes from slots). */
static PyMethodDef __pyx_methods_Enum[] = {
  {0, 0, 0, 0}
};
/* Static type object for the internal `Enum` sentinel type (GC-tracked;
 * used by the memoryview machinery for marker values like `generic`). */
static PyTypeObject __pyx_type___pyx_MemviewEnum = {
  PyVarObject_HEAD_INIT(0, 0)
  __Pyx_NAMESTR("radiotool.algorithms.par_build_table.Enum"), /*tp_name*/
  sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_Enum, /*tp_dealloc*/
  0, /*tp_print*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #else
  0, /*reserved*/
  #endif
  __pyx_MemviewEnum___repr__, /*tp_repr*/
  0, /*tp_as_number*/
  0, /*tp_as_sequence*/
  0, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  0, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  0, /*tp_doc*/
  __pyx_tp_traverse_Enum, /*tp_traverse*/
  __pyx_tp_clear_Enum, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_Enum, /*tp_methods*/
  0, /*tp_members*/
  0, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  __pyx_MemviewEnum___init__, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_Enum, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  #if PY_VERSION_HEX >= 0x02060000
  0, /*tp_version_tag*/
  #endif
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
};
/* Storage for the memoryview type's C-method vtable. */
static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview;
/* tp_new slot for `memoryview`: allocate, wire up the vtable pointer,
 * pre-set object slots to None, clear the buffer-view owner, then run
 * __cinit__.  Returns NULL (0) with an exception set on failure. */
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) {
  struct __pyx_memoryview_obj *p;
  PyObject *o;
  if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
    o = (*t->tp_alloc)(t, 0);
  } else {
    /* Abstract subtype: fall back to object.__new__. */
    o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
  }
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_memoryview_obj *)o);
  p->__pyx_vtab = __pyx_vtabptr_memoryview;
  /* Pre-set object slots so dealloc is safe even if __cinit__ fails. */
  p->obj = Py_None; Py_INCREF(Py_None);
  p->_size = Py_None; Py_INCREF(Py_None);
  p->_array_interface = Py_None; Py_INCREF(Py_None);
  p->view.obj = NULL;
  if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) {
    Py_DECREF(o); o = 0;
  }
  return o;
}
/* tp_dealloc slot for `memoryview`: optionally run tp_finalize (PEP 442),
 * untrack from GC, invoke the Cython-level __dealloc__, clear object
 * slots, free the memory. */
static void __pyx_tp_dealloc_memoryview(PyObject *o) {
  struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
  #if PY_VERSION_HEX >= 0x030400a1
  if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
    /* tp_finalize may resurrect the object; if so, abandon dealloc. */
    if (PyObject_CallFinalizerFromDealloc(o)) return;
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Save any pending exception and temporarily bump the refcount so
     * __dealloc__ can safely touch the object without re-entering
     * deallocation. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    ++Py_REFCNT(o);
    __pyx_memoryview___dealloc__(o);
    --Py_REFCNT(o);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->obj);
  Py_CLEAR(p->_size);
  Py_CLEAR(p->_array_interface);
  (*Py_TYPE(o)->tp_free)(o);
}
/* tp_traverse slot for `memoryview`: visit every owned PyObject* slot,
 * including the buffer view's exporting object (view.obj).  Order matches
 * the original generated code. */
static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) {
  struct __pyx_memoryview_obj *self = (struct __pyx_memoryview_obj *)o;
  PyObject *refs[4];
  int k;
  refs[0] = self->obj;
  refs[1] = self->_size;
  refs[2] = self->_array_interface;
  refs[3] = self->view.obj;
  for (k = 0; k < 4; ++k) {
    if (refs[k]) {
      const int status = (*v)(refs[k], a);
      if (status)
        return status;  /* propagate visitor's non-zero stop code */
    }
  }
  return 0;
}
/* tp_clear slot for `memoryview`: swap each object slot to None before
 * releasing the old reference (so slots never dangle mid-clear), and drop
 * the buffer view's owner. */
static int __pyx_tp_clear_memoryview(PyObject *o) {
  PyObject* tmp;
  struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
  tmp = ((PyObject*)p->obj);
  p->obj = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->_size);
  p->_size = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  tmp = ((PyObject*)p->_array_interface);
  p->_array_interface = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  Py_CLEAR(p->view.obj);
  return 0;
}
/* sq_item slot for `memoryview`: implement m[i] by boxing the index and
 * routing through the mapping protocol's mp_subscript. */
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
  PyObject *result;
  PyObject *index = PyInt_FromSsize_t(i);
  if (!index)
    return 0;  /* failed to box the index */
  result = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, index);
  Py_DECREF(index);
  return result;
}
/* mp_ass_subscript slot for `memoryview`: m[i] = v delegates to
 * __setitem__; `del m[i]` (v == NULL) raises NotImplementedError. */
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
  if (v) {
    return __pyx_memoryview___setitem__(o, i, v);
  }
  else {
    PyErr_Format(PyExc_NotImplementedError,
      "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
    return -1;
  }
}
/* Getter thunks for the `memoryview` properties (T, base, shape, strides,
 * suboffsets, ndim, itemsize, nbytes, size) — each forwards to the
 * corresponding generated accessor. */
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryview_transpose(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryview__get__base(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryview_get_shape(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryview_get_strides(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryview_get_suboffsets(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryview_get_ndim(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryview_get_itemsize(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryview_get_nbytes(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryview_get_size(o);
}
/* Method table for `memoryview` (contiguity checks and copy helpers). */
static PyMethodDef __pyx_methods_memoryview[] = {
  {__Pyx_NAMESTR("is_c_contig"), (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, __Pyx_DOCSTR(0)},
  {__Pyx_NAMESTR("is_f_contig"), (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, __Pyx_DOCSTR(0)},
  {__Pyx_NAMESTR("copy"), (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, __Pyx_DOCSTR(0)},
  {__Pyx_NAMESTR("copy_fortran"), (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, __Pyx_DOCSTR(0)},
  {0, 0, 0, 0}
};
/* Property table for `memoryview` (all read-only). */
static struct PyGetSetDef __pyx_getsets_memoryview[] = {
  {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, 0, 0},
  {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, 0, 0},
  {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, 0, 0},
  {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, 0, 0},
  {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, 0, 0},
  {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, 0, 0},
  {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, 0, 0},
  {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, 0, 0},
  {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
/* Sequence protocol: len(m) and m[i]. */
static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
  __pyx_memoryview___len__, /*sq_length*/
  0, /*sq_concat*/
  0, /*sq_repeat*/
  __pyx_sq_item_memoryview, /*sq_item*/
  0, /*sq_slice*/
  0, /*sq_ass_item*/
  0, /*sq_ass_slice*/
  0, /*sq_contains*/
  0, /*sq_inplace_concat*/
  0, /*sq_inplace_repeat*/
};
/* Mapping protocol: len(m), m[key], m[key] = v. */
static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
  __pyx_memoryview___len__, /*mp_length*/
  __pyx_memoryview___getitem__, /*mp_subscript*/
  __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
};
/* Buffer protocol: export only (no release hook, no legacy Py2 buffers). */
static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getreadbuffer*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getwritebuffer*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getsegcount*/
  #endif
  #if PY_MAJOR_VERSION < 3
  0, /*bf_getcharbuffer*/
  #endif
  #if PY_VERSION_HEX >= 0x02060000
  __pyx_memoryview_getbuffer, /*bf_getbuffer*/
  #endif
  #if PY_VERSION_HEX >= 0x02060000
  0, /*bf_releasebuffer*/
  #endif
};
/* Static type object for the internal Cython `memoryview` class
 * (GC-tracked; sequence/mapping access plus buffer export). */
static PyTypeObject __pyx_type___pyx_memoryview = {
  PyVarObject_HEAD_INIT(0, 0)
  __Pyx_NAMESTR("radiotool.algorithms.par_build_table.memoryview"), /*tp_name*/
  sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc_memoryview, /*tp_dealloc*/
  0, /*tp_print*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #else
  0, /*reserved*/
  #endif
  __pyx_memoryview___repr__, /*tp_repr*/
  0, /*tp_as_number*/
  &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
  &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  __pyx_memoryview___str__, /*tp_str*/
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  0, /*tp_doc*/
  __pyx_tp_traverse_memoryview, /*tp_traverse*/
  __pyx_tp_clear_memoryview, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods_memoryview, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets_memoryview, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  0, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new_memoryview, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  #if PY_VERSION_HEX >= 0x02060000
  0, /*tp_version_tag*/
  #endif
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
};
/* Storage for the _memoryviewslice subtype's C-method vtable. */
static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;
/* tp_new slot for `_memoryviewslice`: build the base memoryview first,
 * then swap in the subtype vtable and initialise the extra slots. */
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
  struct __pyx_memoryviewslice_obj *p;
  PyObject *o = __pyx_tp_new_memoryview(t, a, k);
  if (unlikely(!o)) return 0;
  p = ((struct __pyx_memoryviewslice_obj *)o);
  p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
  p->from_object = Py_None; Py_INCREF(Py_None);
  p->from_slice.memview = NULL;  /* no acquired slice yet */
  return o;
}
/* tp_dealloc slot for `_memoryviewslice`: run the subtype __dealloc__,
 * drop its extra slot, then delegate to the base memoryview dealloc. */
static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
  #if PY_VERSION_HEX >= 0x030400a1
  if (unlikely(Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
    /* tp_finalize may resurrect the object; if so, abandon dealloc. */
    if (PyObject_CallFinalizerFromDealloc(o)) return;
  }
  #endif
  PyObject_GC_UnTrack(o);
  {
    /* Save any pending exception and temporarily bump the refcount so
     * __dealloc__ can safely touch the object without re-entering
     * deallocation. */
    PyObject *etype, *eval, *etb;
    PyErr_Fetch(&etype, &eval, &etb);
    ++Py_REFCNT(o);
    __pyx_memoryviewslice___dealloc__(o);
    --Py_REFCNT(o);
    PyErr_Restore(etype, eval, etb);
  }
  Py_CLEAR(p->from_object);
  /* Re-track before delegating: the base dealloc untracks again itself. */
  PyObject_GC_Track(o);
  __pyx_tp_dealloc_memoryview(o);
}
/* tp_traverse slot for `_memoryviewslice`: traverse the base memoryview's
 * references first, then the subtype's extra `from_object` slot. */
static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) {
  struct __pyx_memoryviewslice_obj *self = (struct __pyx_memoryviewslice_obj *)o;
  int status = __pyx_tp_traverse_memoryview(o, v, a);
  if (status)
    return status;
  if (self->from_object) {
    status = (*v)(self->from_object, a);
    if (status)
      return status;
  }
  return 0;
}
/* tp_clear slot for `_memoryviewslice`: clear the base memoryview's slots,
 * swap `from_object` to None, and release the acquired source slice. */
static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
  PyObject* tmp;
  struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
  __pyx_tp_clear_memoryview(o);
  tmp = ((PyObject*)p->from_object);
  p->from_object = Py_None; Py_INCREF(Py_None);
  Py_XDECREF(tmp);
  __PYX_XDEC_MEMVIEW(&p->from_slice, 1);
  return 0;
}
/* Getter thunk for the `_memoryviewslice.base` property. */
static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
  return __pyx_memoryviewslice__get__base(o);
}
/* Method table for `_memoryviewslice` (empty — inherits from memoryview). */
static PyMethodDef __pyx_methods__memoryviewslice[] = {
  {0, 0, 0, 0}
};
/* Property table: `base` overrides the memoryview property. */
static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
  {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, 0, 0},
  {0, 0, 0, 0, 0}
};
/* Static type object for `_memoryviewslice`, the internal subtype used to
 * hand memoryview slices back to Python (tp_base is set elsewhere at
 * module init; repr/str fall back to the base slots except under PyPy). */
static PyTypeObject __pyx_type___pyx_memoryviewslice = {
  PyVarObject_HEAD_INIT(0, 0)
  __Pyx_NAMESTR("radiotool.algorithms.par_build_table._memoryviewslice"), /*tp_name*/
  sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
  0, /*tp_itemsize*/
  __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
  0, /*tp_print*/
  0, /*tp_getattr*/
  0, /*tp_setattr*/
  #if PY_MAJOR_VERSION < 3
  0, /*tp_compare*/
  #else
  0, /*reserved*/
  #endif
  #if CYTHON_COMPILING_IN_PYPY
  __pyx_memoryview___repr__, /*tp_repr*/
  #else
  0, /*tp_repr*/
  #endif
  0, /*tp_as_number*/
  0, /*tp_as_sequence*/
  0, /*tp_as_mapping*/
  0, /*tp_hash*/
  0, /*tp_call*/
  #if CYTHON_COMPILING_IN_PYPY
  __pyx_memoryview___str__, /*tp_str*/
  #else
  0, /*tp_str*/
  #endif
  0, /*tp_getattro*/
  0, /*tp_setattro*/
  0, /*tp_as_buffer*/
  Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
  __Pyx_DOCSTR("Internal class for passing memoryview slices to Python"), /*tp_doc*/
  __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
  __pyx_tp_clear__memoryviewslice, /*tp_clear*/
  0, /*tp_richcompare*/
  0, /*tp_weaklistoffset*/
  0, /*tp_iter*/
  0, /*tp_iternext*/
  __pyx_methods__memoryviewslice, /*tp_methods*/
  0, /*tp_members*/
  __pyx_getsets__memoryviewslice, /*tp_getset*/
  0, /*tp_base*/
  0, /*tp_dict*/
  0, /*tp_descr_get*/
  0, /*tp_descr_set*/
  0, /*tp_dictoffset*/
  0, /*tp_init*/
  0, /*tp_alloc*/
  __pyx_tp_new__memoryviewslice, /*tp_new*/
  0, /*tp_free*/
  0, /*tp_is_gc*/
  0, /*tp_bases*/
  0, /*tp_mro*/
  0, /*tp_cache*/
  0, /*tp_subclasses*/
  0, /*tp_weaklist*/
  0, /*tp_del*/
  #if PY_VERSION_HEX >= 0x02060000
  0, /*tp_version_tag*/
  #endif
  #if PY_VERSION_HEX >= 0x030400a1
  0, /*tp_finalize*/
  #endif
};
/* Module-level method table: the single public entry point `build_table`. */
static PyMethodDef __pyx_methods[] = {
  {__Pyx_NAMESTR("build_table"), (PyCFunction)__pyx_pw_9radiotool_10algorithms_15par_build_table_1build_table, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)},
  {0, 0, 0, 0}
};
/* Python 3 module definition (Python 2 uses Py_InitModule instead). */
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef __pyx_moduledef = {
  #if PY_VERSION_HEX < 0x03020000
  { PyObject_HEAD_INIT(NULL) NULL, 0, NULL },
  #else
  PyModuleDef_HEAD_INIT,
  #endif
  __Pyx_NAMESTR("par_build_table"),
  0, /* m_doc */
  -1, /* m_size */
  __pyx_methods /* m_methods */,
  NULL, /* m_reload */
  NULL, /* m_traverse */
  NULL, /* m_clear */
  NULL /* m_free */
};
#endif
/* Interned-string table, walked once by __Pyx_InitStrings() at module
 * init to materialise one Python string object per entry into the
 * pointed-to global (__pyx_n_s_* = interned identifier, __pyx_kp_s_* =
 * non-interned string constant, __pyx_n_b_* = bytes).
 * NOTE(review): the trailing flag triple appears to be
 * (is_unicode, is_str, intern) preceded by an encoding field -- confirm
 * against the __Pyx_StringTabEntry declaration earlier in this file.
 * The all-zero entry terminates the table. */
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1},
{&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0},
{&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0},
{&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1},
{&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0},
{&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
{&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0},
{&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
{&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0},
{&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0},
{&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1},
{&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0},
{&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
{&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1},
{&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1},
{&__pyx_n_b_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 0, 1},
{&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
{&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1},
{&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1},
{&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0},
{&__pyx_n_s_d, __pyx_k_d, sizeof(__pyx_k_d), 0, 0, 1, 1},
{&__pyx_n_s_decode, __pyx_k_decode, sizeof(__pyx_k_decode), 0, 0, 1, 1},
{&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1},
{&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1},
{&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
{&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1},
{&__pyx_n_s_first_pause, __pyx_k_first_pause, sizeof(__pyx_k_first_pause), 0, 0, 1, 1},
{&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1},
{&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
{&__pyx_n_b_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 0, 1},
{&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1},
{&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0},
{&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
{&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1},
{&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_max_beats, __pyx_k_max_beats, sizeof(__pyx_k_max_beats), 0, 0, 1, 1},
{&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1},
{&__pyx_n_s_min_beats, __pyx_k_min_beats, sizeof(__pyx_k_min_beats), 0, 0, 1, 1},
{&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
{&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1},
{&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1},
{&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1},
{&__pyx_n_s_penalty, __pyx_k_penalty, sizeof(__pyx_k_penalty), 0, 0, 1, 1},
{&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
{&__pyx_n_s_pyx_releasebuffer, __pyx_k_pyx_releasebuffer, sizeof(__pyx_k_pyx_releasebuffer), 0, 0, 1, 1},
{&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
{&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
{&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1},
{&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1},
{&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1},
{&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0},
{&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_n_s_trans_cost, __pyx_k_trans_cost, sizeof(__pyx_k_trans_cost), 0, 0, 1, 1},
{&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0},
{&__pyx_kp_s_unable_to_allocate_shape_or_stri, __pyx_k_unable_to_allocate_shape_or_stri, sizeof(__pyx_k_unable_to_allocate_shape_or_stri), 0, 0, 1, 0},
{&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1},
{&__pyx_n_s_xrange, __pyx_k_xrange, sizeof(__pyx_k_xrange), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
/* Look up and cache the builtin callables and exception types the
 * generated code uses (range/xrange, MemoryError, ValueError,
 * enumerate, Ellipsis, TypeError, id, IndexError). On Python 3 the
 * `xrange` cache deliberately resolves to `range`, since Py3 removed
 * xrange. Returns 0 on success; on failure records the originating
 * .pyx location and returns -1 with a Python exception set. */
static int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#if PY_MAJOR_VERSION >= 3
__pyx_builtin_xrange = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_builtin_xrange = __Pyx_GetBuiltinName(__pyx_n_s_xrange); if (!__pyx_builtin_xrange) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 233; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
__pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 145; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 363; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 392; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 577; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 797; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
return 0;
__pyx_L1_error:;
return -1;
}
/* Build and cache, once at import time, every constant tuple the module
 * needs at runtime: the one-element argument tuples for array('d') and
 * array('i') construction in par_build_table.pyx, and the message /
 * slice tuples used by the View.MemoryView support code. Each tuple is
 * created, GOTREF'd for refnanny bookkeeping, and GIVEREF'd into its
 * global. Returns 0 on success, -1 with a Python exception set (and the
 * originating .pyx location recorded) on failure. */
static int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "radiotool/algorithms/par_build_table.pyx":512
 *
 * # double arrays for use throughout the computation
 * cdef array dtemplate = array('d') # <<<<<<<<<<<<<<
 * cdef array array1, array2, array3, array4, array5, array6, array7, array8
 * cdef double[:] mv1, mv2, mv3, mv4, mv5, mv6, f, g
 */
__pyx_tuple_ = PyTuple_Pack(1, __pyx_n_s_d); if (unlikely(!__pyx_tuple_)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 512; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple_);
__Pyx_GIVEREF(__pyx_tuple_);
/* "radiotool/algorithms/par_build_table.pyx":532
 * mv6 = array8
 *
 * cdef array ar, template = array('i') # <<<<<<<<<<<<<<
 * ar = clone(template, penalty.shape[1], False)
 * cdef int[:] global_path = ar
 */
__pyx_tuple__2 = PyTuple_Pack(1, __pyx_n_s_i); if (unlikely(!__pyx_tuple__2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 532; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
/* "View.MemoryView":124
 *
 * if not self.ndim:
 * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
 *
 * if self.itemsize <= 0:
 */
__pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__3)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__3);
__Pyx_GIVEREF(__pyx_tuple__3);
/* "View.MemoryView":127
 *
 * if self.itemsize <= 0:
 * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
 *
 * encode = getattr(format, 'encode', None)
 */
__pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__4)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 127; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
/* "View.MemoryView":131
 * encode = getattr(format, 'encode', None)
 * if encode:
 * format = encode('ASCII') # <<<<<<<<<<<<<<
 * self._format = format
 * self.format = self._format
 */
__pyx_tuple__5 = PyTuple_Pack(1, __pyx_n_s_ASCII); if (unlikely(!__pyx_tuple__5)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
/* "View.MemoryView":141
 * free(self._shape)
 * free(self._strides)
 * raise MemoryError("unable to allocate shape or strides.") # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_or_stri); if (unlikely(!__pyx_tuple__6)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "View.MemoryView":166
 * decode = getattr(mode, 'decode', None)
 * if decode:
 * mode = decode('ASCII') # <<<<<<<<<<<<<<
 * self.mode = mode
 *
 */
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_n_s_ASCII); if (unlikely(!__pyx_tuple__7)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 166; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "View.MemoryView":174
 * self.data = <char *>malloc(self.len)
 * if not self.data:
 * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
 *
 * if self.dtype_is_object:
 */
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__8)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 174; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "View.MemoryView":190
 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
 * if not (flags & bufmode):
 * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
 * info.buf = self.data
 * info.len = self.len
 */
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__9)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 190; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "View.MemoryView":453
 * result = struct.unpack(self.view.format, bytesitem)
 * except struct.error:
 * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
 * else:
 * if len(self.view.format) == 1:
 */
__pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__10)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 453; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
/* "View.MemoryView":529
 * if self.view.strides == NULL:
 *
 * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
 *
 * return tuple([self.view.strides[i] for i in xrange(self.view.ndim)])
 */
__pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__11)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 529; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
/* "View.MemoryView":646
 * if item is Ellipsis:
 * if not seen_ellipsis:
 * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
 * seen_ellipsis = True
 * else:
 */
__pyx_tuple__12 = PyTuple_Pack(1, Py_None); if (unlikely(!__pyx_tuple__12)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 646; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__12);
__Pyx_GIVEREF(__pyx_tuple__12);
/* "View.MemoryView":649
 * seen_ellipsis = True
 * else:
 * result.append(slice(None)) # <<<<<<<<<<<<<<
 * have_slices = True
 * else:
 */
__pyx_tuple__13 = PyTuple_Pack(1, Py_None); if (unlikely(!__pyx_tuple__13)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 649; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__13);
__Pyx_GIVEREF(__pyx_tuple__13);
/* "View.MemoryView":660
 * nslices = ndim - len(result)
 * if nslices:
 * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
 *
 * return have_slices or nslices, tuple(result)
 */
__pyx_tuple__14 = PyTuple_Pack(1, Py_None); if (unlikely(!__pyx_tuple__14)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 660; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__14);
__Pyx_GIVEREF(__pyx_tuple__14);
/* "View.MemoryView":668
 * for i in range(ndim):
 * if suboffsets[i] >= 0:
 * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__15)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 668; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__15);
__Pyx_GIVEREF(__pyx_tuple__15);
/* "View.MemoryView":282
 * return self.name
 *
 * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
 * cdef strided = Enum("<strided and direct>") # default
 * cdef indirect = Enum("<strided and indirect>")
 */
__pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__16)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 282; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__16);
__Pyx_GIVEREF(__pyx_tuple__16);
/* "View.MemoryView":283
 *
 * cdef generic = Enum("<strided and direct or indirect>")
 * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
 * cdef indirect = Enum("<strided and indirect>")
 *
 */
__pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__17)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__17);
__Pyx_GIVEREF(__pyx_tuple__17);
/* "View.MemoryView":284
 * cdef generic = Enum("<strided and direct or indirect>")
 * cdef strided = Enum("<strided and direct>") # default
 * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__18)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__18);
__Pyx_GIVEREF(__pyx_tuple__18);
/* "View.MemoryView":287
 *
 *
 * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
 * cdef indirect_contiguous = Enum("<contiguous and indirect>")
 *
 */
__pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__19)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 287; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__19);
__Pyx_GIVEREF(__pyx_tuple__19);
/* "View.MemoryView":288
 *
 * cdef contiguous = Enum("<contiguous and direct>")
 * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__20)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_tuple__20);
__Pyx_GIVEREF(__pyx_tuple__20);
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
/* Initialise interpreter-wide globals used by the generated code:
 * ensure the GIL/threading machinery is set up (when built with
 * threads), intern the whole string table, and pre-create the small
 * integer constants 0, 1 and -1. Returns 0 on success, -1 with a
 * Python exception set on failure. */
static int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#ifdef WITH_THREAD
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
return 0;
__pyx_L1_error:;
return -1;
}
/* Module initialisation entry point: initpar_build_table on Python 2,
 * PyInit_par_build_table on Python 3. Order matters throughout:
 *   1. import the refnanny debug API (optional),
 *   2. create the module object and register it in sys.modules under
 *      its fully-qualified name "radiotool.algorithms.par_build_table",
 *   3. run __Pyx_InitGlobals / InitCachedBuiltins / InitCachedConstants,
 *   4. ready the memoryview/array helper types and wire their vtables,
 *   5. import cpython.array's arrayobject type,
 *   6. execute the module-level code (creates __test__ and the
 *      MemoryView Enum singletons generic/strided/indirect/contiguous/
 *      indirect_contiguous, and installs the __pyx_getbuffer capsules).
 * On failure the partially-initialised module is torn down and an
 * ImportError (with traceback) is reported; Python 3 returns the module
 * object or NULL, Python 2 returns void. */
#if PY_MAJOR_VERSION < 3
PyMODINIT_FUNC initpar_build_table(void); /*proto*/
PyMODINIT_FUNC initpar_build_table(void)
#else
PyMODINIT_FUNC PyInit_par_build_table(void); /*proto*/
PyMODINIT_FUNC PyInit_par_build_table(void)
#endif
{
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannyDeclarations
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_par_build_table(void)", 0);
if ( __Pyx_check_binary_version() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#ifdef __Pyx_CyFunction_USED
if (__Pyx_CyFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4(__Pyx_NAMESTR("par_build_table"), __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME)); if (unlikely(!__pyx_b)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#if CYTHON_COMPILING_IN_PYPY
Py_INCREF(__pyx_b);
#endif
if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
/*--- Initialize various global constants etc. ---*/
if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
if (__pyx_module_is_main_radiotool__algorithms__par_build_table) {
if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
if (!PyDict_GetItemString(modules, "radiotool.algorithms.par_build_table")) {
if (unlikely(PyDict_SetItemString(modules, "radiotool.algorithms.par_build_table", __pyx_m) < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
}
#endif
/*--- Builtin init code ---*/
if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Constants init code ---*/
if (unlikely(__Pyx_InitCachedConstants() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Global init code ---*/
generic = Py_None; Py_INCREF(Py_None);
strided = Py_None; Py_INCREF(Py_None);
indirect = Py_None; Py_INCREF(Py_None);
contiguous = Py_None; Py_INCREF(Py_None);
indirect_contiguous = Py_None; Py_INCREF(Py_None);
/*--- Variable export code ---*/
/*--- Function export code ---*/
/*--- Type init code ---*/
if (PyType_Ready(&__pyx_type___pyx_array) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 96; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_type___pyx_array.tp_print = 0;
__pyx_array_type = &__pyx_type___pyx_array;
if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 275; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_type___pyx_MemviewEnum.tp_print = 0;
__pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
__pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
__pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
__pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
__pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
__pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
__pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
__pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 308; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_type___pyx_memoryview.tp_print = 0;
if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 308; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_memoryview_type = &__pyx_type___pyx_memoryview;
__pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
__pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
__pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
__pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
__pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 930; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_type___pyx_memoryviewslice.tp_print = 0;
if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 930; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
/*--- Type import code ---*/
__pyx_ptype_7cpython_5array_array = __Pyx_ImportType("array", "array", sizeof(arrayobject), 0); if (unlikely(!__pyx_ptype_7cpython_5array_array)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 58; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
/*--- Variable import code ---*/
/*--- Function import code ---*/
/*--- Execution code ---*/
/* "radiotool/algorithms/par_build_table.pyx":1
 * #cython: infer_types=True # <<<<<<<<<<<<<<
 * #cython: boundscheck=False
 * #cython: wraparound=False
 */
__pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":207
 * info.obj = self
 *
 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
 *
 * def __dealloc__(array self):
 */
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 207; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_array_type);
/* "View.MemoryView":282
 * return self.name
 *
 * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
 * cdef strided = Enum("<strided and direct>") # default
 * cdef indirect = Enum("<strided and indirect>")
 */
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 282; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(generic);
__Pyx_DECREF_SET(generic, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":283
 *
 * cdef generic = Enum("<strided and direct or indirect>")
 * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
 * cdef indirect = Enum("<strided and indirect>")
 *
 */
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(strided);
__Pyx_DECREF_SET(strided, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":284
 * cdef generic = Enum("<strided and direct or indirect>")
 * cdef strided = Enum("<strided and direct>") # default
 * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 284; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect);
__Pyx_DECREF_SET(indirect, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":287
 *
 *
 * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
 * cdef indirect_contiguous = Enum("<contiguous and indirect>")
 *
 */
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 287; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(contiguous);
__Pyx_DECREF_SET(contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":288
 *
 * cdef contiguous = Enum("<contiguous and direct>")
 * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)((PyObject *)__pyx_MemviewEnum_type)), __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 288; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect_contiguous);
__Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":504
 * info.obj = self
 *
 * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 504; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 504; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryview_type);
/* "View.MemoryView":961
 * return self.from_object
 *
 * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
 *
 *
 */
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), __pyx_k_getbuffer_obj_view_flags); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 961; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[2]; __pyx_lineno = 961; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryviewslice_type);
/* "View.MemoryView":1368
 *
 * @cname('__pyx_memoryview__slice_assign_scalar')
 * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
 * Py_ssize_t *strides, int ndim,
 * size_t itemsize, void *item) nogil:
 */
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
if (__pyx_m) {
__Pyx_AddTraceback("init radiotool.algorithms.par_build_table", __pyx_clineno, __pyx_lineno, __pyx_filename);
Py_DECREF(__pyx_m); __pyx_m = 0;
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init radiotool.algorithms.par_build_table");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if PY_MAJOR_VERSION < 3
return;
#else
return __pyx_m;
#endif
}
/* Runtime support code */
#if CYTHON_REFNANNY
/* Import the RefNanny debugging API: load module `modname`, read its
 * "RefNannyAPI" attribute, and decode the PyLong capsule back into a
 * struct pointer. Returns NULL (with a Python error set) on any failure. */
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
    __Pyx_RefNannyAPIStruct *api = NULL;
    PyObject *module = PyImport_ImportModule((char *)modname);
    if (module) {
        PyObject *capi = PyObject_GetAttrString(module, (char *)"RefNannyAPI");
        if (capi) {
            api = (__Pyx_RefNannyAPIStruct *)PyLong_AsVoidPtr(capi);
            Py_DECREF(capi);
        }
        Py_DECREF(module);
    }
    return api;
}
#endif /* CYTHON_REFNANNY */
/* Look up `name` on the builtins module object (__pyx_b). On failure,
 * replace the AttributeError with the interpreter-style NameError message.
 * Returns a new reference, or NULL with NameError set. */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
    PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
    if (unlikely(!result)) {
        PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
            /* %U formats a unicode object directly (Py3 names are unicode) */
            "name '%U' is not defined", name);
#else
            "name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
    }
    return result;
}
/* Runtime endianness probe: store 1 in an unsigned int and examine the
 * lowest-addressed byte; it is non-zero exactly on little-endian targets. */
static CYTHON_INLINE int __Pyx_IsLittleEndian(void) {
    union {
        unsigned int word;
        unsigned char bytes[sizeof(unsigned int)];
    } probe;
    probe.word = 1u;
    return probe.bytes[0] != 0;
}
/* Prepare a buffer-format parsing context for checking a PEP-3118 format
 * string against the Cython dtype tree rooted at `type`. `stack` provides
 * the storage used while descending into nested struct fields.
 * '@' is the struct-module default (native byte order and alignment). */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
                              __Pyx_BufFmt_StackElem* stack,
                              __Pyx_TypeInfo* type) {
    stack[0].field = &ctx->root;
    stack[0].parent_offset = 0;
    ctx->root.type = type;
    ctx->root.name = "buffer dtype";
    ctx->root.offset = 0;
    ctx->head = stack;
    ctx->head->field = &ctx->root;
    ctx->fmt_offset = 0;
    ctx->head->parent_offset = 0;
    ctx->new_packmode = '@';
    ctx->enc_packmode = '@';
    ctx->new_count = 1;
    ctx->enc_count = 0;
    ctx->enc_type = 0;
    ctx->is_complex = 0;
    ctx->is_valid_array = 0;
    ctx->struct_alignment = 0;
    /* Skip past any leading struct ('S') wrappers so head starts at the
       first concrete field that a format character can match. */
    while (type->typegroup == 'S') {
        ++ctx->head;
        ctx->head->field = type->fields;
        ctx->head->parent_offset = 0;
        type = type->fields->type;
    }
}
/* Parse a non-negative decimal integer at *ts, advancing *ts past the
 * digits consumed. Returns the value, or -1 if the first character is not
 * a digit (in which case *ts is left unchanged).
 *
 * BUG FIX: the continuation-loop condition was `*t < '9'`, which excludes
 * the digit '9' itself, so e.g. "199" parsed as 1 with "99" left behind.
 * The correct bound is `*t <= '9'`. */
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
    int count;
    const char* t = *ts;
    if (*t < '0' || *t > '9') {
        return -1;
    } else {
        count = *t++ - '0';
        while (*t >= '0' && *t <= '9') {
            count *= 10;
            count += *t++ - '0';
        }
    }
    *ts = t;
    return count;
}
/* Like __Pyx_BufFmt_ParseNumber, but sets a ValueError describing the
 * offending character when no digit is found. Returns the value or -1. */
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
    int number = __Pyx_BufFmt_ParseNumber(ts);
    if (number == -1) /* First char was not a digit */
        PyErr_Format(PyExc_ValueError,\
                     "Does not understand character buffer dtype format string ('%c')", **ts);
    return number;
}
/* Raise ValueError for a format character the parser cannot interpret. */
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
    PyErr_Format(PyExc_ValueError,
                 "Unexpected format string character: '%c'", ch);
}
/* Return a human-readable C type name for one struct-module format code.
 * Used only for constructing error messages; `is_complex` selects the
 * "complex X" spelling for the floating-point codes f/d/g. */
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
    /* Handle the float codes first — they are the only ones that depend
       on the is_complex flag. */
    if (ch == 'f') return (is_complex ? "'complex float'" : "'float'");
    if (ch == 'd') return (is_complex ? "'complex double'" : "'double'");
    if (ch == 'g') return (is_complex ? "'complex long double'" : "'long double'");
    switch (ch) {
        case 'c': return "'char'";
        case 'b': return "'signed char'";
        case 'B': return "'unsigned char'";
        case 'h': return "'short'";
        case 'H': return "'unsigned short'";
        case 'i': return "'int'";
        case 'I': return "'unsigned int'";
        case 'l': return "'long'";
        case 'L': return "'unsigned long'";
        case 'q': return "'long long'";
        case 'Q': return "'unsigned long long'";
        case 'T': return "a struct";
        case 'O': return "Python object";
        case 'P': return "a pointer";
        case 's':
        case 'p': return "a string";
        case 0:   return "end";
        default:  return "unparseable format string";
    }
}
/* Size in bytes of one element for format code `ch` under the struct
 * module's "standard" (fixed, platform-independent) size rules.
 * Returns 0 with a ValueError set for unsupported/unknown codes. */
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
        case 'h': case 'H': return 2;
        case 'i': case 'I': case 'l': case 'L': return 4;
        case 'q': case 'Q': return 8;
        case 'f': return (is_complex ? 8 : 4);   /* complex = two floats */
        case 'd': return (is_complex ? 16 : 8);
        case 'g': {
            /* 'g' has no standard size in the struct module spec. */
            PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
            return 0;
        }
        case 'O': case 'P': return sizeof(void*);
        default:
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
    }
}
/* Size in bytes of one element for format code `ch` under native ('@'/'^')
 * sizing, i.e. the compiler's actual type sizes on this platform. */
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
    switch (ch) {
        case 'c': case 'b': case 'B': case 's': case 'p': return 1;
        case 'h': case 'H': return sizeof(short);
        case 'i': case 'I': return sizeof(int);
        case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
        case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
        /* complex types occupy two adjacent scalars */
        case 'f': return sizeof(float) * (is_complex ? 2 : 1);
        case 'd': return sizeof(double) * (is_complex ? 2 : 1);
        case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
        case 'O': case 'P': return sizeof(void*);
        default: {
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
        }
    }
}
/* Probe structs: sizeof(__Pyx_st_T) - sizeof(T) yields the alignment the
 * compiler requires for T when preceded by a char (i.e. T's alignment). */
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
/* Native alignment requirement (in bytes) for format code `ch`.
 * Returns 0 with a ValueError set for unknown codes. */
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
        case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
        case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
        case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
        case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
        case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
        case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
        case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
        case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
        default:
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
    }
}
/* These are for computing the padding at the end of the struct to align
   on the first member of the struct. This will probably the same as above,
   but we don't have any guarantees.
 */
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
/* Trailing-padding alignment (in bytes) for format code `ch`, measured
 * via the __Pyx_pad_* probe structs above. Returns 0 + ValueError on
 * unknown codes. */
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
    switch (ch) {
        case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
        case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
        case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
        case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
        case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
        case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
        case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
        case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
        case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
        default:
            __Pyx_BufFmt_RaiseUnexpectedChar(ch);
            return 0;
    }
}
/* Map a struct-module format code onto Cython's internal type-group tag:
 * 'H' char, 'I' signed int, 'U' unsigned int, 'R' real float, 'C' complex
 * float, 'O' Python object, 'P' pointer. Returns 0 with a ValueError set
 * for codes outside this set. */
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
    if (ch == 'c')
        return 'H';
    if (ch == 'b' || ch == 'h' || ch == 'i' ||
        ch == 'l' || ch == 'q' || ch == 's' || ch == 'p')
        return 'I';
    if (ch == 'B' || ch == 'H' || ch == 'I' || ch == 'L' || ch == 'Q')
        return 'U';
    if (ch == 'f' || ch == 'd' || ch == 'g')
        return (is_complex ? 'C' : 'R');
    if (ch == 'O')
        return 'O';
    if (ch == 'P')
        return 'P';
    __Pyx_BufFmt_RaiseUnexpectedChar(ch);
    return 0;
}
/* Raise a ValueError describing a dtype mismatch at the parser's current
 * position: either against the root dtype (or "end" when the whole format
 * was already consumed), or against a named field inside a struct. */
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
    if (ctx->head == NULL || ctx->head->field == &ctx->root) {
        const char* expected;
        const char* quote;
        if (ctx->head == NULL) {
            /* Format string continued past the end of the dtype. */
            expected = "end";
            quote = "";
        } else {
            expected = ctx->head->field->type->name;
            quote = "'";
        }
        PyErr_Format(PyExc_ValueError,
                     "Buffer dtype mismatch, expected %s%s%s but got %s",
                     quote, expected, quote,
                     __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
    } else {
        /* Inside a struct: report both the field and its parent struct. */
        __Pyx_StructField* field = ctx->head->field;
        __Pyx_StructField* parent = (ctx->head - 1)->field;
        PyErr_Format(PyExc_ValueError,
                     "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
                     field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
                     parent->type->name, field->name);
    }
}
/* Consume the currently-buffered run of identical format characters
 * (ctx->enc_type repeated ctx->enc_count times) and match it against the
 * expected dtype fields, advancing ctx->head and ctx->fmt_offset.
 * Returns 0 on success, -1 with a ValueError set on mismatch. */
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
    char group;
    size_t size, offset, arraysize = 1;
    /* empty chunk — nothing buffered yet */
    if (ctx->enc_type == 0) return 0;
    /* Check that the buffered chunk matches an expected array dimension. */
    if (ctx->head->field->type->arraysize[0]) {
        int i, ndim = 0;
        /* string types ('s'/'p') use the repeat count as a 1-D length */
        if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
            ctx->is_valid_array = ctx->head->field->type->ndim == 1;
            ndim = 1;
            if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
                PyErr_Format(PyExc_ValueError,
                             "Expected a dimension of size %zu, got %zu",
                             ctx->head->field->type->arraysize[0], ctx->enc_count);
                return -1;
            }
        }
        if (!ctx->is_valid_array) {
            PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
                         ctx->head->field->type->ndim, ndim);
            return -1;
        }
        for (i = 0; i < ctx->head->field->type->ndim; i++) {
            arraysize *= ctx->head->field->type->arraysize[i];
        }
        ctx->is_valid_array = 0;
        ctx->enc_count = 1;
    }
    group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
    do {
        __Pyx_StructField* field = ctx->head->field;
        __Pyx_TypeInfo* type = field->type;
        /* '@'/'^' use native sizes; '='/'<'/'>'/'!' use standard sizes. */
        if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
            size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
        } else {
            size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
        }
        if (ctx->enc_packmode == '@') {
            /* Native mode also implies alignment padding before the field. */
            size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
            size_t align_mod_offset;
            if (align_at == 0) return -1;
            align_mod_offset = ctx->fmt_offset % align_at;
            if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
            if (ctx->struct_alignment == 0)
                ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
                                                                       ctx->is_complex);
        }
        if (type->size != size || type->typegroup != group) {
            /* A complex dtype may alternatively be described as a struct of
               two scalars — descend into its fields and retry the match. */
            if (type->typegroup == 'C' && type->fields != NULL) {
                size_t parent_offset = ctx->head->parent_offset + field->offset;
                ++ctx->head;
                ctx->head->field = type->fields;
                ctx->head->parent_offset = parent_offset;
                continue;
            }
            /* char vs signed/unsigned char of equal size is acceptable. */
            if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
            } else {
                __Pyx_BufFmt_RaiseExpected(ctx);
                return -1;
            }
        }
        offset = ctx->head->parent_offset + field->offset;
        if (ctx->fmt_offset != offset) {
            PyErr_Format(PyExc_ValueError,
                         "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
                         (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
            return -1;
        }
        ctx->fmt_offset += size;
        if (arraysize)
            ctx->fmt_offset += (arraysize - 1) * size;
        --ctx->enc_count; /* Consume from buffer string */
        /* Advance head to the next expected field, popping/pushing the
           struct traversal stack as field lists end or nest. */
        while (1) {
            if (field == &ctx->root) {
                ctx->head = NULL;
                if (ctx->enc_count != 0) {
                    __Pyx_BufFmt_RaiseExpected(ctx);
                    return -1;
                }
                break; /* breaks both loops as ctx->enc_count == 0 */
            }
            ctx->head->field = ++field;
            if (field->type == NULL) {
                /* end of this struct's field list — pop back to parent */
                --ctx->head;
                field = ctx->head->field;
                continue;
            } else if (field->type->typegroup == 'S') {
                /* nested struct — push its first field */
                size_t parent_offset = ctx->head->parent_offset + field->offset;
                if (field->type->fields->type == NULL) continue; /* empty struct */
                field = field->type->fields;
                ++ctx->head;
                ctx->head->field = field;
                ctx->head->parent_offset = parent_offset;
                break;
            } else {
                break;
            }
        }
    } while (ctx->enc_count);
    ctx->enc_type = 0;
    ctx->is_complex = 0;
    return 0;
}
/* Parse a parenthesized array-dimension spec "(d1,d2,...)" in a PEP-3118
 * format string (ts enters pointing at '('), validating each dimension
 * against the expected dtype's arraysize. On success returns Py_None
 * (borrowed sentinel, no refcount taken) and advances *tsp past ')';
 * on failure returns NULL with a ValueError set.
 *
 * BUG FIX: the whitespace cases executed `continue` without advancing
 * `ts`, so any whitespace inside the parentheses spun this loop forever.
 * The character is now consumed before continuing. */
static CYTHON_INLINE PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
    const char *ts = *tsp;
    int i = 0, number;
    int ndim = ctx->head->field->type->ndim;
    ++ts;  /* skip '(' */
    if (ctx->new_count != 1) {
        PyErr_SetString(PyExc_ValueError,
                        "Cannot handle repeated arrays in format string");
        return NULL;
    }
    /* flush any pending scalar chunk before matching dimensions */
    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
    while (*ts && *ts != ')') {
        switch (*ts) {
            case ' ': case '\f': case '\r': case '\n': case '\t': case '\v':
                ++ts;  /* consume the whitespace, then rescan */
                continue;
            default: break; /* not a 'break' in the loop */
        }
        number = __Pyx_BufFmt_ExpectNumber(&ts);
        if (number == -1) return NULL;
        if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
            return PyErr_Format(PyExc_ValueError,
                                "Expected a dimension of size %zu, got %d",
                                ctx->head->field->type->arraysize[i], number);
        if (*ts != ',' && *ts != ')')
            return PyErr_Format(PyExc_ValueError,
                                "Expected a comma in format string, got '%c'", *ts);
        if (*ts == ',') ts++;
        i++;
    }
    if (i != ndim)
        return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
                            ctx->head->field->type->ndim, i);
    if (!*ts) {
        PyErr_SetString(PyExc_ValueError,
                        "Unexpected end of format string, expected ')'");
        return NULL;
    }
    ctx->is_valid_array = 1;
    ctx->new_count = 1;
    *tsp = ++ts;  /* step past ')' */
    return Py_None;
}
/* Validate one complete PEP-3118 format string `ts` against the dtype in
 * `ctx`. Identical consecutive codes are accumulated into chunks which are
 * flushed through __Pyx_BufFmt_ProcessTypeChunk. Returns the position after
 * the parsed portion on success (used for nested 'T{...}' substructs), or
 * NULL with a ValueError set on mismatch. */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
    int got_Z = 0;  /* saw a 'Z' complex prefix for the next float code */
    while (1) {
        switch(*ts) {
            case 0:
                /* end of format: flush pending chunk, then require that the
                   whole dtype has been consumed (head == NULL). */
                if (ctx->enc_type != 0 && ctx->head == NULL) {
                    __Pyx_BufFmt_RaiseExpected(ctx);
                    return NULL;
                }
                if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
                if (ctx->head != NULL) {
                    __Pyx_BufFmt_RaiseExpected(ctx);
                    return NULL;
                }
                return ts;
            case ' ':
            case 10:
            case 13:
                /* skip insignificant whitespace (space, LF, CR) */
                ++ts;
                break;
            case '<':
                /* explicit little-endian: only valid on LE hosts since the
                   checker never byte-swaps. */
                if (!__Pyx_IsLittleEndian()) {
                    PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
                    return NULL;
                }
                ctx->new_packmode = '=';
                ++ts;
                break;
            case '>':
            case '!':
                /* explicit big-endian: mirror of the '<' case. */
                if (__Pyx_IsLittleEndian()) {
                    PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
                    return NULL;
                }
                ctx->new_packmode = '=';
                ++ts;
                break;
            case '=':
            case '@':
            case '^':
                ctx->new_packmode = *ts++;
                break;
            case 'T': /* substruct */
            {
                const char* ts_after_sub;
                size_t i, struct_count = ctx->new_count;
                size_t struct_alignment = ctx->struct_alignment;
                ctx->new_count = 1;
                ++ts;
                if (*ts != '{') {
                    PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
                    return NULL;
                }
                if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
                ctx->enc_type = 0; /* Erase processed last struct element */
                ctx->enc_count = 0;
                ctx->struct_alignment = 0;
                ++ts;
                ts_after_sub = ts;
                /* re-parse the substruct body once per repeat count */
                for (i = 0; i != struct_count; ++i) {
                    ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
                    if (!ts_after_sub) return NULL;
                }
                ts = ts_after_sub;
                if (struct_alignment) ctx->struct_alignment = struct_alignment;
            }
            break;
            case '}': /* end of substruct; either repeat or move on */
            {
                size_t alignment = ctx->struct_alignment;
                ++ts;
                if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
                ctx->enc_type = 0; /* Erase processed last struct element */
                /* pad the struct tail out to its alignment boundary */
                if (alignment && ctx->fmt_offset % alignment) {
                    ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
                }
            }
            return ts;
            case 'x':
                /* pad bytes: advance the offset without matching a field */
                if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
                ctx->fmt_offset += ctx->new_count;
                ctx->new_count = 1;
                ctx->enc_count = 0;
                ctx->enc_type = 0;
                ctx->enc_packmode = ctx->new_packmode;
                ++ts;
                break;
            case 'Z':
                /* complex prefix — must be followed by f, d or g */
                got_Z = 1;
                ++ts;
                if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
                    __Pyx_BufFmt_RaiseUnexpectedChar('Z');
                    return NULL;
                } /* fall through */
            case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
            case 'l': case 'L': case 'q': case 'Q':
            case 'f': case 'd': case 'g':
            case 'O': case 's': case 'p':
                /* same code as the pending chunk? just extend its count;
                   otherwise flush the old chunk and start a new one. */
                if (ctx->enc_type == *ts && got_Z == ctx->is_complex &&
                    ctx->enc_packmode == ctx->new_packmode) {
                    ctx->enc_count += ctx->new_count;
                } else {
                    if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
                    ctx->enc_count = ctx->new_count;
                    ctx->enc_packmode = ctx->new_packmode;
                    ctx->enc_type = *ts;
                    ctx->is_complex = got_Z;
                }
                ++ts;
                ctx->new_count = 1;
                got_Z = 0;
                break;
            case ':':
                /* ':name:' field annotations — skipped entirely */
                ++ts;
                while(*ts != ':') ++ts;
                ++ts;
                break;
            case '(':
                if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
                break;
            default:
            {
                /* a bare number is the repeat count for the next code */
                int number = __Pyx_BufFmt_ExpectNumber(&ts);
                if (number == -1) return NULL;
                ctx->new_count = (size_t)number;
            }
        }
    }
}
/* Put a Py_buffer into the "empty" state Cython uses for None buffers:
 * no data/owner, and shared all-zero / all-minus-one shape arrays so that
 * generated indexing code stays well-defined without NULL checks. */
static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) {
    buf->buf = NULL;
    buf->obj = NULL;
    buf->strides = __Pyx_zeros;
    buf->shape = __Pyx_zeros;
    buf->suboffsets = __Pyx_minusones;
}
/* Acquire a buffer from `obj` into `buf` and validate it against the
 * expected dtype, dimensionality and itemsize. `cast` nonzero skips the
 * format-string check (user requested a reinterpreting view). None/NULL
 * objects yield the zeroed buffer. Returns 0 on success, -1 (ValueError
 * set, buffer zeroed) on failure. */
static CYTHON_INLINE int __Pyx_GetBufferAndValidate(
        Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags,
        int nd, int cast, __Pyx_BufFmt_StackElem* stack)
{
    if (obj == Py_None || obj == NULL) {
        __Pyx_ZeroBuffer(buf);
        return 0;
    }
    buf->buf = NULL;
    if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail;
    if (buf->ndim != nd) {
        PyErr_Format(PyExc_ValueError,
                     "Buffer has wrong number of dimensions (expected %d, got %d)",
                     nd, buf->ndim);
        goto fail;
    }
    if (!cast) {
        /* full format-string walk against the dtype tree */
        __Pyx_BufFmt_Context ctx;
        __Pyx_BufFmt_Init(&ctx, stack, dtype);
        if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
    }
    if ((unsigned)buf->itemsize != dtype->size) {
        PyErr_Format(PyExc_ValueError,
                     "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)",
                     buf->itemsize, (buf->itemsize > 1) ? "s" : "",
                     dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
        goto fail;
    }
    /* normalize missing suboffsets to the shared minus-one array */
    if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
    return 0;
fail:;
    __Pyx_ZeroBuffer(buf);
    return -1;
}
/* Release a buffer previously filled by __Pyx_GetBufferAndValidate,
 * undoing the shared-suboffsets normalization first; no-op when empty. */
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
    if (info->buf == NULL) return;
    if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL;
    __Pyx_ReleaseBuffer(info);
}
/* Fill `memviewslice` (shape/strides/suboffsets/data) from the Py_buffer
 * held by `memview`. When the buffer exposes no strides, C-contiguous
 * strides are synthesized from the shape. Bumps the memoryview's
 * acquisition count (taking a reference on first acquisition unless the
 * reference is already new). Returns 0 on success, -1 on error. */
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
                        int ndim,
                        __Pyx_memviewslice *memviewslice,
                        int memview_is_new_reference)
{
    __Pyx_RefNannyDeclarations
    int i, retval=-1;
    Py_buffer *buf = &memview->view;
    __Pyx_RefNannySetupContext("init_memviewslice", 0);
    if (!buf) {
        PyErr_SetString(PyExc_ValueError,
                        "buf is NULL.");
        goto fail;
    } else if (memviewslice->memview || memviewslice->data) {
        /* refuse to clobber an already-populated slice */
        PyErr_SetString(PyExc_ValueError,
                        "memviewslice is already initialized!");
        goto fail;
    }
    if (buf->strides) {
        for (i = 0; i < ndim; i++) {
            memviewslice->strides[i] = buf->strides[i];
        }
    } else {
        /* no strides: derive C-contiguous ones from innermost out */
        Py_ssize_t stride = buf->itemsize;
        for (i = ndim - 1; i >= 0; i--) {
            memviewslice->strides[i] = stride;
            stride *= buf->shape[i];
        }
    }
    for (i = 0; i < ndim; i++) {
        memviewslice->shape[i] = buf->shape[i];
        if (buf->suboffsets) {
            memviewslice->suboffsets[i] = buf->suboffsets[i];
        } else {
            memviewslice->suboffsets[i] = -1;  /* -1 = no indirection */
        }
    }
    memviewslice->memview = memview;
    memviewslice->data = (char *)buf->buf;
    /* first acquisition of an existing object needs its own reference */
    if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
        Py_INCREF(memview);
    }
    retval = 0;
    goto no_fail;
fail:
    memviewslice->memview = 0;
    memviewslice->data = 0;
    retval = -1;
no_fail:
    __Pyx_RefNannyFinishContext();
    return retval;
}
/* Abort the process with a printf-style message via Py_FatalError
 * (which does not return; the trailing va_end is for form's sake).
 *
 * BUG FIX: va_start was invoked twice — once unconditionally and once
 * inside the HAVE_STDARG_PROTOTYPES conditional — without an intervening
 * va_end, which is undefined behavior per the C stdarg rules. Only the
 * conditional invocation is kept. */
static CYTHON_INLINE void __pyx_fatalerror(const char *fmt, ...) {
    va_list vargs;
    char msg[200];
#ifdef HAVE_STDARG_PROTOTYPES
    va_start(vargs, fmt);
#else
    va_start(vargs);
#endif
    vsnprintf(msg, 200, fmt, vargs);
    Py_FatalError(msg);
    va_end(vargs);
}
/* Increment the memoryview acquisition counter under `lock`; returns the
 * value BEFORE the increment (post-increment), so 0 means "first owner". */
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
                                   PyThread_type_lock lock)
{
    int result;
    PyThread_acquire_lock(lock, 1);  /* 1 = wait for the lock */
    result = (*acquisition_count)++;
    PyThread_release_lock(lock);
    return result;
}
/* Decrement the acquisition counter under `lock`; returns the value
 * BEFORE the decrement, so 1 means "last owner just released". */
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
                                   PyThread_type_lock lock)
{
    int result;
    PyThread_acquire_lock(lock, 1);
    result = (*acquisition_count)--;
    PyThread_release_lock(lock);
    return result;
}
/* Register one more slice referencing `memslice`'s memoryview. The first
 * acquisition also takes a Python reference on the memoryview object,
 * grabbing the GIL if the caller does not hold it. A negative count means
 * the bookkeeping is corrupt — fatal error. */
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
    int first_time;
    struct __pyx_memoryview_obj *memview = memslice->memview;
    if (!memview || (PyObject *) memview == Py_None)
        return; /* allow uninitialized memoryview assignment */
    if (__pyx_get_slice_count(memview) < 0)
        __pyx_fatalerror("Acquisition count is %d (line %d)",
                         __pyx_get_slice_count(memview), lineno);
    first_time = __pyx_add_acquisition_count(memview) == 0;
    if (first_time) {
        if (have_gil) {
            Py_INCREF((PyObject *) memview);
        } else {
            PyGILState_STATE _gilstate = PyGILState_Ensure();
            Py_INCREF((PyObject *) memview);
            PyGILState_Release(_gilstate);
        }
    }
}
/* Drop one slice reference. When the last slice goes away, the Python
 * reference taken by __Pyx_INC_MEMVIEW is released (with the GIL held).
 * Always clears the slice's memview/data pointers. */
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
                                             int have_gil, int lineno) {
    int last_time;
    struct __pyx_memoryview_obj *memview = memslice->memview;
    if (!memview ) {
        return;
    } else if ((PyObject *) memview == Py_None) {
        memslice->memview = NULL;
        return;
    }
    if (__pyx_get_slice_count(memview) <= 0)
        __pyx_fatalerror("Acquisition count is %d (line %d)",
                         __pyx_get_slice_count(memview), lineno);
    last_time = __pyx_sub_acquisition_count(memview) == 1;
    memslice->data = NULL;
    if (last_time) {
        if (have_gil) {
            Py_CLEAR(memslice->memview);
        } else {
            PyGILState_STATE _gilstate = PyGILState_Ensure();
            Py_CLEAR(memslice->memview);
            PyGILState_Release(_gilstate);
        }
    } else {
        memslice->memview = NULL;
    }
}
/* Set the thread's current exception, stealing references to type/value/tb.
 * On CPython, writes the thread-state slots directly (faster than the
 * public PyErr_Restore) and releases whatever was there before. */
static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) {
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    PyThreadState *tstate = PyThreadState_GET();
    tmp_type = tstate->curexc_type;
    tmp_value = tstate->curexc_value;
    tmp_tb = tstate->curexc_traceback;
    tstate->curexc_type = type;
    tstate->curexc_value = value;
    tstate->curexc_traceback = tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
#else
    PyErr_Restore(type, value, tb);
#endif
}
/* Fetch-and-clear the thread's current exception into *type/*value/*tb,
 * transferring ownership to the caller (direct slot access on CPython). */
static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_COMPILING_IN_CPYTHON
    PyThreadState *tstate = PyThreadState_GET();
    *type = tstate->curexc_type;
    *value = tstate->curexc_value;
    *tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
#else
    PyErr_Fetch(type, value, tb);
#endif
}
/* Report the pending exception through sys.unraisablehook-style output
 * (PyErr_WriteUnraisable), labelled with `name` as the context. When
 * `full_traceback` is set the traceback is also printed via PyErr_PrintEx.
 * The pending exception is consumed. */
static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno,
                                  CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename,
                                  int full_traceback) {
    PyObject *old_exc, *old_val, *old_tb;
    PyObject *ctx;
    __Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
    if (full_traceback) {
        /* extra refs so the exception survives the PyErr_PrintEx below */
        Py_XINCREF(old_exc);
        Py_XINCREF(old_val);
        Py_XINCREF(old_tb);
        __Pyx_ErrRestore(old_exc, old_val, old_tb);
        PyErr_PrintEx(1);
    }
#if PY_MAJOR_VERSION < 3
    ctx = PyString_FromString(name);
#else
    ctx = PyUnicode_FromString(name);
#endif
    __Pyx_ErrRestore(old_exc, old_val, old_tb);
    if (!ctx) {
        PyErr_WriteUnraisable(Py_None);
    } else {
        PyErr_WriteUnraisable(ctx);
        Py_DECREF(ctx);
    }
}
#if CYTHON_COMPILING_IN_CPYTHON
/* Fast-path replacement for PyObject_Call on CPython: invokes tp_call
 * directly with recursion-depth protection, and normalizes the invariant
 * that a NULL result must come with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
    PyObject *result;
    ternaryfunc call = func->ob_type->tp_call;
    if (unlikely(!call))
        /* non-callable: let the generic path raise the proper TypeError */
        return PyObject_Call(func, arg, kw);
#if PY_VERSION_HEX >= 0x02060000
    if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
        return NULL;
#endif
    result = (*call)(func, arg, kw);
#if PY_VERSION_HEX >= 0x02060000
    Py_LeaveRecursiveCall();
#endif
    if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
        PyErr_SetString(
            PyExc_SystemError,
            "NULL result without error in PyObject_Call");
    }
    return result;
}
#endif
/* Raise the CPython-style TypeError for a wrong number of positional
 * arguments: picks "at least"/"at most"/"exactly" wording from the
 * min/max bounds and the `exact` flag. */
static void __Pyx_RaiseArgtupleInvalid(
    const char* func_name,
    int exact,
    Py_ssize_t num_min,
    Py_ssize_t num_max,
    Py_ssize_t num_found)
{
    Py_ssize_t num_expected;
    const char *more_or_less;
    if (num_found < num_min) {
        num_expected = num_min;
        more_or_less = "at least";
    } else {
        num_expected = num_max;
        more_or_less = "at most";
    }
    if (exact) {
        more_or_less = "exactly";
    }
    /* %.1s appends "s" only when the count isn't 1 (plural) */
    PyErr_Format(PyExc_TypeError,
                 "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
                 func_name, more_or_less, num_expected,
                 (num_expected == 1) ? "" : "s", num_found);
}
/* Raise the TypeError for a keyword argument that duplicates a
 * positional one (or appears twice). */
static void __Pyx_RaiseDoubleKeywordsError(
    const char* func_name,
    PyObject* kw_name)
{
    PyErr_Format(PyExc_TypeError,
        #if PY_MAJOR_VERSION >= 3
        "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
        #else
        "%s() got multiple values for keyword argument '%s'", func_name,
        PyString_AsString(kw_name));
        #endif
}
/* Distribute the entries of the `kwds` dict into the `values` slots named
 * by `argnames` (NULL-terminated array of interned-name pointers; the
 * first `num_pos_args` entries were already filled positionally).
 * Unknown keywords go into `kwds2` if given, otherwise raise TypeError.
 * Matching tries pointer identity on interned names first, then falls
 * back to string comparison. Returns 0 on success, -1 with an exception
 * set on duplicate, unknown, or non-string keywords. */
static int __Pyx_ParseOptionalKeywords(
    PyObject *kwds,
    PyObject **argnames[],
    PyObject *kwds2,
    PyObject *values[],
    Py_ssize_t num_pos_args,
    const char* function_name)
{
    PyObject *key = 0, *value = 0;
    Py_ssize_t pos = 0;
    PyObject*** name;
    PyObject*** first_kw_arg = argnames + num_pos_args;
    while (PyDict_Next(kwds, &pos, &key, &value)) {
        /* fast path: interned names compare equal by pointer */
        name = first_kw_arg;
        while (*name && (**name != key)) name++;
        if (*name) {
            values[name-argnames] = value;
            continue;
        }
        name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
        if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) {
            /* slow path (Py2 str keys): byte-wise comparison */
            while (*name) {
                if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
                        && _PyString_Eq(**name, key)) {
                    values[name-argnames] = value;
                    break;
                }
                name++;
            }
            if (*name) continue;
            else {
                /* not a keyword-capable slot: check whether it collides
                   with an argument already bound positionally */
                PyObject*** argname = argnames;
                while (argname != first_kw_arg) {
                    if ((**argname == key) || (
                            (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
                            && _PyString_Eq(**argname, key))) {
                        goto arg_passed_twice;
                    }
                    argname++;
                }
            }
        } else
#endif
        if (likely(PyUnicode_Check(key))) {
            /* slow path (unicode keys): length check then full compare */
            while (*name) {
                int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
                    (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
                    PyUnicode_Compare(**name, key);
                if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
                if (cmp == 0) {
                    values[name-argnames] = value;
                    break;
                }
                name++;
            }
            if (*name) continue;
            else {
                PyObject*** argname = argnames;
                while (argname != first_kw_arg) {
                    int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
                        (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 :
#endif
                        PyUnicode_Compare(**argname, key);
                    if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
                    if (cmp == 0) goto arg_passed_twice;
                    argname++;
                }
            }
        } else
            goto invalid_keyword_type;
        if (kwds2) {
            /* function takes **kwargs: stash the unmatched keyword there */
            if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
        } else {
            goto invalid_keyword;
        }
    }
    return 0;
arg_passed_twice:
    __Pyx_RaiseDoubleKeywordsError(function_name, key);
    goto bad;
invalid_keyword_type:
    PyErr_Format(PyExc_TypeError,
        "%.200s() keywords must be strings", function_name);
    goto bad;
invalid_keyword:
    PyErr_Format(PyExc_TypeError,
    #if PY_MAJOR_VERSION < 3
        "%.200s() got an unexpected keyword argument '%.200s'",
        function_name, PyString_AsString(key));
    #else
        "%s() got an unexpected keyword argument '%U'",
        function_name, key);
    #endif
bad:
    return -1;
}
/* Raise UnboundLocalError for reading local `varname` before assignment. */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
    PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
/* Raise the TypeError used when an argument fails a declared-type check. */
static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) {
    PyErr_Format(PyExc_TypeError,
        "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
        name, type->tp_name, Py_TYPE(obj)->tp_name);
}
/* Check that `obj` matches `type`: exact type match when `exact`, else an
 * isinstance-style subtype check; `none_allowed` additionally accepts None.
 * Returns 1 on match, 0 with TypeError (or SystemError) set otherwise. */
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
                                           const char *name, int exact)
{
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (none_allowed && obj == Py_None) return 1;
    else if (exact) {
        if (likely(Py_TYPE(obj) == type)) return 1;
        #if PY_MAJOR_VERSION == 2
        /* Py2: `basestring` accepts both exact str and exact unicode */
        else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
        #endif
    }
    else {
        if (likely(PyObject_TypeCheck(obj, type))) return 1;
    }
    __Pyx_RaiseArgumentTypeInvalid(name, obj, type);
    return 0;
}
#if PY_MAJOR_VERSION < 3
/* Python 2 implementation of the `raise` statement: normalizes the
 * (type, value, tb) triple like the interpreter does and installs it as
 * the current exception. Steals no arguments; takes its own references.
 * `cause` (raise ... from ...) is ignored on Py2. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
                        CYTHON_UNUSED PyObject *cause) {
    Py_XINCREF(type);
    if (!value || value == Py_None)
        value = NULL;
    else
        Py_INCREF(value);
    if (!tb || tb == Py_None)
        tb = NULL;
    else {
        Py_INCREF(tb);
        if (!PyTraceBack_Check(tb)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: arg 3 must be a traceback or None");
            goto raise_error;
        }
    }
    #if PY_VERSION_HEX < 0x02050000
    if (PyClass_Check(type)) {
    #else
    if (PyType_Check(type)) {
    #endif
        /* raising a class: let CPython instantiate it from `value` */
#if CYTHON_COMPILING_IN_PYPY
        if (!value) {
            Py_INCREF(Py_None);
            value = Py_None;
        }
#endif
        PyErr_NormalizeException(&type, &value, &tb);
    } else {
        /* raising an instance: no separate value is allowed */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto raise_error;
        }
        value = type;
        #if PY_VERSION_HEX < 0x02050000
        if (PyInstance_Check(type)) {
            type = (PyObject*) ((PyInstanceObject*)type)->in_class;
            Py_INCREF(type);
        } else {
            type = 0;
            PyErr_SetString(PyExc_TypeError,
                "raise: exception must be an old-style class or instance");
            goto raise_error;
        }
        #else
        type = (PyObject*) Py_TYPE(type);
        Py_INCREF(type);
        if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
            PyErr_SetString(PyExc_TypeError,
                "raise: exception class must be a subclass of BaseException");
            goto raise_error;
        }
        #endif
    }
    /* hand our references over to the thread state */
    __Pyx_ErrRestore(type, value, tb);
    return;
raise_error:
    Py_XDECREF(value);
    Py_XDECREF(type);
    Py_XDECREF(tb);
    return;
}
#else /* Python 3+ */
/* Python 3 implementation of `raise type, value, tb from cause`:
 * instantiates the exception if a class was raised, validates instance
 * raises, attaches __cause__, sets the exception, and re-attaches the
 * explicit traceback if one was given. Borrows all arguments. */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
    PyObject* owned_instance = NULL;
    if (tb == Py_None) {
        tb = 0;
    } else if (tb && !PyTraceBack_Check(tb)) {
        PyErr_SetString(PyExc_TypeError,
            "raise: arg 3 must be a traceback or None");
        goto bad;
    }
    if (value == Py_None)
        value = 0;
    if (PyExceptionInstance_Check(type)) {
        /* `raise instance` — a separate value is not permitted */
        if (value) {
            PyErr_SetString(PyExc_TypeError,
                "instance exception may not have a separate value");
            goto bad;
        }
        value = type;
        type = (PyObject*) Py_TYPE(value);
    } else if (PyExceptionClass_Check(type)) {
        /* `raise Class` or `raise Class(value)` */
        PyObject *instance_class = NULL;
        if (value && PyExceptionInstance_Check(value)) {
            instance_class = (PyObject*) Py_TYPE(value);
            if (instance_class != type) {
                if (PyObject_IsSubclass(instance_class, type)) {
                    /* value is already an instance of a subclass: keep it */
                    type = instance_class;
                } else {
                    instance_class = NULL;
                }
            }
        }
        if (!instance_class) {
            /* instantiate the class with value as constructor argument(s) */
            PyObject *args;
            if (!value)
                args = PyTuple_New(0);
            else if (PyTuple_Check(value)) {
                Py_INCREF(value);
                args = value;
            } else
                args = PyTuple_Pack(1, value);
            if (!args)
                goto bad;
            owned_instance = PyObject_Call(type, args, NULL);
            Py_DECREF(args);
            if (!owned_instance)
                goto bad;
            value = owned_instance;
            if (!PyExceptionInstance_Check(value)) {
                PyErr_Format(PyExc_TypeError,
                             "calling %R should have returned an instance of "
                             "BaseException, not %R",
                             type, Py_TYPE(value));
                goto bad;
            }
        }
    } else {
        PyErr_SetString(PyExc_TypeError,
            "raise: exception class must be a subclass of BaseException");
        goto bad;
    }
#if PY_VERSION_HEX >= 0x03030000
    if (cause) {
#else
    if (cause && cause != Py_None) {
#endif
        PyObject *fixed_cause;
        if (cause == Py_None) {
            /* `from None` — suppress context chaining */
            fixed_cause = NULL;
        } else if (PyExceptionClass_Check(cause)) {
            fixed_cause = PyObject_CallObject(cause, NULL);
            if (fixed_cause == NULL)
                goto bad;
        } else if (PyExceptionInstance_Check(cause)) {
            fixed_cause = cause;
            Py_INCREF(fixed_cause);
        } else {
            PyErr_SetString(PyExc_TypeError,
                            "exception causes must derive from "
                            "BaseException");
            goto bad;
        }
        PyException_SetCause(value, fixed_cause);
    }
    PyErr_SetObject(type, value);
    if (tb) {
        /* splice the explicit traceback into the freshly-set exception */
        PyThreadState *tstate = PyThreadState_GET();
        PyObject* tmp_tb = tstate->curexc_traceback;
        if (tb != tmp_tb) {
            Py_INCREF(tb);
            tstate->curexc_traceback = tb;
            Py_XDECREF(tmp_tb);
        }
    }
bad:
    Py_XDECREF(owned_instance);
    return;
}
#endif
/* getattr(o, n): routes exact-string names through Cython's cached
 * attribute lookup on CPython, generic PyObject_GetAttr otherwise.
 * Returns a new reference or NULL with an exception set. */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_COMPILING_IN_CPYTHON
#if PY_MAJOR_VERSION >= 3
    if (likely(PyUnicode_Check(n)))
#else
    if (likely(PyString_Check(n)))
#endif
        return __Pyx_PyObject_GetAttrStr(o, n);
#endif
    return PyObject_GetAttr(o, n);
}
/* getattr(o, n, d): like __Pyx_GetAttr but returns default `d` (with a
 * new reference) when the attribute is missing; only AttributeError is
 * swallowed, other exceptions propagate. */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
    PyObject *r = __Pyx_GetAttr(o, n);
    if (unlikely(!r)) {
        if (!PyErr_ExceptionMatches(PyExc_AttributeError))
            goto bad;
        PyErr_Clear();
        r = d;
        Py_INCREF(d);
    }
    return r;
bad:
    return NULL;
}
/* Compare two objects for (in)equality with a bytes fast path.
 * `equals` is Py_EQ or Py_NE; the return value is the truth of that
 * comparison (0/1), or -1 on error. Exact bytes pairs are compared with
 * identity, length, first byte, then memcmp; everything else falls back
 * to PyObject_RichCompare. */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
    return PyObject_RichCompareBool(s1, s2, equals);
#else
    if (s1 == s2) {
        /* identical object: trivially equal */
        return (equals == Py_EQ);
    } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
        const char *ps1, *ps2;
        Py_ssize_t length = PyBytes_GET_SIZE(s1);
        if (length != PyBytes_GET_SIZE(s2))
            return (equals == Py_NE);
        ps1 = PyBytes_AS_STRING(s1);
        ps2 = PyBytes_AS_STRING(s2);
        /* cheap first-byte check before the full memcmp */
        if (ps1[0] != ps2[0]) {
            return (equals == Py_NE);
        } else if (length == 1) {
            return (equals == Py_EQ);
        } else {
            int result = memcmp(ps1, ps2, (size_t)length);
            return (equals == Py_EQ) ? (result == 0) : (result != 0);
        }
    } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
        /* None never equals a bytes object */
        return (equals == Py_NE);
    } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
        return (equals == Py_NE);
    } else {
        int result;
        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
        if (!py_result)
            return -1;
        result = __Pyx_PyObject_IsTrue(py_result);
        Py_DECREF(py_result);
        return result;
    }
#endif
}
/* Compare two objects for (in)equality with a fast path for exact unicode
 * strings.  `equals` is Py_EQ or Py_NE.  Returns 1/0 for the outcome, -1
 * on error.  Under Python 2 a str operand is first coerced to unicode
 * (owned_ref keeps the temporary alive); two non-unicode operands are
 * delegated to __Pyx_PyBytes_Equals. */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
    return PyObject_RichCompareBool(s1, s2, equals);
#else
#if PY_MAJOR_VERSION < 3
    PyObject* owned_ref = NULL;
#endif
    int s1_is_unicode, s2_is_unicode;
    if (s1 == s2) {
        /* same object: equal by identity */
        goto return_eq;
    }
    s1_is_unicode = PyUnicode_CheckExact(s1);
    s2_is_unicode = PyUnicode_CheckExact(s2);
#if PY_MAJOR_VERSION < 3
    if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
        owned_ref = PyUnicode_FromObject(s2);
        if (unlikely(!owned_ref))
            return -1;
        s2 = owned_ref;
        s2_is_unicode = 1;
    } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
        owned_ref = PyUnicode_FromObject(s1);
        if (unlikely(!owned_ref))
            return -1;
        s1 = owned_ref;
        s1_is_unicode = 1;
    } else if (((!s2_is_unicode) & (!s1_is_unicode))) {
        return __Pyx_PyBytes_Equals(s1, s2, equals);
    }
#endif
    if (s1_is_unicode & s2_is_unicode) {
        Py_ssize_t length;
        int kind;
        void *data1, *data2;
#if CYTHON_PEP393_ENABLED
        /* PEP 393: make sure both strings are in canonical (ready) form */
        if (unlikely(PyUnicode_READY(s1) < 0) || unlikely(PyUnicode_READY(s2) < 0))
            return -1;
#endif
        length = __Pyx_PyUnicode_GET_LENGTH(s1);
        if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
            goto return_ne;
        }
        kind = __Pyx_PyUnicode_KIND(s1);
        if (kind != __Pyx_PyUnicode_KIND(s2)) {
            /* NOTE(review): different storage kinds are treated as unequal
             * here without canonicalising — assumes ready strings of equal
             * content share a kind. */
            goto return_ne;
        }
        data1 = __Pyx_PyUnicode_DATA(s1);
        data2 = __Pyx_PyUnicode_DATA(s2);
        /* first-character check before the full memcmp */
        if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
            goto return_ne;
        } else if (length == 1) {
            goto return_eq;
        } else {
            int result = memcmp(data1, data2, length * kind);
            #if PY_MAJOR_VERSION < 3
            Py_XDECREF(owned_ref);
            #endif
            return (equals == Py_EQ) ? (result == 0) : (result != 0);
        }
    } else if ((s1 == Py_None) & s2_is_unicode) {
        goto return_ne;
    } else if ((s2 == Py_None) & s1_is_unicode) {
        goto return_ne;
    } else {
        /* fall back to the generic rich-comparison protocol */
        int result;
        PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
        if (!py_result)
            return -1;
        result = __Pyx_PyObject_IsTrue(py_result);
        Py_DECREF(py_result);
        return result;
    }
return_eq:
    #if PY_MAJOR_VERSION < 3
    Py_XDECREF(owned_ref);
    #endif
    return (equals == Py_EQ);
return_ne:
    #if PY_MAJOR_VERSION < 3
    Py_XDECREF(owned_ref);
    #endif
    return (equals == Py_NE);
#endif
}
/* Decode the [start, stop) slice of a NUL-terminated C string into a
 * unicode object.  Negative start/stop are interpreted Python-style,
 * relative to strlen(cstring) (which is only computed in that case).
 * If `decode_func` is non-NULL it performs the decode; otherwise
 * PyUnicode_Decode is called with `encoding`/`errors`.  An empty or
 * inverted slice yields an empty unicode string. */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
         const char* cstring, Py_ssize_t start, Py_ssize_t stop,
         const char* encoding, const char* errors,
         PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
    Py_ssize_t length;
    if (unlikely((start < 0) | (stop < 0))) {
        length = strlen(cstring);
        if (start < 0) {
            start += length;
            if (start < 0)
                start = 0;  /* clamp to the beginning */
        }
        if (stop < 0)
            stop += length;
    }
    length = stop - start;
    if (unlikely(length <= 0))
        return PyUnicode_FromUnicode(NULL, 0);  /* empty result */
    cstring += start;
    if (decode_func) {
        return decode_func(cstring, length, errors);
    } else {
        return PyUnicode_Decode(cstring, length, encoding, errors);
    }
}
/* Set a ValueError matching CPython's message for unpacking too many values. */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
    PyErr_Format(PyExc_ValueError,
                 "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* Set a ValueError matching CPython's message for unpacking too few values;
 * `index` is the number of values actually obtained. */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
    PyErr_Format(PyExc_ValueError,
                 "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
                 index, (index == 1) ? "" : "s");
}
/* Set the TypeError raised when None is unpacked/iterated. */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
    PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* Check that `obj` is an instance of `type` (including subclasses).
 * Returns 1 on success; on failure sets TypeError (or SystemError if
 * `type` itself is NULL) and returns 0. */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
    if (unlikely(!type)) {
        PyErr_SetString(PyExc_SystemError, "Missing type object");
        return 0;
    }
    if (likely(PyObject_TypeCheck(obj, type)))
        return 1;
    PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
                 Py_TYPE(obj)->tp_name, type->tp_name);
    return 0;
}
/* Save the thread state's currently *handled* exception (exc_type/value/
 * traceback — not the pending one) into the three output slots, taking a
 * new reference to each.  Outputs may be NULL values.  Non-CPython builds
 * use the portable PyErr_GetExcInfo API instead. */
static CYTHON_INLINE void __Pyx_ExceptionSave(PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_COMPILING_IN_CPYTHON
    PyThreadState *tstate = PyThreadState_GET();
    *type = tstate->exc_type;
    *value = tstate->exc_value;
    *tb = tstate->exc_traceback;
    Py_XINCREF(*type);
    Py_XINCREF(*value);
    Py_XINCREF(*tb);
#else
    PyErr_GetExcInfo(type, value, tb);
#endif
}
/* Restore a previously saved handled-exception triple into the thread
 * state, STEALING the three references passed in and releasing whatever
 * was installed before.  Counterpart to __Pyx_ExceptionSave. */
static void __Pyx_ExceptionReset(PyObject *type, PyObject *value, PyObject *tb) {
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    PyThreadState *tstate = PyThreadState_GET();
    /* swap in the new triple before decref'ing the old one, so the thread
     * state is never left pointing at freed objects */
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = type;
    tstate->exc_value = value;
    tstate->exc_traceback = tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
#else
    PyErr_SetExcInfo(type, value, tb);
#endif
}
/* Fetch and normalize the pending exception, publish it as the "currently
 * handled" exception on the thread state (so sys.exc_info() sees it), and
 * hand one new reference to each of *type/*value/*tb to the caller.
 * This implements the entry into an `except:` block.  Returns 0 on
 * success; on failure clears the outputs and returns -1. */
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) {
    PyObject *local_type, *local_value, *local_tb;
#if CYTHON_COMPILING_IN_CPYTHON
    PyObject *tmp_type, *tmp_value, *tmp_tb;
    PyThreadState *tstate = PyThreadState_GET();
    /* inline PyErr_Fetch: take ownership of the pending exception */
    local_type = tstate->curexc_type;
    local_value = tstate->curexc_value;
    local_tb = tstate->curexc_traceback;
    tstate->curexc_type = 0;
    tstate->curexc_value = 0;
    tstate->curexc_traceback = 0;
#else
    PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
    PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_COMPILING_IN_CPYTHON
    if (unlikely(tstate->curexc_type))
#else
    if (unlikely(PyErr_Occurred()))
#endif
        goto bad;  /* normalization itself raised */
#if PY_MAJOR_VERSION >= 3
    if (local_tb) {
        /* attach the traceback to the exception instance (Py3 semantics) */
        if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
            goto bad;
    }
#endif
    /* one reference each for the caller's output slots... */
    Py_XINCREF(local_tb);
    Py_XINCREF(local_type);
    Py_XINCREF(local_value);
    *type = local_type;
    *value = local_value;
    *tb = local_tb;
#if CYTHON_COMPILING_IN_CPYTHON
    /* ...and the original references go to the thread state's
     * handled-exception slots, replacing (and releasing) the old ones */
    tmp_type = tstate->exc_type;
    tmp_value = tstate->exc_value;
    tmp_tb = tstate->exc_traceback;
    tstate->exc_type = local_type;
    tstate->exc_value = local_value;
    tstate->exc_traceback = local_tb;
    Py_XDECREF(tmp_type);
    Py_XDECREF(tmp_value);
    Py_XDECREF(tmp_tb);
#else
    PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
    return 0;
bad:
    *type = 0;
    *value = 0;
    *tb = 0;
    Py_XDECREF(local_type);
    Py_XDECREF(local_value);
    Py_XDECREF(local_tb);
    return -1;
}
/* Generic o[j] lookup used as the slow path of the integer-indexing
 * helpers.  Steals the reference to the boxed index `j` (which may be
 * NULL if boxing already failed).  Returns a new reference or NULL. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
    PyObject *r;
    if (!j) return NULL;
    r = PyObject_GetItem(o, j);
    Py_DECREF(j);
    return r;
}
/* o[i] for an object known to be a list.  `wraparound` enables Python's
 * negative-index semantics; `boundscheck` selects between a checked
 * access (falling back to the generic path, which raises IndexError) and
 * an unchecked PyList_GET_ITEM.  Returns a new reference or NULL. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
                                                              int wraparound, int boundscheck) {
#if CYTHON_COMPILING_IN_CPYTHON
    if (wraparound & unlikely(i < 0)) i += PyList_GET_SIZE(o);
    if ((!boundscheck) || likely((0 <= i) & (i < PyList_GET_SIZE(o)))) {
        PyObject *r = PyList_GET_ITEM(o, i);  /* borrowed */
        Py_INCREF(r);                          /* promote to owned */
        return r;
    }
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* o[i] for an object known to be a tuple; same wraparound/boundscheck
 * contract as __Pyx_GetItemInt_List_Fast.  Returns a new reference or NULL. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
                                                              int wraparound, int boundscheck) {
#if CYTHON_COMPILING_IN_CPYTHON
    if (wraparound & unlikely(i < 0)) i += PyTuple_GET_SIZE(o);
    if ((!boundscheck) || likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) {
        PyObject *r = PyTuple_GET_ITEM(o, i);  /* borrowed */
        Py_INCREF(r);                           /* promote to owned */
        return r;
    }
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
    return PySequence_GetItem(o, i);
#endif
}
/* o[i] for an object of unknown concrete type: fast paths for list and
 * tuple, then the sq_item slot (with optional negative-index fixup via
 * sq_length), finally the fully generic boxed-index lookup.  `is_list`
 * lets the caller assert list-ness statically.  Returns a new reference
 * or NULL with an exception set. */
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
                                                     int is_list, int wraparound, int boundscheck) {
#if CYTHON_COMPILING_IN_CPYTHON
    if (is_list || PyList_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
        if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) {
            PyObject *r = PyList_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
        /* out of bounds: fall through to the generic path below */
    }
    else if (PyTuple_CheckExact(o)) {
        Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
        if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) {
            PyObject *r = PyTuple_GET_ITEM(o, n);
            Py_INCREF(r);
            return r;
        }
    } else {
        PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
        if (likely(m && m->sq_item)) {
            if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
                Py_ssize_t l = m->sq_length(o);
                if (likely(l >= 0)) {
                    i += l;  /* translate negative index */
                } else {
                    /* sq_length failed; OverflowError means "too big to
                     * report" — keep the negative index, anything else
                     * propagates */
                    if (PyErr_ExceptionMatches(PyExc_OverflowError))
                        PyErr_Clear();
                    else
                        return NULL;
                }
            }
            return m->sq_item(o, i);
        }
    }
#else
    if (is_list || PySequence_Check(o)) {
        return PySequence_GetItem(o, i);
    }
#endif
    return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/* Store an extension type's C vtable pointer in its type dict under the
 * '__pyx_vtable__' key, wrapped in a PyCapsule (PyCObject on very old
 * Pythons).  Returns 0 on success, -1 with an exception set on failure. */
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION==3&&PY_MINOR_VERSION==0)
    PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
    PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
    if (!ob)
        goto bad;
    if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
        goto bad;
    Py_DECREF(ob);
    return 0;
bad:
    Py_XDECREF(ob);
    return -1;
}
#if PY_MAJOR_VERSION < 3
/* Python 2 replacement for PyObject_GetBuffer: try the native buffer
 * protocol first (2.6+), then the buffer-exporting types known to this
 * module (array.array, the Cython array and memoryview types), and on
 * pre-2.6 a '__pyx_getbuffer' CObject stashed in the type dict.  Raises
 * TypeError and returns -1 if the object exports no buffer. */
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
#if PY_VERSION_HEX >= 0x02060000
    if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
#endif
    if (PyObject_TypeCheck(obj, __pyx_ptype_7cpython_5array_array)) return __pyx_pw_7cpython_5array_5array_1__getbuffer__(obj, view, flags);
    if (PyObject_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
    if (PyObject_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
#if PY_VERSION_HEX < 0x02060000
    if (obj->ob_type->tp_dict) {
        PyObject *getbuffer_cobj = PyObject_GetItem(
            obj->ob_type->tp_dict, __pyx_n_s_pyx_getbuffer);
        if (getbuffer_cobj) {
            getbufferproc func = (getbufferproc) PyCObject_AsVoidPtr(getbuffer_cobj);
            Py_DECREF(getbuffer_cobj);
            if (!func)
                goto fail;
            return func(obj, view, flags);
        } else {
            PyErr_Clear();  /* no such key: just not buffer-capable */
        }
    }
#endif
    PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
#if PY_VERSION_HEX < 0x02060000
fail:
#endif
    return -1;
}
/* Python 2 replacement for PyBuffer_Release: release a buffer acquired
 * via __Pyx_GetBuffer, trying the same providers in the same order.  In
 * all cases the view's owner reference is dropped and view->obj cleared;
 * failures are reported via PyErr_WriteUnraisable since this can run
 * during cleanup. */
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
    PyObject *obj = view->obj;
    if (!obj) return;  /* already released */
#if PY_VERSION_HEX >= 0x02060000
    if (PyObject_CheckBuffer(obj)) {
        PyBuffer_Release(view);
        return;
    }
#endif
    if (PyObject_TypeCheck(obj, __pyx_ptype_7cpython_5array_array)) { __pyx_pw_7cpython_5array_5array_3__releasebuffer__(obj, view); return; }
#if PY_VERSION_HEX < 0x02060000
    if (obj->ob_type->tp_dict) {
        PyObject *releasebuffer_cobj = PyObject_GetItem(
            obj->ob_type->tp_dict, __pyx_n_s_pyx_releasebuffer);
        if (releasebuffer_cobj) {
            releasebufferproc func = (releasebufferproc) PyCObject_AsVoidPtr(releasebuffer_cobj);
            Py_DECREF(releasebuffer_cobj);
            if (!func)
                goto fail;
            func(obj, view);
            return;
        } else {
            PyErr_Clear();
        }
    }
#endif
    goto nofail;
#if PY_VERSION_HEX < 0x02060000
fail:
#endif
    PyErr_WriteUnraisable(obj);
nofail:
    Py_DECREF(obj);
    view->obj = NULL;
}
#endif /* PY_MAJOR_VERSION < 3 */
/* Structural equality of two Cython buffer-format type descriptors.
 * Returns 1 if equivalent, 0 otherwise (including when either is NULL).
 * Typegroup 'H' entries compare by size alone; struct entries ('S')
 * additionally require matching flags and field-by-field agreement on
 * offset and (recursively) type. */
static int
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
{
    int i;
    if (!a || !b)
        return 0;
    if (a == b)
        return 1;
    if (a->size != b->size || a->typegroup != b->typegroup ||
            a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
        if (a->typegroup == 'H' || b->typegroup == 'H') {
            /* 'H' group: only the byte size has to match */
            return a->size == b->size;
        } else {
            return 0;
        }
    }
    if (a->ndim) {
        /* fixed-size array dimensions must match exactly */
        for (i = 0; i < a->ndim; i++)
            if (a->arraysize[i] != b->arraysize[i])
                return 0;
    }
    if (a->typegroup == 'S') {
        if (a->flags != b->flags)
            return 0;
        if (a->fields || b->fields) {
            if (!(a->fields && b->fields))
                return 0;  /* one has fields, the other doesn't */
            for (i = 0; a->fields[i].type && b->fields[i].type; i++) {
                __Pyx_StructField *field_a = a->fields + i;
                __Pyx_StructField *field_b = b->fields + i;
                if (field_a->offset != field_b->offset ||
                    !__pyx_typeinfo_cmp(field_a->type, field_b->type))
                    return 0;
            }
            /* both field lists must have ended at the same index */
            return !a->fields[i].type && !b->fields[i].type;
        }
    }
    return 1;
}
/* Validate one dimension of a buffer against the memoryview axis spec
 * `spec` (a combination of __Pyx_MEMVIEW_* flags).  Dimensions of extent
 * <= 1 are always acceptable.  Returns 1 if compatible; otherwise sets a
 * ValueError and returns 0. */
static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
    if (buf->shape[dim] <= 1)
        return 1;
    if (buf->strides) {
        if (spec & __Pyx_MEMVIEW_CONTIG) {
            if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
                /* indirect-contiguous: stride must step by one pointer */
                if (buf->strides[dim] != sizeof(void *)) {
                    PyErr_Format(PyExc_ValueError,
                                 "Buffer is not indirectly contiguous "
                                 "in dimension %d.", dim);
                    goto fail;
                }
            } else if (buf->strides[dim] != buf->itemsize) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer and memoryview are not contiguous "
                                "in the same dimension.");
                goto fail;
            }
        }
        if (spec & __Pyx_MEMVIEW_FOLLOW) {
            /* 'follow' axes only need |stride| >= itemsize */
            Py_ssize_t stride = buf->strides[dim];
            if (stride < 0)
                stride = -stride;
            if (stride < buf->itemsize) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer and memoryview are not contiguous "
                                "in the same dimension.");
                goto fail;
            }
        }
    } else {
        /* NULL strides: buffer is C-contiguous by convention, so only the
         * last dimension may be declared contiguous, no axis may be
         * indirect, and suboffsets make no sense */
        if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) {
            PyErr_Format(PyExc_ValueError,
                         "C-contiguous buffer is not contiguous in "
                         "dimension %d", dim);
            goto fail;
        } else if (spec & (__Pyx_MEMVIEW_PTR)) {
            PyErr_Format(PyExc_ValueError,
                         "C-contiguous buffer is not indirect in "
                         "dimension %d", dim);
            goto fail;
        } else if (buf->suboffsets) {
            PyErr_SetString(PyExc_ValueError,
                            "Buffer exposes suboffsets but no strides");
            goto fail;
        }
    }
    return 1;
fail:
    return 0;
}
/* Validate a buffer dimension's suboffset against the axis spec: a DIRECT
 * axis must not have a (non-negative) suboffset, a PTR axis must have one.
 * Returns 1 if compatible; sets ValueError and returns 0 otherwise. */
static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
    if (spec & __Pyx_MEMVIEW_DIRECT) {
        if (buf->suboffsets && buf->suboffsets[dim] >= 0) {
            PyErr_Format(PyExc_ValueError,
                         "Buffer not compatible with direct access "
                         "in dimension %d.", dim);
            goto fail;
        }
    }
    if (spec & __Pyx_MEMVIEW_PTR) {
        if (!buf->suboffsets || (buf->suboffsets && buf->suboffsets[dim] < 0)) {
            PyErr_Format(PyExc_ValueError,
                         "Buffer is not indirectly accessible "
                         "in dimension %d.", dim);
            goto fail;
        }
    }
    return 1;
fail:
    return 0;
}
/* Verify overall Fortran- or C-contiguity of a strided buffer, as
 * requested by `c_or_f_flag` (__Pyx_IS_F_CONTIG / __Pyx_IS_C_CONTIG; no
 * flag means no check).  Walks the dimensions in the appropriate order,
 * accumulating the expected element stride and skipping dimensions of
 * extent <= 1, which impose no constraint.  Requires buf->strides to be
 * non-NULL (callers gate on that).  Returns 1 if contiguous; sets a
 * ValueError and returns 0 otherwise. */
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
    int i;
    if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
        /* Fortran order: first dimension varies fastest */
        Py_ssize_t stride = 1;
        for (i = 0; i < ndim; i++) {
            if (stride * buf->itemsize != buf->strides[i] &&
                    buf->shape[i] > 1)
            {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer not fortran contiguous.");
                goto fail;
            }
            stride = stride * buf->shape[i];
        }
    } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
        /* C order: last dimension varies fastest.
         * (Was "i >- 1", which parses as `i > -1` but reads like a
         * nonexistent operator; spacing fixed, behavior unchanged.) */
        Py_ssize_t stride = 1;
        for (i = ndim - 1; i > -1; i--) {
            if (stride * buf->itemsize != buf->strides[i] &&
                    buf->shape[i] > 1) {
                PyErr_SetString(PyExc_ValueError,
                                "Buffer not C contiguous.");
                goto fail;
            }
            stride = stride * buf->shape[i];
        }
    }
    return 1;
fail:
    return 0;
}
/* Validate `original_obj` against the requested memoryview layout
 * (per-axis specs, contiguity flag, ndim, dtype) and initialise
 * `memviewslice` from it.  Reuses the object directly when it is already
 * a memoryview with a matching dtype; otherwise wraps it in a new
 * memoryview (whose format string is then checked against `dtype`).
 * Returns 0 on success, -1 with an exception set on failure (releasing
 * any newly created memoryview). */
static int __Pyx_ValidateAndInit_memviewslice(
                int *axes_specs,
                int c_or_f_flag,
                int buf_flags,
                int ndim,
                __Pyx_TypeInfo *dtype,
                __Pyx_BufFmt_StackElem stack[],
                __Pyx_memviewslice *memviewslice,
                PyObject *original_obj)
{
    struct __pyx_memoryview_obj *memview, *new_memview;
    __Pyx_RefNannyDeclarations
    Py_buffer *buf;
    int i, spec = 0, retval = -1;
    __Pyx_BufFmt_Context ctx;
    int from_memoryview = __pyx_memoryview_check(original_obj);
    __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
    if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
                                                            original_obj)->typeinfo)) {
        /* already a memoryview of the right dtype: use it as-is */
        memview = (struct __pyx_memoryview_obj *) original_obj;
        new_memview = NULL;
    } else {
        memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
                                            original_obj, buf_flags, 0, dtype);
        new_memview = memview;  /* owned; released on failure */
        if (unlikely(!memview))
            goto fail;
    }
    buf = &memview->view;
    if (buf->ndim != ndim) {
        PyErr_Format(PyExc_ValueError,
                "Buffer has wrong number of dimensions (expected %d, got %d)",
                ndim, buf->ndim);
        goto fail;
    }
    if (new_memview) {
        /* only freshly wrapped buffers need their format string parsed */
        __Pyx_BufFmt_Init(&ctx, stack, dtype);
        if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
    }
    if ((unsigned) buf->itemsize != dtype->size) {
        PyErr_Format(PyExc_ValueError,
                     "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
                     "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
                     buf->itemsize,
                     (buf->itemsize > 1) ? "s" : "",
                     dtype->name,
                     dtype->size,
                     (dtype->size > 1) ? "s" : "");
        goto fail;
    }
    /* per-dimension stride and suboffset checks against the axis specs */
    for (i = 0; i < ndim; i++) {
        spec = axes_specs[i];
        if (!__pyx_check_strides(buf, i, ndim, spec))
            goto fail;
        if (!__pyx_check_suboffsets(buf, i, ndim, spec))
            goto fail;
    }
    if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))
        goto fail;
    if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
                                         new_memview != NULL) == -1)) {
        goto fail;
    }
    retval = 0;
    goto no_fail;
fail:
    Py_XDECREF(new_memview);
    retval = -1;
no_fail:
    __Pyx_RefNannyFinishContext();
    return retval;
}
/* Convert a Python object into a 2-D direct+strided memoryview slice of
 * C doubles (Cython signature "double[:, :]").  Py_None is passed through
 * as a None slice; on conversion failure the returned slice has NULL
 * memview/data and an exception is set. */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_double(PyObject *obj) {
    __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem stack[1];
    int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
    int retcode;
    if (obj == Py_None) {
        result.memview = (struct __pyx_memoryview_obj *) Py_None;
        return result;
    }
    retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
                                                 PyBUF_RECORDS, 2,
                                                 &__Pyx_TypeInfo_double, stack,
                                                 &result, obj);
    if (unlikely(retcode == -1))
        goto __pyx_fail;
    return result;
__pyx_fail:
    result.memview = NULL;
    result.data = NULL;
    return result;
}
/* Helper macro for the __Pyx_PyInt_As_* converters: extract a value from
 * `x` (in scope) with `func` returning `func_type`, verify it round-trips
 * through the narrower `target_type`, and return it.  On overflow it sets
 * OverflowError (distinguishing negative values for unsigned targets via
 * the in-scope `is_unsigned`) and returns (target_type)-1. */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func) \
    { \
        func_type value = func(x); \
        if (sizeof(target_type) < sizeof(func_type)) { \
            if (unlikely(value != (func_type) (target_type) value)) { \
                func_type zero = 0; \
                PyErr_SetString(PyExc_OverflowError, \
                    (is_unsigned && unlikely(value < zero)) ? \
                    "can't convert negative value to " #target_type : \
                    "value too large to convert to " #target_type); \
                return (target_type) -1; \
            } \
        } \
        return (target_type) value; \
    }
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#endif
#endif
/* Convert a Python object to a C int with overflow checking.
 * Fast paths: Py2 PyInt; PyLong via single-digit internals (when enabled),
 * then PyLong_AsUnsignedLong(Long)/PyLong_AsLong(Long) per signedness and
 * width; last resort is a byte-level copy via _PyLong_AsByteArray.
 * Non-integer objects are first coerced with __Pyx_PyNumber_Int and the
 * conversion retried.  Returns (int)-1 with an exception set on error
 * (callers must check PyErr_Occurred to disambiguate). */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
    const int neg_one = (int) -1, const_zero = 0;
    /* compile-time detection of whether `int` is unsigned on this target */
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(int) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG)
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                PyErr_SetString(PyExc_OverflowError,
                                "can't convert negative value to int");
                return (int) -1;
            }
            return (int) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
 #if CYTHON_USE_PYLONG_INTERNALS
            /* single-digit PyLong: read the digit directly */
            if (sizeof(digit) <= sizeof(int)) {
                switch (Py_SIZE(x)) {
                    case  0: return 0;
                    case  1: return (int) ((PyLongObject*)x)->ob_digit[0];
                }
            }
 #endif
#endif
            if (unlikely(Py_SIZE(x) < 0)) {
                PyErr_SetString(PyExc_OverflowError,
                                "can't convert negative value to int");
                return (int) -1;
            }
            if (sizeof(int) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT(int, unsigned long, PyLong_AsUnsignedLong)
            } else if (sizeof(int) <= sizeof(unsigned long long)) {
                __PYX_VERIFY_RETURN_INT(int, unsigned long long, PyLong_AsUnsignedLongLong)
            }
        } else {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
 #if CYTHON_USE_PYLONG_INTERNALS
            /* small signed PyLong: +/- single digit */
            if (sizeof(digit) <= sizeof(int)) {
                switch (Py_SIZE(x)) {
                    case  0: return 0;
                    case  1: return +(int) ((PyLongObject*)x)->ob_digit[0];
                    case -1: return -(int) ((PyLongObject*)x)->ob_digit[0];
                }
            }
 #endif
#endif
            if (sizeof(int) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT(int, long, PyLong_AsLong)
            } else if (sizeof(int) <= sizeof(long long)) {
                __PYX_VERIFY_RETURN_INT(int, long long, PyLong_AsLongLong)
            }
        }
        {
            /* `int` wider than any C long type: copy raw bytes */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            int val;
            PyObject *v = __Pyx_PyNumber_Int(x);
 #if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
 #endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (int) -1;
        }
    } else {
        /* not an int/long: coerce to integer and retry */
        int val;
        PyObject *tmp = __Pyx_PyNumber_Int(x);
        if (!tmp) return (int) -1;
        val = __Pyx_PyInt_As_int(tmp);
        Py_DECREF(tmp);
        return val;
    }
}
/* Box a C int as a Python integer, choosing the narrowest suitable
 * CPython constructor for the platform's `int` width/signedness; falls
 * back to a raw byte copy via _PyLong_FromByteArray if no C long type is
 * wide enough.  Returns a new reference or NULL on error. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
    const int neg_one = (int) -1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(int) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(int) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
        } else if (sizeof(int) <= sizeof(unsigned long long)) {
            return PyLong_FromUnsignedLongLong((unsigned long long) value);
        }
    } else {
        if (sizeof(int) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(int) <= sizeof(long long)) {
            return PyLong_FromLongLong((long long) value);
        }
    }
    {
        /* no wide-enough C type: build the PyLong from raw bytes */
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(int),
                                     little, !is_unsigned);
    }
}
/* Box a C long as a Python integer; same template as __Pyx_PyInt_From_int
 * instantiated for `long` (several branches are trivially dead for this
 * type and eliminated by the compiler).  Returns a new reference or NULL. */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
    const long neg_one = (long) -1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
    if (is_unsigned) {
        if (sizeof(long) < sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(unsigned long)) {
            return PyLong_FromUnsignedLong((unsigned long) value);
        } else if (sizeof(long) <= sizeof(unsigned long long)) {
            return PyLong_FromUnsignedLongLong((unsigned long long) value);
        }
    } else {
        if (sizeof(long) <= sizeof(long)) {
            return PyInt_FromLong((long) value);
        } else if (sizeof(long) <= sizeof(long long)) {
            return PyLong_FromLongLong((long long) value);
        }
    }
    {
        int one = 1; int little = (int)*(unsigned char *)&one;
        unsigned char *bytes = (unsigned char *)&value;
        return _PyLong_FromByteArray(bytes, sizeof(long),
                                     little, !is_unsigned);
    }
}
/* Memoryview item getter for dtype `int`: box the element at `itemp`. */
static PyObject *__pyx_memview_get_int(const char *itemp) {
    return (PyObject *) __Pyx_PyInt_From_int(*(int *) itemp);
}
/* Memoryview item setter for dtype `int`: unbox `obj` and store it at
 * `itemp`.  Returns 1 on success, 0 on conversion error (exception set).
 * NOTE: the (int)-1 return of the converter is ambiguous, hence the
 * PyErr_Occurred check. */
static int __pyx_memview_set_int(const char *itemp, PyObject *obj) {
    int value = __Pyx_PyInt_As_int(obj);
    if ((value == (int)-1) && PyErr_Occurred())
        return 0;
    *(int *) itemp = value;
    return 1;
}
/* Return 1 if the slice is contiguous in the given order ('F' checks
 * dimensions first-to-last, anything else C order last-to-first), i.e.
 * each dimension's stride equals the accumulated element size and no
 * dimension is indirect (suboffset >= 0). */
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice *mvs,
                             char order, int ndim)
{
    int i, index, step, start;
    Py_ssize_t itemsize = mvs->memview->view.itemsize;
    if (order == 'F') {
        step = 1;
        start = 0;
    } else {
        step = -1;
        start = ndim - 1;
    }
    for (i = 0; i < ndim; i++) {
        index = start + step * i;
        if (mvs->suboffsets[index] >= 0 || mvs->strides[index] != itemsize)
            return 0;
        itemsize *= mvs->shape[index];  /* expected stride of the next dim */
    }
    return 1;
}
/* Compute the [start, end) byte range touched by a strided slice.
 * Negative strides extend `start` downward, positive strides extend `end`
 * upward; a zero-extent dimension makes the whole range empty.  `end` is
 * one item past the last addressed element. */
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
                               void **out_start, void **out_end,
                               int ndim, size_t itemsize)
{
    char *start, *end;
    int i;
    start = end = slice->data;
    for (i = 0; i < ndim; i++) {
        Py_ssize_t stride = slice->strides[i];
        Py_ssize_t extent = slice->shape[i];
        if (extent == 0) {
            /* empty slice: zero-length extent at the data pointer */
            *out_start = *out_end = start;
            return;
        } else {
            if (stride > 0)
                end += stride * (extent - 1);
            else
                start += stride * (extent - 1);
        }
    }
    *out_start = start;
    *out_end = end + itemsize;  /* make the range half-open */
}
/* Return nonzero if the byte ranges addressed by the two slices overlap
 * (standard half-open interval intersection test). */
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
                     __Pyx_memviewslice *slice2,
                     int ndim, size_t itemsize)
{
    void *start1, *end1, *start2, *end2;
    __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
    __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
    return (start1 < end2) && (start2 < end1);
}
/* Deep-copy a memoryview slice into a freshly allocated contiguous array
 * of the layout named by `mode` ('c'/'fortran'), returning a new slice
 * over it.  Indirect (suboffset) dimensions cannot be copied and raise
 * ValueError.  On failure the returned slice has NULL memview/data and an
 * exception is set. */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
                                 const char *mode, int ndim,
                                 size_t sizeof_dtype, int contig_flag,
                                 int dtype_is_object)
{
    __Pyx_RefNannyDeclarations
    int i;
    __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
    struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
    Py_buffer *buf = &from_memview->view;
    PyObject *shape_tuple = NULL;
    PyObject *temp_int = NULL;
    struct __pyx_array_obj *array_obj = NULL;
    struct __pyx_memoryview_obj *memview_obj = NULL;
    __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
    for (i = 0; i < ndim; i++) {
        if (from_mvs->suboffsets[i] >= 0) {
            PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
                                           "indirect dimensions (axis %d)", i);
            goto fail;
        }
    }
    /* build the shape tuple for the new array */
    shape_tuple = PyTuple_New(ndim);
    if (unlikely(!shape_tuple)) {
        goto fail;
    }
    __Pyx_GOTREF(shape_tuple);
    for(i = 0; i < ndim; i++) {
        temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
        if(unlikely(!temp_int)) {
            goto fail;
        } else {
            PyTuple_SET_ITEM(shape_tuple, i, temp_int);  /* steals temp_int */
            temp_int = NULL;
        }
    }
    /* allocate the destination array and wrap it in a memoryview */
    array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
    if (unlikely(!array_obj)) {
        goto fail;
    }
    __Pyx_GOTREF(array_obj);
    memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
                                    (PyObject *) array_obj, contig_flag,
                                    dtype_is_object,
                                    from_mvs->memview->typeinfo);
    if (unlikely(!memview_obj))
        goto fail;
    if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
        goto fail;
    /* element-wise copy from the source slice into the new buffer */
    if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
                                                dtype_is_object) < 0))
        goto fail;
    goto no_fail;
fail:
    __Pyx_XDECREF(new_mvs.memview);
    new_mvs.memview = NULL;
    new_mvs.data = NULL;
no_fail:
    __Pyx_XDECREF(shape_tuple);
    __Pyx_XDECREF(temp_int);
    __Pyx_XDECREF(array_obj);
    __Pyx_RefNannyFinishContext();
    return new_mvs;
}
/* Wrap a raw pointer in a PyCapsule tagged with signature string `sig`
 * (PyCObject, without the tag, on pre-2.7/3.0 Pythons).  Returns a new
 * reference or NULL on error. */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
    PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000 && !(PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION == 0)
    cobj = PyCapsule_New(p, sig, NULL);
#else
    cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
    return cobj;
}
/* Import module `name` with the given `from_list` and relative-import
 * `level` (level -1 requests Python-2-style "relative then absolute").
 * Uses builtins.__import__ before 3.3 and
 * PyImport_ImportModuleLevelObject from 3.3 on.  For level==-1 inside a
 * package on Py3, a level-1 relative import is attempted first and an
 * ImportError from it is swallowed before retrying absolutely.  Returns
 * a new reference to the module, or NULL with an exception set. */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
    PyObject *empty_list = 0;
    PyObject *module = 0;
    PyObject *global_dict = 0;
    PyObject *empty_dict = 0;
    PyObject *list;
#if PY_VERSION_HEX < 0x03030000
    PyObject *py_import;
    py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
    if (!py_import)
        goto bad;
#endif
    if (from_list)
        list = from_list;
    else {
        /* __import__ requires a list even when nothing is imported from */
        empty_list = PyList_New(0);
        if (!empty_list)
            goto bad;
        list = empty_list;
    }
    global_dict = PyModule_GetDict(__pyx_m);
    if (!global_dict)
        goto bad;
    empty_dict = PyDict_New();
    if (!empty_dict)
        goto bad;
    #if PY_VERSION_HEX >= 0x02050000
    {
        #if PY_MAJOR_VERSION >= 3
        if (level == -1) {
            if (strchr(__Pyx_MODULE_NAME, '.')) {
                /* we are inside a package: try a relative import first */
                #if PY_VERSION_HEX < 0x03030000
                PyObject *py_level = PyInt_FromLong(1);
                if (!py_level)
                    goto bad;
                module = PyObject_CallFunctionObjArgs(py_import,
                    name, global_dict, empty_dict, list, py_level, NULL);
                Py_DECREF(py_level);
                #else
                module = PyImport_ImportModuleLevelObject(
                    name, global_dict, empty_dict, list, 1);
                #endif
                if (!module) {
                    if (!PyErr_ExceptionMatches(PyExc_ImportError))
                        goto bad;
                    PyErr_Clear();
                }
            }
            level = 0; /* try absolute import on failure */
        }
        #endif
        if (!module) {
            #if PY_VERSION_HEX < 0x03030000
            PyObject *py_level = PyInt_FromLong(level);
            if (!py_level)
                goto bad;
            module = PyObject_CallFunctionObjArgs(py_import,
                name, global_dict, empty_dict, list, py_level, NULL);
            Py_DECREF(py_level);
            #else
            module = PyImport_ImportModuleLevelObject(
                name, global_dict, empty_dict, list, level);
            #endif
        }
    }
    #else
    if (level>0) {
        PyErr_SetString(PyExc_RuntimeError, "Relative import is not supported for Python <=2.4.");
        goto bad;
    }
    module = PyObject_CallFunctionObjArgs(py_import,
        name, global_dict, empty_dict, list, NULL);
    #endif
bad:
#if PY_VERSION_HEX < 0x03030000
    Py_XDECREF(py_import);
#endif
    Py_XDECREF(empty_list);
    Py_XDECREF(empty_dict);
    return module;
}
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#endif
#endif
/* Convert a Python object to a C char with overflow checking; same
 * template as __Pyx_PyInt_As_int instantiated for `char` (whose
 * signedness is implementation-defined, hence the runtime-constant
 * `is_unsigned` probe).  Returns (char)-1 with an exception set on error;
 * callers must check PyErr_Occurred to disambiguate. */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
    const char neg_one = (char) -1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(char) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG)
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                PyErr_SetString(PyExc_OverflowError,
                                "can't convert negative value to char");
                return (char) -1;
            }
            return (char) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
 #if CYTHON_USE_PYLONG_INTERNALS
            /* single-digit PyLong fast path */
            if (sizeof(digit) <= sizeof(char)) {
                switch (Py_SIZE(x)) {
                    case  0: return 0;
                    case  1: return (char) ((PyLongObject*)x)->ob_digit[0];
                }
            }
 #endif
#endif
            if (unlikely(Py_SIZE(x) < 0)) {
                PyErr_SetString(PyExc_OverflowError,
                                "can't convert negative value to char");
                return (char) -1;
            }
            if (sizeof(char) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT(char, unsigned long, PyLong_AsUnsignedLong)
            } else if (sizeof(char) <= sizeof(unsigned long long)) {
                __PYX_VERIFY_RETURN_INT(char, unsigned long long, PyLong_AsUnsignedLongLong)
            }
        } else {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
 #if CYTHON_USE_PYLONG_INTERNALS
            if (sizeof(digit) <= sizeof(char)) {
                switch (Py_SIZE(x)) {
                    case  0: return 0;
                    case  1: return +(char) ((PyLongObject*)x)->ob_digit[0];
                    case -1: return -(char) ((PyLongObject*)x)->ob_digit[0];
                }
            }
 #endif
#endif
            if (sizeof(char) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT(char, long, PyLong_AsLong)
            } else if (sizeof(char) <= sizeof(long long)) {
                __PYX_VERIFY_RETURN_INT(char, long long, PyLong_AsLongLong)
            }
        }
        {
            /* last resort: raw byte copy out of the PyLong */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            char val;
            PyObject *v = __Pyx_PyNumber_Int(x);
 #if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
 #endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (char) -1;
        }
    } else {
        /* not an int/long: coerce and retry */
        char val;
        PyObject *tmp = __Pyx_PyNumber_Int(x);
        if (!tmp) return (char) -1;
        val = __Pyx_PyInt_As_char(tmp);
        Py_DECREF(tmp);
        return val;
    }
}
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#endif
#endif
/* Convert a Python object to a C long with overflow checking; same
 * template as __Pyx_PyInt_As_int instantiated for `long` (some sizeof
 * comparisons are trivially dead for this type).  Returns (long)-1 with
 * an exception set on error; callers must check PyErr_Occurred. */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
    const long neg_one = (long) -1, const_zero = 0;
    const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
    if (likely(PyInt_Check(x))) {
        if (sizeof(long) < sizeof(long)) {
            __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG)
        } else {
            long val = PyInt_AS_LONG(x);
            if (is_unsigned && unlikely(val < 0)) {
                PyErr_SetString(PyExc_OverflowError,
                                "can't convert negative value to long");
                return (long) -1;
            }
            return (long) val;
        }
    } else
#endif
    if (likely(PyLong_Check(x))) {
        if (is_unsigned) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
 #if CYTHON_USE_PYLONG_INTERNALS
            /* single-digit PyLong fast path */
            if (sizeof(digit) <= sizeof(long)) {
                switch (Py_SIZE(x)) {
                    case  0: return 0;
                    case  1: return (long) ((PyLongObject*)x)->ob_digit[0];
                }
            }
 #endif
#endif
            if (unlikely(Py_SIZE(x) < 0)) {
                PyErr_SetString(PyExc_OverflowError,
                                "can't convert negative value to long");
                return (long) -1;
            }
            if (sizeof(long) <= sizeof(unsigned long)) {
                __PYX_VERIFY_RETURN_INT(long, unsigned long, PyLong_AsUnsignedLong)
            } else if (sizeof(long) <= sizeof(unsigned long long)) {
                __PYX_VERIFY_RETURN_INT(long, unsigned long long, PyLong_AsUnsignedLongLong)
            }
        } else {
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
 #if CYTHON_USE_PYLONG_INTERNALS
            if (sizeof(digit) <= sizeof(long)) {
                switch (Py_SIZE(x)) {
                    case  0: return 0;
                    case  1: return +(long) ((PyLongObject*)x)->ob_digit[0];
                    case -1: return -(long) ((PyLongObject*)x)->ob_digit[0];
                }
            }
 #endif
#endif
            if (sizeof(long) <= sizeof(long)) {
                __PYX_VERIFY_RETURN_INT(long, long, PyLong_AsLong)
            } else if (sizeof(long) <= sizeof(long long)) {
                __PYX_VERIFY_RETURN_INT(long, long long, PyLong_AsLongLong)
            }
        }
        {
            /* last resort: raw byte copy out of the PyLong */
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
            PyErr_SetString(PyExc_RuntimeError,
                            "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
            long val;
            PyObject *v = __Pyx_PyNumber_Int(x);
 #if PY_MAJOR_VERSION < 3
            if (likely(v) && !PyLong_Check(v)) {
                PyObject *tmp = v;
                v = PyNumber_Long(tmp);
                Py_DECREF(tmp);
            }
 #endif
            if (likely(v)) {
                int one = 1; int is_little = (int)*(unsigned char *)&one;
                unsigned char *bytes = (unsigned char *)&val;
                int ret = _PyLong_AsByteArray((PyLongObject *)v,
                                              bytes, sizeof(val),
                                              is_little, !is_unsigned);
                Py_DECREF(v);
                if (likely(!ret))
                    return val;
            }
#endif
            return (long) -1;
        }
    } else {
        /* not an int/long: coerce and retry */
        long val;
        PyObject *tmp = __Pyx_PyNumber_Int(x);
        if (!tmp) return (long) -1;
        val = __Pyx_PyInt_As_long(tmp);
        Py_DECREF(tmp);
        return val;
    }
}
/* Build a 1-D direct-strided memoryview slice of C doubles from a Python
 * object supporting the buffer protocol.  Py_None maps to a slice whose
 * memview field is Py_None; on validation failure an all-NULL slice is
 * returned (with a Python exception set by the validator). */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *obj) {
    __Pyx_memviewslice slice = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem fmt_stack[1];
    int axes[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
    if (obj == Py_None) {
        /* None is passed through unchanged (optional memoryview argument) */
        slice.memview = (struct __pyx_memoryview_obj *) Py_None;
        return slice;
    }
    if (unlikely(__Pyx_ValidateAndInit_memviewslice(axes, 0, PyBUF_RECORDS, 1,
                                                    &__Pyx_TypeInfo_double,
                                                    fmt_stack, &slice,
                                                    obj) == -1)) {
        slice.memview = NULL;
        slice.data = NULL;
    }
    return slice;
}
/* Build a 1-D direct-strided memoryview slice of C ints from a Python
 * object supporting the buffer protocol.  Py_None maps to a slice whose
 * memview field is Py_None; on validation failure an all-NULL slice is
 * returned (with a Python exception set by the validator). */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_int(PyObject *obj) {
    __Pyx_memviewslice slice = { 0, 0, { 0 }, { 0 }, { 0 } };
    __Pyx_BufFmt_StackElem fmt_stack[1];
    int axes[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
    if (obj == Py_None) {
        /* None is passed through unchanged (optional memoryview argument) */
        slice.memview = (struct __pyx_memoryview_obj *) Py_None;
        return slice;
    }
    if (unlikely(__Pyx_ValidateAndInit_memviewslice(axes, 0, PyBUF_RECORDS, 1,
                                                    &__Pyx_TypeInfo_int,
                                                    fmt_stack, &slice,
                                                    obj) == -1)) {
        slice.memview = NULL;
        slice.data = NULL;
    }
    return slice;
}
/* Emit a RuntimeWarning if the Python X.Y version this module was compiled
 * against differs from the X.Y version running it.
 * Returns 0 when versions match (or the warning was issued), and the
 * (negative) result of the warning call if raising it failed.
 * NOTE(review): the 4-byte buffers hold only "X.Y\0", so a two-digit minor
 * version such as "3.10" is truncated to "3.1" before comparison; later
 * Cython releases reworked this check -- confirm against the Cython version
 * used to generate this file. */
static int __Pyx_check_binary_version(void) {
    char ctversion[4], rtversion[4];
    PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
    PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
    /* compare the major digit (index 0) and minor digit (index 2) of "X.Y" */
    if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
        char message[200];
        PyOS_snprintf(message, sizeof(message),
                      "compiletime version %s of module '%.100s' "
                      "does not match runtime version %s",
                      ctversion, __Pyx_MODULE_NAME, rtversion);
        #if PY_VERSION_HEX < 0x02050000
        return PyErr_Warn(NULL, message);
        #else
        return PyErr_WarnEx(NULL, message, 1);
        #endif
    }
    return 0;
}
#ifndef __PYX_HAVE_RT_ImportModule
#define __PYX_HAVE_RT_ImportModule
/* Import the module called `name` and return a new reference to it.
 * Returns NULL with a Python exception set on failure. */
static PyObject *__Pyx_ImportModule(const char *name) {
    PyObject *module = 0;
    PyObject *py_name = __Pyx_PyIdentifier_FromString(name);
    if (py_name) {
        module = PyImport_Import(py_name);
        Py_DECREF(py_name);
    }
    return module;
}
#endif
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
/* Import the type object `module_name.class_name` and verify that its
 * tp_basicsize matches the size this module was compiled against.
 * strict != 0 requires an exact size match; strict == 0 tolerates a larger
 * runtime size (issuing a warning), flagging possible binary
 * incompatibility either way.
 * Returns a new reference to the type, or NULL with an exception set. */
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name,
    size_t size, int strict)
{
    PyObject *py_module = 0;
    PyObject *result = 0;
    PyObject *py_name = 0;
    char warning[200];
    Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
    PyObject *py_basicsize;
#endif
    py_module = __Pyx_ImportModule(module_name);
    if (!py_module)
        goto bad;
    py_name = __Pyx_PyIdentifier_FromString(class_name);
    if (!py_name)
        goto bad;
    result = PyObject_GetAttr(py_module, py_name);
    Py_DECREF(py_name);
    py_name = 0;
    Py_DECREF(py_module);
    py_module = 0;
    if (!result)
        goto bad;
    if (!PyType_Check(result)) {
        PyErr_Format(PyExc_TypeError,
            "%.200s.%.200s is not a type object",
            module_name, class_name);
        goto bad;
    }
#ifndef Py_LIMITED_API
    /* full API: read the struct field directly */
    basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
    /* limited API: the struct layout is opaque, query __basicsize__ */
    py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
    if (!py_basicsize)
        goto bad;
    basicsize = PyLong_AsSsize_t(py_basicsize);
    Py_DECREF(py_basicsize);
    py_basicsize = 0;
    if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
        goto bad;
#endif
    if (!strict && (size_t)basicsize > size) {
        /* runtime type grew: usually still ABI-compatible, so only warn */
        PyOS_snprintf(warning, sizeof(warning),
            "%s.%s size changed, may indicate binary incompatibility",
            module_name, class_name);
        #if PY_VERSION_HEX < 0x02050000
        if (PyErr_Warn(NULL, warning) < 0) goto bad;
        #else
        if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
        #endif
    }
    else if ((size_t)basicsize != size) {
        PyErr_Format(PyExc_ValueError,
            "%.200s.%.200s has the wrong size, try recompiling",
            module_name, class_name);
        goto bad;
    }
    return (PyTypeObject *)result;
bad:
    Py_XDECREF(py_module);
    Py_XDECREF(result);
    return NULL;
}
#endif
/* Binary search over the code-object cache, which is kept sorted by
 * code_line.  Returns the index of the entry whose code_line equals
 * `code_line`, or the index at which such an entry would have to be
 * inserted to keep the array sorted (count if it belongs at the end).
 * Callers only invoke this with count >= 1. */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
    int lo = 0;
    int hi = count - 1;
    /* larger than the last entry: insertion point is past the end */
    if (hi >= 0 && code_line > entries[hi].code_line)
        return count;
    while (lo < hi) {
        int probe = (lo + hi) / 2;
        int probe_line = entries[probe].code_line;
        if (probe_line == code_line)
            return probe;
        if (code_line < probe_line)
            hi = probe;
        else
            lo = probe + 1;
    }
    /* lo == hi: first position with entries[lo].code_line >= code_line */
    return lo;
}
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
/* Insert (or replace) the cache entry mapping code_line -> code_object in
 * the sorted global __pyx_code_cache.  Takes a new strong reference to
 * code_object on success; silently does nothing if code_line is 0 or if
 * (re)allocating the entry array fails -- the cache is an optimization
 * only, so allocation failure is not an error. */
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
    int pos, i;
    __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
    if (unlikely(!code_line)) {
        return;
    }
    if (unlikely(!entries)) {
        /* first insertion: allocate the initial 64-entry array */
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
        if (likely(entries)) {
            __pyx_code_cache.entries = entries;
            __pyx_code_cache.max_count = 64;
            __pyx_code_cache.count = 1;
            entries[0].code_line = code_line;
            entries[0].code_object = code_object;
            Py_INCREF(code_object);
        }
        return;
    }
    pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
    if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
        /* line already cached: swap in the new code object, drop the old ref */
        PyCodeObject* tmp = entries[pos].code_object;
        entries[pos].code_object = code_object;
        Py_DECREF(tmp);
        return;
    }
    if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
        /* grow linearly by 64 entries at a time */
        int new_max = __pyx_code_cache.max_count + 64;
        entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
            __pyx_code_cache.entries, new_max*sizeof(__Pyx_CodeObjectCacheEntry));
        if (unlikely(!entries)) {
            return;
        }
        __pyx_code_cache.entries = entries;
        __pyx_code_cache.max_count = new_max;
    }
    /* shift the tail right to open a slot at the insertion point */
    for (i=__pyx_code_cache.count; i>pos; i--) {
        entries[i] = entries[i-1];
    }
    entries[pos].code_line = code_line;
    entries[pos].code_object = code_object;
    __pyx_code_cache.count++;
    Py_INCREF(code_object);
}
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
/* Build a minimal (empty-body) PyCodeObject used purely to label a
 * synthetic traceback frame with `funcname`, `filename` and the Python
 * line `py_line`.  When c_line is nonzero the C source location is folded
 * into the displayed function name as "func (file.c:line)".
 * Returns a new reference, or NULL with an exception set. */
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
            const char *funcname, int c_line,
            int py_line, const char *filename) {
    PyCodeObject *py_code = 0;
    PyObject *py_srcfile = 0;
    PyObject *py_funcname = 0;
    #if PY_MAJOR_VERSION < 3
    py_srcfile = PyString_FromString(filename);
    #else
    py_srcfile = PyUnicode_FromString(filename);
    #endif
    if (!py_srcfile) goto bad;
    if (c_line) {
        /* include the generating C file and line in the frame name */
        #if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
        #else
        py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
        #endif
    }
    else {
        #if PY_MAJOR_VERSION < 3
        py_funcname = PyString_FromString(funcname);
        #else
        py_funcname = PyUnicode_FromString(funcname);
        #endif
    }
    if (!py_funcname) goto bad;
    /* all code/consts/names fields are empty: this code object is never run */
    py_code = __Pyx_PyCode_New(
        0,            /*int argcount,*/
        0,            /*int kwonlyargcount,*/
        0,            /*int nlocals,*/
        0,            /*int stacksize,*/
        0,            /*int flags,*/
        __pyx_empty_bytes, /*PyObject *code,*/
        __pyx_empty_tuple, /*PyObject *consts,*/
        __pyx_empty_tuple, /*PyObject *names,*/
        __pyx_empty_tuple, /*PyObject *varnames,*/
        __pyx_empty_tuple, /*PyObject *freevars,*/
        __pyx_empty_tuple, /*PyObject *cellvars,*/
        py_srcfile,   /*PyObject *filename,*/
        py_funcname,  /*PyObject *name,*/
        py_line,      /*int firstlineno,*/
        __pyx_empty_bytes  /*PyObject *lnotab*/
    );
    Py_DECREF(py_srcfile);
    Py_DECREF(py_funcname);
    return py_code;
bad:
    Py_XDECREF(py_srcfile);
    Py_XDECREF(py_funcname);
    return NULL;
}
/* Append a synthetic frame to the current exception's traceback so that
 * errors raised from generated C code show the original source location.
 * Code objects are cached (keyed by c_line, falling back to py_line) to
 * avoid rebuilding them on repeated raises.  Failures here are swallowed:
 * worst case the traceback is just less precise. */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
                               int py_line, const char *filename) {
    PyCodeObject *py_code = 0;
    PyObject *py_globals = 0;
    PyFrameObject *py_frame = 0;
    py_code = __pyx_find_code_object(c_line ? c_line : py_line);
    if (!py_code) {
        py_code = __Pyx_CreateCodeObjectForTraceback(
            funcname, c_line, py_line, filename);
        if (!py_code) goto bad;
        __pyx_insert_code_object(c_line ? c_line : py_line, py_code);
    }
    py_globals = PyModule_GetDict(__pyx_m);  /* borrowed reference */
    if (!py_globals) goto bad;
    py_frame = PyFrame_New(
        PyThreadState_GET(), /*PyThreadState *tstate,*/
        py_code,             /*PyCodeObject *code,*/
        py_globals,          /*PyObject *globals,*/
        0                    /*PyObject *locals*/
    );
    if (!py_frame) goto bad;
    /* the code object's firstlineno is fixed; point the frame at py_line */
    py_frame->f_lineno = py_line;
    PyTraceBack_Here(py_frame);
bad:
    Py_XDECREF(py_code);
    Py_XDECREF(py_frame);
}
/* Intern/construct every string constant in the module's string table.
 * The table is terminated by an entry whose target pointer `p` is NULL.
 * Each entry records the raw bytes `s`, length `n` (including the
 * terminating NUL, hence the n-1 below), and flags selecting str/bytes/
 * unicode plus optional interning and encoding.
 * Returns 0 on success, -1 (with an exception set) on the first failure. */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
    while (t->p) {
        #if PY_MAJOR_VERSION < 3
        if (t->is_unicode) {
            *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
        } else if (t->intern) {
            *t->p = PyString_InternFromString(t->s);
        } else {
            *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
        }
        #else  /* Python 3+ has unicode identifiers */
        if (t->is_unicode | t->is_str) {
            if (t->intern) {
                *t->p = PyUnicode_InternFromString(t->s);
            } else if (t->encoding) {
                *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
            } else {
                *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
            }
        } else {
            *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
        }
        #endif
        if (!*t->p)
            return -1;
        ++t;
    }
    return 0;
}
/* Decode a NUL-terminated C string into a Python unicode object. */
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(char* c_str) {
    const size_t len = strlen(c_str);
    return __Pyx_PyUnicode_FromStringAndSize(c_str, len);
}
/* Like __Pyx_PyObject_AsStringAndSize() but for callers that do not need
 * the byte length.  Returns NULL with an exception set on failure. */
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) {
    Py_ssize_t unused_length;
    return __Pyx_PyObject_AsStringAndSize(o, &unused_length);
}
/* Return a borrowed char* view of a Python string-like object plus its
 * length in bytes.  Accepts unicode (decoded with the configured default
 * encoding), bytearray (on CPython 2.6+), and anything accepted by
 * PyBytes_AsStringAndSize.  The returned pointer is owned by `o` (or by an
 * internal cached object) and must not be freed.
 * Returns NULL with an exception set on failure. */
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
    if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
            __Pyx_sys_getdefaultencoding_not_ascii &&
#endif
            PyUnicode_Check(o)) {
#if PY_VERSION_HEX < 0x03030000
        char* defenc_c;
        /* borrowed: cached on the unicode object itself */
        PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
        if (!defenc) return NULL;
        defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
        {
            /* reject non-ASCII bytes: raise the same UnicodeEncodeError
             * PyUnicode_AsASCIIString would */
            char* end = defenc_c + PyBytes_GET_SIZE(defenc);
            char* c;
            for (c = defenc_c; c < end; c++) {
                if ((unsigned char) (*c) >= 128) {
                    PyUnicode_AsASCIIString(o);
                    return NULL;
                }
            }
        }
#endif /*__PYX_DEFAULT_STRING_ENCODING_IS_ASCII*/
        *length = PyBytes_GET_SIZE(defenc);
        return defenc_c;
#else /* PY_VERSION_HEX < 0x03030000 */
        if (PyUnicode_READY(o) == -1) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
        if (PyUnicode_IS_ASCII(o)) {
            /* ASCII text: UTF-8 representation is byte-identical */
            *length = PyUnicode_GET_DATA_SIZE(o);
            return PyUnicode_AsUTF8(o);
        } else {
            /* called only to raise the expected UnicodeEncodeError */
            PyUnicode_AsASCIIString(o);
            return NULL;
        }
#else /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */
        return PyUnicode_AsUTF8AndSize(o, length);
#endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */
#endif /* PY_VERSION_HEX < 0x03030000 */
    } else
#endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII  || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT */
#if !CYTHON_COMPILING_IN_PYPY
#if PY_VERSION_HEX >= 0x02060000
    if (PyByteArray_Check(o)) {
        *length = PyByteArray_GET_SIZE(o);
        return PyByteArray_AS_STRING(o);
    } else
#endif
#endif
    {
        char* result;
        int r = PyBytes_AsStringAndSize(o, &result, length);
        if (unlikely(r < 0)) {
            return NULL;
        } else {
            return result;
        }
    }
}
/* Truth-test a Python object.  True/False/None are resolved without a
 * call; everything else defers to PyObject_IsTrue() (which may return -1
 * with an exception set). */
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
    if (x == Py_True)
        return 1;
    if ((x == Py_False) | (x == Py_None))
        return 0;
    return PyObject_IsTrue(x);
}
/* Coerce an arbitrary Python object to an int/long object via its
 * __int__ (or, on Python 2, __long__) slot.  Already-integral objects are
 * returned with a fresh reference.  Rejects slot results that are not
 * actually int/long, mirroring CPython's own checks.
 * Returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
    PyNumberMethods *m;
    const char *name = NULL;
    PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
    if (PyInt_Check(x) || PyLong_Check(x))
#else
    if (PyLong_Check(x))
#endif
        return Py_INCREF(x), x;
    m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
    if (m && m->nb_int) {
        name = "int";
        res = PyNumber_Int(x);
    }
    else if (m && m->nb_long) {
        name = "long";
        res = PyNumber_Long(x);
    }
#else
    if (m && m->nb_int) {
        name = "int";
        res = PyNumber_Long(x);
    }
#endif
    if (res) {
        /* the slot may be user-defined: verify it returned an integer */
#if PY_MAJOR_VERSION < 3
        if (!PyInt_Check(res) && !PyLong_Check(res)) {
#else
        if (!PyLong_Check(res)) {
#endif
            PyErr_Format(PyExc_TypeError,
                         "__%.4s__ returned non-%.4s (type %.200s)",
                         name, name, Py_TYPE(res)->tp_name);
            Py_DECREF(res);
            return NULL;
        }
    }
    else if (!PyErr_Occurred()) {
        PyErr_SetString(PyExc_TypeError,
                        "an integer is required");
    }
    return res;
}
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#endif
#endif
/* Convert a Python object usable as an index (int, long, or anything with
 * __index__) to a Py_ssize_t.  Small exact PyLongs are unpacked straight
 * from their digit array when the internals are available.
 * Returns -1 with an exception set on failure; callers must disambiguate
 * a true -1 via PyErr_Occurred(). */
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
  Py_ssize_t ival;
  PyObject *x;
#if PY_MAJOR_VERSION < 3
  if (likely(PyInt_CheckExact(b)))
      return PyInt_AS_LONG(b);
#endif
  if (likely(PyLong_CheckExact(b))) {
    #if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3
    #if CYTHON_USE_PYLONG_INTERNALS
    /* -1/0/+1-digit fast path, no function call */
    switch (Py_SIZE(b)) {
    case -1: return -(sdigit)((PyLongObject*)b)->ob_digit[0];
    case  0: return 0;
    case  1: return ((PyLongObject*)b)->ob_digit[0];
    }
    #endif
    #endif
  #if PY_VERSION_HEX < 0x02060000
    return PyInt_AsSsize_t(b);
  #else
    return PyLong_AsSsize_t(b);
  #endif
  }
  /* generic path: let __index__ produce an integer, then convert that */
  x = PyNumber_Index(b);
  if (!x) return -1;
  ival = PyInt_AsSsize_t(x);
  Py_DECREF(x);
  return ival;
}
/* Convert a C size_t to a Python integer object.
 * On Python >= 2.5 this is a direct call; on older versions values above
 * LONG_MAX must be assembled from their raw (unsigned) byte representation.
 * Returns a new reference, or NULL with an exception set. */
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
#if PY_VERSION_HEX < 0x02050000
   if (ival <= LONG_MAX)
       return PyInt_FromLong((long)ival);
   else {
       /* detect host endianness at run time for _PyLong_FromByteArray */
       unsigned char *bytes = (unsigned char *) &ival;
       int one = 1; int little = (int)*(unsigned char*)&one;
       return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0);
   }
#else
   return PyInt_FromSize_t(ival);
#endif
}
#endif /* Py_PYTHON_H */
|
GB_binop__land_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_uint32)
// A.*B function (eWiseMult):        GB (_AemultB_01__land_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__land_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__land_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_uint32)
// A*D function (colscale): GB (_AxD__land_uint32)
// D*A function (rowscale): GB (_DxB__land_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__land_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__land_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_uint32)
// C=scalar+B GB (_bind1st__land_uint32)
// C=scalar+B' GB (_bind1st_tran__land_uint32)
// C=A+scalar GB (_bind2nd__land_uint32)
// C=A'+scalar GB (_bind2nd_tran__land_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) && (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_UINT32 || GxB_NO_LAND_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled: LAND is not one of the accumulable operators, so no dense
// C += A+B kernel is generated for this operator/type pair.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A AND B where C, A, and B are all dense uint32 matrices; no mask and
// no accumulator.  The loop body lives in the included template, driven by
// the GB_* macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__land_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type pair was compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse; C(i,j) = C(i,j) AND B(i,j) for
// entries present in B.  B_ek_slicing describes the parallel task split.
GrB_Info GB (_Cdense_accumB__land_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // this operator/type pair was compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar: C(i,j) = C(i,j) AND b for all
// entries of C.
GrB_Info GB (_Cdense_accumb__land_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type pair was compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (the block above always returns); generated-code artifact
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale: C(i,j) = A(i,j) AND D(j,j), D diagonal.
GrB_Info GB (_AxD__land_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    // this operator/type pair was compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale: C(i,j) = D(i,i) AND B(i,j), D diagonal.
GrB_Info GB (_DxB__land_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type pair was compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of the patterns of A and
// B, with entries in the intersection combined as A(i,j) AND B(i,j).
GrB_Info GB (_AaddB__land_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // this operator/type pair was compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix slicing workspaces, freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of the patterns
// of A and B, with C(i,j) = A(i,j) AND B(i,j).
GrB_Info GB (_AemultB_01__land_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // this operator/type pair was compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult specialization: A is sparse/hypersparse and B is bitmap/full.
// flipxy would matter only for non-commutative operators; LAND is
// commutative (GB_BINOP_FLIP is 0), so the unflipped branch is compiled.
GrB_Info GB (_AemultB_02__land_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    // this operator/type pair was compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult specialization: M is sparse/hypersparse while both A and B are
// bitmap/full; C takes the pattern of M.
GrB_Info GB (_AemultB_03__land_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    // this operator/type pair was compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult when the result C is held as a bitmap: C=A.*B, C<M>=A.*B, or
// C<!M>=A.*B, with the method variant selected by ewise_method.
GrB_Info GB (_AemultB_bitmap__land_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    // this operator/type pair was compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Apply the operator with the scalar bound as the first argument:
// Cx [p] = (x AND Bx [p]) for every entry present per the bitmap Bb
// (Bb == NULL means all anz entries are present).  Cx and Bx may alias.
GrB_Info GB (_bind1st__land_uint32)
(
    GB_void *Cx_output,
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type pair was compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = Bx [p] ;
        Cx [p] = ((x != 0) && (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Apply the operator with the scalar bound as the second argument:
// Cx [p] = (Ax [p] AND y) for every entry present per the bitmap Ab
// (Ab == NULL means all anz entries are present).  Cx and Ax may alias.
GrB_Info GB (_bind2nd__land_uint32)
(
    GB_void *Cx_output,
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type pair was compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t   y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from the bitmap
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = Ax [p] ;
        Cx [p] = ((aij != 0) && (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
// C = op (x, A'): transpose A while applying cij = (x AND aij), using the
// GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__land_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    // this operator/type pair was compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
// C = op (A', y): transpose A while applying cij = (aij AND y), using the
// GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__land_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    // this operator/type pair was compiled out via GxB_NO_* controls
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
pfmg_setup_rap7.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
#include "_hypre_struct_ls.h"
#include "pfmg.h"
/*--------------------------------------------------------------------------
* Macro to "change coordinates". This routine is written as though
* coarsening is being done in the z-direction. This macro is used to
* allow for coarsening to be done in the x- and y-directions also.
*--------------------------------------------------------------------------*/
#define MapIndex(in_index, cdir, out_index) \
hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 2); \
cdir = (cdir + 1) % 3; \
hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 0); \
cdir = (cdir + 1) % 3; \
hypre_IndexD(out_index, cdir) = hypre_IndexD(in_index, 1); \
cdir = (cdir + 1) % 3;
/*--------------------------------------------------------------------------
* hypre_PFMGCreateCoarseOp7
* Sets up new coarse grid operator stucture. Fine grid
* operator is 7pt and so is coarse, i.e. non-Galerkin.
*--------------------------------------------------------------------------*/
hypre_StructMatrix *
hypre_PFMGCreateCoarseOp7( hypre_StructMatrix *R,
hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructGrid *coarse_grid,
HYPRE_Int cdir )
{
hypre_StructMatrix *RAP;
hypre_Index *RAP_stencil_shape;
hypre_StructStencil *RAP_stencil;
HYPRE_Int RAP_stencil_size;
HYPRE_Int RAP_stencil_dim;
HYPRE_Int RAP_num_ghost[] = {1, 1, 1, 1, 1, 1};
hypre_Index index_temp;
HYPRE_Int k, j, i;
HYPRE_Int stencil_rank;
RAP_stencil_dim = 3;
/*-----------------------------------------------------------------------
* Define RAP_stencil
*-----------------------------------------------------------------------*/
stencil_rank = 0;
/*-----------------------------------------------------------------------
* non-symmetric case
*-----------------------------------------------------------------------*/
if (!hypre_StructMatrixSymmetric(A))
{
/*--------------------------------------------------------------------
* 7 point coarse grid stencil
*--------------------------------------------------------------------*/
RAP_stencil_size = 7;
RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
for (k = -1; k < 2; k++)
{
for (j = -1; j < 2; j++)
{
for (i = -1; i < 2; i++)
{
/*--------------------------------------------------------------
* Storage for 7 elements (c,w,e,n,s,a,b)
*--------------------------------------------------------------*/
if (i*j == 0 && i*k == 0 && j*k == 0)
{
hypre_SetIndex3(index_temp,i,j,k);
MapIndex(index_temp, cdir, RAP_stencil_shape[stencil_rank]);
stencil_rank++;
}
}
}
}
}
/*-----------------------------------------------------------------------
* symmetric case
*-----------------------------------------------------------------------*/
else
{
/*--------------------------------------------------------------------
* 7 point coarse grid stencil
* Only store the lower triangular part + diagonal = 4 entries,
* lower triangular means the lower triangular part on the matrix
* in the standard lexicographic ordering.
*--------------------------------------------------------------------*/
RAP_stencil_size = 4;
RAP_stencil_shape = hypre_CTAlloc(hypre_Index, RAP_stencil_size);
for (k = -1; k < 1; k++)
{
for (j = -1; j < 1; j++)
{
for (i = -1; i < 1; i++)
{
/*--------------------------------------------------------------
* Store 4 elements in (c,w,s,b)
*--------------------------------------------------------------*/
if (i*j == 0 && i*k == 0 && j*k == 0)
{
hypre_SetIndex3(index_temp,i,j,k);
MapIndex(index_temp, cdir, RAP_stencil_shape[stencil_rank]);
stencil_rank++;
}
}
}
}
}
RAP_stencil = hypre_StructStencilCreate(RAP_stencil_dim, RAP_stencil_size,
RAP_stencil_shape);
RAP = hypre_StructMatrixCreate(hypre_StructMatrixComm(A),
coarse_grid, RAP_stencil);
hypre_StructStencilDestroy(RAP_stencil);
/*-----------------------------------------------------------------------
* Coarse operator in symmetric iff fine operator is
*-----------------------------------------------------------------------*/
hypre_StructMatrixSymmetric(RAP) = hypre_StructMatrixSymmetric(A);
/*-----------------------------------------------------------------------
* Set number of ghost points - one one each boundary
*-----------------------------------------------------------------------*/
hypre_StructMatrixSetNumGhost(RAP, RAP_num_ghost);
return RAP;
}
/*--------------------------------------------------------------------------
* hypre_PFMGBuildCoarseOp7
* Sets up new coarse grid operator stucture. Fine grid operator is 7pt and
* so is coarse, i.e. non-Galerkin.
*
* Uses the non-Galerkin strategy from Ashby & Falgout's original ParFlow
* algorithm. For constant_coefficient==2, see [issue663].
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_PFMGBuildCoarseOp7( hypre_StructMatrix *A,
hypre_StructMatrix *P,
hypre_StructMatrix *R,
HYPRE_Int cdir,
hypre_Index cindex,
hypre_Index cstride,
hypre_StructMatrix *RAP )
{
HYPRE_Int ndim = hypre_StructMatrixNDim(A);
hypre_Index index;
hypre_Index index_temp;
hypre_StructGrid *fgrid;
hypre_BoxArray *fgrid_boxes;
hypre_Box *fgrid_box;
HYPRE_Int *fgrid_ids;
hypre_StructGrid *cgrid;
hypre_BoxArray *cgrid_boxes;
hypre_Box *cgrid_box;
HYPRE_Int *cgrid_ids;
hypre_IndexRef cstart, bfstart, stridef;
hypre_Index fstart, bcstart, stridec;
hypre_Index loop_size;
HYPRE_Int constant_coefficient;
HYPRE_Int fi, ci, fbi;
hypre_Box *A_dbox;
hypre_Box *P_dbox;
hypre_Box *RAP_dbox;
hypre_BoxArray *bdy_boxes, *tmp_boxes;
hypre_Box *bdy_box, *fcbox;
HYPRE_Real *pb, *pa;
HYPRE_Real *a_cc, *a_cw, *a_ce, *a_cs, *a_cn, *a_cb, *a_ca;
HYPRE_Real *rap_cc, *rap_cw, *rap_ce, *rap_cs, *rap_cn;
HYPRE_Real *rap_cb, *rap_ca;
HYPRE_Real west, east, south, north;
HYPRE_Real center_int, center_bdy;
HYPRE_Int iA, iAm1, iAp1;
HYPRE_Int iAc;
HYPRE_Int iP, iPm1, iPp1;
HYPRE_Int OffsetA;
HYPRE_Int OffsetP;
stridef = cstride;
hypre_SetIndex3(stridec, 1, 1, 1);
fgrid = hypre_StructMatrixGrid(A);
fgrid_boxes = hypre_StructGridBoxes(fgrid);
fgrid_ids = hypre_StructGridIDs(fgrid);
cgrid = hypre_StructMatrixGrid(RAP);
cgrid_boxes = hypre_StructGridBoxes(cgrid);
cgrid_ids = hypre_StructGridIDs(cgrid);
constant_coefficient = hypre_StructMatrixConstantCoefficient(RAP);
hypre_assert( hypre_StructMatrixConstantCoefficient(A) == constant_coefficient );
if ( constant_coefficient==0 )
{
hypre_assert( hypre_StructMatrixConstantCoefficient(R) == 0 );
hypre_assert( hypre_StructMatrixConstantCoefficient(P) == 0 );
}
else /* 1 or 2 */
{
hypre_assert( hypre_StructMatrixConstantCoefficient(R) == 1 );
hypre_assert( hypre_StructMatrixConstantCoefficient(P) == 1 );
}
fcbox = hypre_BoxCreate(ndim);
bdy_boxes = hypre_BoxArrayCreate(0, ndim);
tmp_boxes = hypre_BoxArrayCreate(0, ndim);
fi = 0;
hypre_ForBoxI(ci, cgrid_boxes)
{
while (fgrid_ids[fi] != cgrid_ids[ci])
{
fi++;
}
cgrid_box = hypre_BoxArrayBox(cgrid_boxes, ci);
fgrid_box = hypre_BoxArrayBox(fgrid_boxes, fi);
cstart = hypre_BoxIMin(cgrid_box);
hypre_StructMapCoarseToFine(cstart, cindex, cstride, fstart);
A_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A), fi);
P_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(P), fi);
RAP_dbox = hypre_BoxArrayBox(hypre_StructMatrixDataSpace(RAP), ci);
/*-----------------------------------------------------------------
* Extract pointers for interpolation operator:
* pb is pointer for weight for f-point below c-point
* pa is pointer for weight for f-point above c-point
*-----------------------------------------------------------------*/
hypre_SetIndex3(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
pa = hypre_StructMatrixExtractPointerByIndex(P, fi, index);
hypre_SetIndex3(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
pb = hypre_StructMatrixExtractPointerByIndex(P, fi, index) -
hypre_BoxOffsetDistance(P_dbox, index);
/*-----------------------------------------------------------------
* Extract pointers for 7-point fine grid operator:
*
* a_cc is pointer for center coefficient
* a_cw is pointer for west coefficient
* a_ce is pointer for east coefficient
* a_cs is pointer for south coefficient
* a_cn is pointer for north coefficient
* a_cb is pointer for below coefficient
* a_ca is pointer for above coefficient
*-----------------------------------------------------------------*/
hypre_SetIndex3(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
a_cc = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
a_cw = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
a_ce = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
a_cs = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
a_cn = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
a_cb = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
hypre_SetIndex3(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
a_ca = hypre_StructMatrixExtractPointerByIndex(A, fi, index);
/*-----------------------------------------------------------------
* Extract pointers for coarse grid operator
* rap_cc is pointer for center coefficient (etc.)
*-----------------------------------------------------------------*/
hypre_SetIndex3(index_temp,0,0,0);
MapIndex(index_temp, cdir, index);
rap_cc = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,-1,0,0);
MapIndex(index_temp, cdir, index);
rap_cw = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,1,0,0);
MapIndex(index_temp, cdir, index);
rap_ce = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,0,-1,0);
MapIndex(index_temp, cdir, index);
rap_cs = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,0,1,0);
MapIndex(index_temp, cdir, index);
rap_cn = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,0,0,-1);
MapIndex(index_temp, cdir, index);
rap_cb = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
hypre_SetIndex3(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
rap_ca = hypre_StructMatrixExtractPointerByIndex(RAP, ci, index);
/*-----------------------------------------------------------------
* Define offsets for fine grid stencil and interpolation
*
* In the BoxLoop below I assume iA and iP refer to data associated
* with the point which we are building the stencil for. The below
* Offsets are used in refering to data associated with other points.
*-----------------------------------------------------------------*/
hypre_SetIndex3(index_temp,0,0,1);
MapIndex(index_temp, cdir, index);
OffsetP = hypre_BoxOffsetDistance(P_dbox,index);
OffsetA = hypre_BoxOffsetDistance(A_dbox,index);
/*--------------------------------------------------------------
* Loop for symmetric 7-point fine grid operator; produces a
* symmetric 7-point coarse grid operator.
*--------------------------------------------------------------*/
if ( constant_coefficient==0 )
{
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop3Begin(hypre_StructMatrixNDim(A), loop_size,
P_dbox, cstart, stridec, iP,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iP,iA,iAc,iAm1,iAp1,iPm1,iPp1,west,east,south,north) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop3For(iP, iA, iAc)
{
iAm1 = iA - OffsetA;
iAp1 = iA + OffsetA;
iPm1 = iP - OffsetP;
iPp1 = iP + OffsetP;
rap_cb[iAc] = a_cb[iA] * pa[iPm1];
rap_ca[iAc] = a_ca[iA] * pb[iPp1];
west = a_cw[iA] + 0.5 * a_cw[iAm1] + 0.5 * a_cw[iAp1];
east = a_ce[iA] + 0.5 * a_ce[iAm1] + 0.5 * a_ce[iAp1];
south = a_cs[iA] + 0.5 * a_cs[iAm1] + 0.5 * a_cs[iAp1];
north = a_cn[iA] + 0.5 * a_cn[iAm1] + 0.5 * a_cn[iAp1];
/*-----------------------------------------------------
* Prevent non-zero entries reaching off grid
*-----------------------------------------------------*/
if(a_cw[iA] == 0.0) west = 0.0;
if(a_ce[iA] == 0.0) east = 0.0;
if(a_cs[iA] == 0.0) south = 0.0;
if(a_cn[iA] == 0.0) north = 0.0;
rap_cw[iAc] = west;
rap_ce[iAc] = east;
rap_cs[iAc] = south;
rap_cn[iAc] = north;
rap_cc[iAc] = a_cc[iA]
+ a_cw[iA] + a_ce[iA] + a_cs[iA] + a_cn[iA]
+ a_cb[iA] * pb[iP] + a_ca[iA] * pa[iP]
- west - east - south - north;
}
hypre_BoxLoop3End(iP, iA, iAc);
}
else if ( constant_coefficient==1 )
{
rap_cb[0] = rap_ca[0] = a_cb[0] * pa[0];
rap_cw[0] = rap_ce[0] = 2.0*a_cw[0];
rap_cs[0] = rap_cn[0] = 2.0*a_cs[0];
rap_cc[0] = a_cc[0] - 2.0*( a_cw[0] + a_cs[0] - rap_cb[0] );
}
else if ( constant_coefficient==2 )
{
/* NOTE: This does not reduce to either of the above operators unless
* the row sum is zero and the interpolation weights are 1/2 */
rap_cb[0] = rap_ca[0] = 0.5*a_cb[0];
rap_cw[0] = rap_ce[0] = 2.0*a_cw[0];
rap_cs[0] = rap_cn[0] = 2.0*a_cs[0];
center_int = 3.0*a_cb[0];
center_bdy = 0.5*a_cb[0] + (a_cw[0] + a_cs[0] + a_cb[0]);
hypre_BoxGetSize(cgrid_box, loop_size);
hypre_BoxLoop2Begin(hypre_StructMatrixNDim(A), loop_size,
A_dbox, fstart, stridef, iA,
RAP_dbox, cstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iA,iAc) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(iA, iAc)
{
rap_cc[iAc] = 2.0*a_cc[iA] + center_int;
}
hypre_BoxLoop2End(iA, iAc);
hypre_CopyBox(cgrid_box, fcbox);
hypre_StructMapCoarseToFine(hypre_BoxIMin(fcbox), cindex, cstride,
hypre_BoxIMin(fcbox));
hypre_StructMapCoarseToFine(hypre_BoxIMax(fcbox), cindex, cstride,
hypre_BoxIMax(fcbox));
hypre_BoxArraySetSize(bdy_boxes, 0);
if (hypre_BoxIMinD(fcbox, cdir) == hypre_BoxIMinD(fgrid_box, cdir))
{
hypre_BoxBoundaryIntersect(fcbox, fgrid, cdir, -1, bdy_boxes);
}
if (hypre_BoxIMaxD(fcbox, cdir) == hypre_BoxIMaxD(fgrid_box, cdir))
{
hypre_BoxBoundaryIntersect(fcbox, fgrid, cdir, 1, tmp_boxes);
hypre_AppendBoxArray(tmp_boxes, bdy_boxes);
}
hypre_ForBoxI(fbi, bdy_boxes)
{
bdy_box = hypre_BoxArrayBox(bdy_boxes, fbi);
hypre_BoxGetSize(bdy_box, loop_size);
bfstart = hypre_BoxIMin(bdy_box);
hypre_StructMapFineToCoarse(bfstart, cindex, cstride, bcstart);
hypre_BoxLoop2Begin(hypre_StructMatrixNDim(A), loop_size,
A_dbox, bfstart, stridef, iA,
RAP_dbox, bcstart, stridec, iAc);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,iA,iAc) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(iA, iAc)
{
rap_cc[iAc] -= 0.5*a_cc[iA] + center_bdy;
}
hypre_BoxLoop2End(iA, iAc);
}
}
} /* end ForBoxI */
hypre_BoxDestroy(fcbox);
hypre_BoxArrayDestroy(bdy_boxes);
hypre_BoxArrayDestroy(tmp_boxes);
return hypre_error_flag;
}
|
test.c |
#include <stdio.h>
#include <omp.h>
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define TRIALS (1)
#define N (1024*3)
#define M (16*32)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
double A[M][N], B[M][N], C[N], D[N], E[N];
double S[M];
double p[2];
int main(void) {
check_offloading();
INIT();
int cpuExec = 0;
#pragma omp target map(tofrom: cpuExec)
{
cpuExec = omp_is_initial_device();
}
int tms = 16;
int th = 32;
int threads[1]; threads[0] = th-1;
//
// Test: proc_bind clause
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES proc_bind(master)
#include "defines.h"
NESTED_PARALLEL_FOR(
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
},
for (int i = 0; i < N; i++) { \
A[idx][i] += C[i] + D[i]; \
B[idx][i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES proc_bind(close)
#include "defines.h"
NESTED_PARALLEL_FOR(
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
},
for (int i = 0; i < N; i++) { \
A[idx][i] += C[i] + D[i]; \
B[idx][i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES proc_bind(spread)
#include "defines.h"
NESTED_PARALLEL_FOR(
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
},
for (int i = 0; i < N; i++) { \
A[idx][i] += C[i] + D[i]; \
B[idx][i] += D[i] + E[i]; \
},
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
//
// Test: private, shared clauses on omp target teams distribute parallel for with nested parallel.
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES private(p,q) shared(A,B,C,D,E)
#include "defines.h"
NESTED_PARALLEL_FOR(
double p = 2; \
double q = 4; \
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
},
for (int i = 0; i < N; i++) { \
p = C[i] + D[i]; \
q = D[i] + E[i]; \
A[idx][i] += p; \
B[idx][i] += q; \
}
,
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) 6 + SUMS * (N/2*(N+1))))
//
// Test: firstprivate clause on omp target teams distribute parallel for with nested parallel.
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES firstprivate(p,q)
#include "defines.h"
NESTED_PARALLEL_FOR(
double p = -4; \
double q = 4; \
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
},
for (int i = 0; i < N; i++) { \
A[idx][i] += C[i] + D[i] + p; \
B[idx][i] += D[i] + E[i] + q; \
if (i == N-1) { \
p += 6; \
q += 9; \
} \
}
,
{
double tmp = p + q;
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
//
// Test: lastprivate clause on omp target teams distribute parallel for with nested parallel.
//
TESTD("omp target teams distribute parallel for num_teams(tms) num_threads(th)",
for (int idx = 0; idx < tms*th; idx++) {
double q0[1];
double q1[1];
double q2[1];
double q3[1];
S[idx] = 0;
for (int i = 0; i < N; i++) {
A[idx][i] = B[idx][i] = 0;
}
_Pragma("omp parallel for lastprivate(q0) if(threads[0] > 1) num_threads(threads[0])")
for (int i = 0; i < N; i++) {
q0[0] = C[i] + D[i];
A[idx][i] += q0[0];
}
_Pragma("omp parallel for schedule(auto) lastprivate(q1) if(threads[0] > 1) num_threads(threads[0])")
for (int i = 0; i < N; i++) {
q1[0] = C[i] + D[i];
A[idx][i] += q1[0];
}
_Pragma("omp parallel for schedule(static) lastprivate(q2) if(threads[0] > 1) num_threads(threads[0])")
for (int i = 0; i < N; i++) {
q2[0] = D[i] + E[i];
B[idx][i] += q2[0];
}
_Pragma("omp parallel for schedule(static,9) lastprivate(q3) if(threads[0] > 1) num_threads(threads[0])")
for (int i = 0; i < N; i++) {
q3[0] = D[i] + E[i];
B[idx][i] += q3[0];
}
double tmp = q0[0] + q1[0] + q2[0] + q3[0];
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
}
, VERIFY(0, tms*th, S[i], (double) 2 * (N + (N/2*(N+1))) ));
//
// Test: private clause on omp target teams distribute parallel for with nested parallel.
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES private(p)
#include "defines.h"
NESTED_PARALLEL_FOR(
double p[2]; \
p[0] = 2; p[1] = 4; \
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
p[0] = C[i] + D[i]; \
p[1] = D[i] + E[i]; \
A[idx][i] += p[0]; \
B[idx][i] += p[1]; \
}
,
{
double tmp = p[0] + p[1];
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) 6 + SUMS * (N/2*(N+1))))
//
// Test: firstprivate clause on omp target teams distribute parallel for with nested parallel.
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES firstprivate(p)
#include "defines.h"
NESTED_PARALLEL_FOR(
double p[2]; \
p[0] = -4; p[1] = 4; \
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
}
,
for (int i = 0; i < N; i++) { \
A[idx][i] += C[i] + D[i] + p[0]; \
B[idx][i] += D[i] + E[i] + p[1]; \
if (i == N-1) { \
p[0] += 6; \
p[1] += 9; \
} \
}
,
{
double tmp = p[0] + p[1];
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
//
// Test: collapse clause on omp target teams distribute parallel for with nested parallel.
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES collapse(2)
#include "defines.h"
NESTED_PARALLEL_FOR(
S[idx] = 0; \
for (int i = 0; i < N; i++) { \
A[idx][i] = B[idx][i] = 0; \
}
,
for (int i = 0; i < 1024; i++) { \
for (int j = 0; j < 3; j++) { \
A[idx][i*3+j] += C[i*3+j] + D[i*3+j]; \
B[idx][i*3+j] += D[i*3+j] + E[i*3+j]; \
} \
}
,
{
double tmp = 0;
for (int i = 0; i < N; i++) {
tmp += A[idx][i] + B[idx][i];
}
S[idx] += tmp;
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
//
// Test: ordered clause on omp target teams distribute parallel for with nested parallel.
//
#undef NESTED_PARALLEL_FOR_CLAUSES
#define NESTED_PARALLEL_FOR_CLAUSES ordered
#include "defines.h"
NESTED_PARALLEL_FOR(
S[idx] = 0; \
,
for (int i = 0; i < N; i++) { \
_Pragma("omp ordered") \
S[idx] += C[i] + D[i]; \
}
,
{
},
VERIFY(0, tms*th, S[i], (double) SUMS * (N/2*(N+1))))
//
// Test: Ensure coalesced scheduling on GPU.
//
if (!cpuExec) {
TESTD("omp target teams distribute parallel for num_teams(tms) num_threads(th)",
for (int idx = 0; idx < tms*th; idx++) {
S[idx] = 0;
for (int i = 0; i < 96; i++) {
A[idx][i] = 0;
}
_Pragma("omp parallel for num_threads(32)")
for (int i = 0; i < 96; i++) {
A[idx][i] += i - omp_get_thread_num();
}
_Pragma("omp parallel for schedule(auto) num_threads(32)")
for (int i = 0; i < 96; i++) {
A[idx][i] += i - omp_get_thread_num();
}
_Pragma("omp parallel for schedule(static,1) num_threads(32)")
for (int i = 0; i < 96; i++) {
A[idx][i] += i - omp_get_thread_num();
}
double tmp = 0;
for (int i = 0; i < 96; i++) {
tmp += A[idx][i];
}
S[idx] = tmp;
}
, VERIFY(0, tms*th, S[i], (double) 3 * 95 * 48 ));
} else {
DUMP_SUCCESS(1);
}
//DUMP_SUCCESS(1);
return 0;
}
|
pvc_eva.h | #pragma once
#include "emp-pvc/judge.h"
#include "emp-pvc/common.h"
#include "emp-pvc/pipe_io.h"
#include "emp-pvc/hash_array.h"
#include "emp-pvc/internal_eva.h"
#include "emp-pvc/gc_commit_gen.h"
#include "emp-pvc/ecdsa.h"
#include <emp-ot/np.h>
#include <emp-ot/mextension_kos.h>
#include <deque>
#include <memory>
#include <vector>
#include <thread>
#include <iostream>
namespace emp {
thread_local bool randomized_input = false;
thread_local int itr = 0;
template <typename IO>
class PVCEva: public ProtocolExecution {
private:
using garbler_t = HalfGateEva<IO>;
using evaluator_t = InternalEva<IO>;
using bytes_t = std::vector<uint8_t>;
using blk_vec_t = std::vector<block>;
using blk_que_t = std::deque<block>;
struct sign_st {
int32_t sig_len;
uint8_t payload[ECDSA_SIGN_BYTES];
};
using sign_t = sign_st[1];
inline int get_index(int j) const { return std::min(j, num_io_ - 1); }
const int num_io_;
std::vector<IO *> io_;
IO *aux_io_ = nullptr;
GCHashIO *hash_gc_io_ = nullptr;
HalfGateEva<GCHashIO> *hashed_gc_ = nullptr;
std::vector<garbler_t *> gc_;
std::vector<evaluator_t*> eva_;
ver_key_t ver_key_;
Hash hsher;
public:
explicit PVCEva(std::vector<IO *> iov, IO *aio)
: ProtocolExecution(BOB),
num_io_(iov.size()),
io_(iov),
aux_io_(aio)
{
assert(num_io_ > 0);
gc_.resize(num_io_);
eva_.resize(num_io_);
for (int i = 0; i < num_io_; ++i) {
gc_[i] = new garbler_t(io_[i]);
eva_[i] = new evaluator_t(io_[i], gc_[i]);
}
hsh_ow_.reserve(1 << 25);
output_labels_.reserve(1 << 25);
}
~PVCEva() {
std::memset(seeds_B_, 0, sizeof(seeds_B_));
for (auto gc : gc_) delete gc;
for (auto eva : eva_) delete eva;
if (hash_gc_io_) delete hash_gc_io_;
if (hashed_gc_) delete hashed_gc_;
}
template <typename RT>
bool run(typename TPC<RT>::T const& circ, const void *bob_input) {
ot_on_seeds();
std::thread ot_in_head_th([this, &circ] {
ot_in_the_head<RT>(circ);
});
bool valid = true;
/* concurrently run small tasks along with real OT */
std::thread small_tasks([this, &circ, &valid] {
simulate_gc_commit<RT>(circ); /* simulate gc commit first */
if (!recv_and_check_circuit_commits()) {
std::cout << "invalid gc commitment\n";
valid = false;
}
send_seeds_hash();
});
run_real_ot<RT>(circ, bob_input);
int invalid_index = -1;
small_tasks.join();
valid = recv_and_check_sign_trans(&invalid_index);
if (valid) {
send_witness();
run_real_gc<RT>(circ, bob_input);
} else {
std::cout << "invalid ecdsa sign" << std::endl;
create_cheated_cert(invalid_index);
judge_cert<RT>(circ);
return valid;
}
ot_in_head_th.join();
valid = check_ot_trans(&invalid_index);
if (!valid) {
std::cout << "invalid real gc commit" << std::endl;
create_cheated_cert(invalid_index);
judge_cert<RT>(circ);
}
return valid;
}
void feed(block *label, int party, const bool *b, int len)
{
if (state == State::GC && party == BOB) {
assert(input_labels_.size() >= len);
assert(!randomized_input);
auto st = input_labels_.begin();
auto ed = st + len;
auto tmp = st;
block *ptr = label;
while (tmp != ed)
*ptr++ = *tmp++;
input_labels_.erase(st, ed);
return;
}
eva_[get_index(itr)]->randomized = randomized_input;
eva_[get_index(itr)]->feed(label, party, b, len);
if (state == State::OT && party == BOB && !randomized_input) {
input_labels_.insert(input_labels_.end(), label, label + len);
}
}
void reveal(bool *b, int party, const block *label, int len)
{
assert(state == State::GC);
if (party == BOB) {
output_labels_.insert(output_labels_.end(), label, label + len);
}
}
void setup_real_gc(int j) {
assert(j >= 0 && j < MAX_PVC_ITERATION);
setup_exec(j);
state = State::GC;
eva_[get_index(j)]->state = state;
hash_gc_io_ = new GCHashIO(io_[get_index(j)]);
hashed_gc_ = new HalfGateEva<GCHashIO>(hash_gc_io_);
CircuitExecution::circ_exec = hashed_gc_;
}
void setup_ot(int j) {
assert(j >= 0 && j < MAX_PVC_ITERATION);
setup_exec(j);
state = State::OT;
randomized_input = (j != chosen_index_);
eva_[get_index(j)]->state = state;
}
void setup_exec(int j) {
assert(j >= 0 && j < MAX_PVC_ITERATION);
itr = j;
state = State::INIT;
randomized_input = false;
const int idx = get_index(j);
if (gc_[idx]) delete gc_[idx];
if (eva_[idx]) delete eva_[idx];
gc_[idx] = new garbler_t(io_[idx]);
eva_[idx] = new evaluator_t(io_[idx], gc_[idx], &seeds_B_[j]);
CircuitExecution::circ_exec = gc_[idx];
ProtocolExecution::prot_exec = this;
}
private:
int chosen_index_ = 0; /* 1-of-L. */
State state; /* protocol_execution state */
uint64_t label_id = 0;
PRP label_prp;
blk_que_t input_labels_; /* input labels for the evaluation circuit */
blk_vec_t output_labels_;
std::vector<int64_t> hsh_ow_; /* output labels for the evaluation circuit. */
std::vector<int64_t> rev_hsh_ow_; /* hash of output labels recv from Alice. */
block seeds_B_[MAX_PVC_ITERATION];
block seeds_A_[MAX_PVC_ITERATION];
bytes_t tx_sd_ot_[MAX_PVC_ITERATION]; /* transcript of seeds OT. */
bytes_t rev_ot_tx_[MAX_PVC_ITERATION]; /* received OT transcript. */
hash_t sim_ot_tx_dgst_[MAX_PVC_ITERATION]; /* digest of the simulated OT transcript. */
Com sim_com_[MAX_PVC_ITERATION]; /* simulated gc commit */
Com rev_com_[MAX_PVC_ITERATION]; /* received gc commit. */
sign_t tx_sign_[MAX_PVC_ITERATION]; /* Alice's sign on whole transcript. */
/*
* Run MAX_PVC_ITERATION 1-of-2 OTs on Alices' seeds
*/
void ot_on_seeds() {
PRG prg;//("this-is-a-fixed-prg-too");
prg.random_data(&chosen_index_, sizeof(int));
chosen_index_ = std::abs(chosen_index_) % MAX_PVC_ITERATION;
prg.random_block(seeds_B_, MAX_PVC_ITERATION);
LoggedOTCO<IO> logOT(nullptr);
for (size_t j = 0; j < MAX_PVC_ITERATION; ++j) {
logOT.io = io_.at(get_index(j));
logOT.reseed(&seeds_B_[j]);
bool bb = ((chosen_index_ - j) == 0);
logOT.recv(&seeds_A_[j], &bb, 1);
tx_sd_ot_[j].resize(logOT.log_length());
logOT.get_log(tx_sd_ot_[j].data(), tx_sd_ot_[j].size());
logOT.clear();
}
}
void send_seeds_hash() const {
/* send hash of seedB */
hash_t digest;
for (const auto &seed : seeds_B_) {
hsher.hash_once(digest.data(), &seed, sizeof(block));
aux_io_->send_data(digest.data(), sizeof(hash_t));
}
aux_io_->flush();
}
bool verify_trans_sign(int j) {
assert(j >= 0 && j < MAX_PVC_ITERATION);
auto io = io_[get_index(j)];
io->recv_data(&(tx_sign_[j]->sig_len), sizeof(int32_t));
io->recv_data(tx_sign_[j]->payload, tx_sign_[j]->sig_len);
hash_t h;
int32_t tx_len;
io->recv_data(&tx_len, sizeof(int32_t));
rev_ot_tx_[j].resize(tx_len);
io->recv_data(rev_ot_tx_[j].data(), tx_len);
hsher.hash_once((char *)h.data(), rev_ot_tx_[j].data(), tx_len);
PVCJudge judge;
return judge.verify_sign(j, rev_com_[j], h, seeds_B_[j],
tx_sd_ot_[j], tx_sign_[j]->payload,
tx_sign_[j]->sig_len, ver_key_);
}
void receive_ver_key() {
int32_t len;
io_[0]->recv_data(&len, sizeof(len));
if (len < 0 || len > ECDSA_VK_BYTES) {
std::cerr << "Received invalid verification key" << std::endl;
exit(1);
} else {
uint8_t buf[ECDSA_VK_BYTES];
io_[0]->recv_data(buf, len);
ecdsa_deserialize_ver_key(ver_key_, buf, len);
}
}
void send_witness() const {
int32_t j = chosen_index_;
io_[0]->send_data(&j, sizeof(int32_t));
for (int i = 0; i < MAX_PVC_ITERATION; ++i) {
io_[0]->send_data(&seeds_A_[i], sizeof(block));
}
io_[0]->flush();
}
bool recv_and_check_sign_trans(int *invalid) {
receive_ver_key(); /* this step might be replaced by PKI */
for (int j = 0; j < MAX_PVC_ITERATION; ++j) {
if (!verify_trans_sign(j)) {
if (invalid) *invalid = j;
return false;
}
}
return true;
}
bool check_ot_trans(int *invalid) {
hash_t dig;
for (int j = 0; j < MAX_PVC_ITERATION; ++j) {
if (j == chosen_index_) continue;
hsher.hash_once((char *) dig.data(),
rev_ot_tx_[j].data(),
rev_ot_tx_[j].size());
if (dig != sim_ot_tx_dgst_[j]) {
*invalid = j;
std::cerr << "Invalid ot trans" << std::endl;
return false;
}
}
return true;
}
bool recv_and_check_circuit_commits() {
int valid = -1;
for (int j = 0; j < MAX_PVC_ITERATION; ++j) {
aux_io_->recv_data(rev_com_[j], sizeof(Com));
if (j != chosen_index_ &&
0 != std::memcmp(sim_com_[j], rev_com_[j], sizeof(Com))) {
valid = j;
}
}
return valid == -1;
}
template <typename RT>
void simulate_gc_commit(typename TPC<RT>::T const& circ) {
PVCJudge judge;
for (int j = 0; j < MAX_PVC_ITERATION; ++j) {
if (j == chosen_index_) continue;
judge.simulate_gc_commit<RT>(sim_com_[j], seeds_A_[j], circ);
}
}
template <typename RT>
void run_real_ot(typename TPC<RT>::T const& circ, const void *bob_input) {
#pragma omp parallel for num_threads(2)
for (int j = 0; j < MAX_PVC_ITERATION; ++j) {
setup_ot(j);
circ(nullptr, bob_input, TPCF_OT_ONLY);
}
}
template <typename RT>
bool run_real_gc(typename TPC<RT>::T const& circ, const void *bob_input) {
/* run real GC with real input */
setup_real_gc(chosen_index_);
circ(nullptr, bob_input, TPCF_REAL_GC);
if (!check_decomit()) {
std::cout << "Abort: invalid decomitment.\n";
return false;
}
bool ok = false;
RT rt = decode_output_wires<RT>(&ok);
#ifndef NDEBUG
if (ok)
std::cout << "ans = " << rt << std::endl;
else
std::cout << "decoding failed" << std::endl;
#endif
return ok;
}
template <typename RT>
void ot_in_the_head(typename TPC<RT>::T const& circ) {
PVCJudge judge;
for (int j = 0; j < MAX_PVC_ITERATION; ++j) {
if (j == chosen_index_) continue;
judge.ot_in_the_head<RT>(sim_ot_tx_dgst_[j],
seeds_A_[j], seeds_B_[j], circ);
}
}
bool check_decomit() {
hash_t dig;
hash_gc_io_->get_digest((char *)dig.data());
Decom decom;
hash_t plyld;
io_[get_index(chosen_index_)]->recv_data(&decom, sizeof(block));
io_[get_index(chosen_index_)]->recv_data(plyld.data(), sizeof(hash_t));
if (dig != plyld) {
std::cout << "----- invalid gc commit ------\n" << std::endl;
for (auto c : dig) { printf("%02x", c); printf("\n"); }
for (auto c : plyld) { printf("%02x", c); printf("\n"); }
return false;
}
Commitment commiter;
return commiter.open(decom, rev_com_[chosen_index_],
plyld.data(), Hash::DIGEST_SIZE);
}
/* Decode the evaluated output wire labels into plaintext bits.
   Hashes our output labels in parallel (8 workers) while receiving the
   peer's per-wire {zero,one} label hashes, then matches each local hash
   against the received pair to recover the bit.
   @param ok  set to true on success, false on any decode failure.
   @return the decoded value (ReT{}) on failure. */
template <class ReT>
ReT decode_output_wires(bool *ok) {
  std::vector<std::thread> workers;
  const int n_workers = 8;
  const size_t n_jobs = output_labels_.size();
  /* std::max<size_t> instead of 1UL: on LLP64 targets unsigned long is
     narrower than size_t and the old form failed to compile/overloaded
     wrongly. */
  const size_t batch = std::max<size_t>(1, (n_jobs + n_workers - 1) / n_workers);
  hsh_ow_.resize(n_jobs);
  for (int i = 0; i < n_workers; ++i) {
    /* BUGFIX: when n_jobs < i*batch the old code produced from > to and
       the worker loop (`id != end`) ran past `end`, reading
       output_labels_ out of bounds. Clamp `from` and iterate with `<`. */
    size_t from = std::min(n_jobs, static_cast<size_t>(i) * batch);
    size_t to = std::min(n_jobs, from + batch);
    workers.emplace_back([this](size_t id, size_t end) {
      block l;
      int64_t *d = (int64_t *) &l;
      while (id < end) {
        l = label_prp.H(output_labels_[id], 2 * id);
        hsh_ow_[id++] = *d;
      }
    }, from, to);
  }
  int32_t cnt_ow = -1;
  io_[get_index(chosen_index_)]->recv_data(&cnt_ow, sizeof(int32_t));
  assert(cnt_ow >= 0);
  int64_t hsh;
  rev_hsh_ow_.reserve(cnt_ow);
  for (int i = 0; i < cnt_ow; ++i) {
    io_[get_index(chosen_index_)]->recv_data(&hsh, sizeof(int64_t));
    rev_hsh_ow_.push_back(hsh);
  }
  for (auto &w : workers) w.join();
  size_t nr_labels = hsh_ow_.size();
  /* peer must send exactly two hashes (zero/one label) per output wire */
  if (nr_labels * 2 != rev_hsh_ow_.size()) {
    if (ok) *ok = false;
    std::cerr << "Need " << nr_labels * 2 << " wires, but got "
              << rev_hsh_ow_.size() << std::endl;
    return ReT{};
  }
  std::vector<bool> bits;
  bits.reserve(nr_labels);
  for (size_t i = 0; i < nr_labels; ++i) {
    int64_t d = hsh_ow_[i];
#ifdef DEBUG
    /* debug build: reject labels matching neither received hash */
    if (d == rev_hsh_ow_[i * 2]) {
      bits.push_back(false);
    } else if (d == rev_hsh_ow_[i * 2 + 1]) {
      bits.push_back(true);
    } else {
      std::cerr << "Semantic wrong " << i << "\n";
      if (ok) *ok = false;
      return ReT{};
    }
#else
    if (d == rev_hsh_ow_[i * 2])
      bits.push_back(false);
    else
      bits.push_back(true);
#endif
  }
  if (ok) *ok = true;
  return nr_labels < 2048 ? Revealer<ReT>::reveal(bits) : "too long";
}
/* Serialize a cheating certificate for iteration j to `out`.
   The field order below is the wire format the judge parses
   (see PVCJudge::judge); do not reorder fields. */
void do_create_cheated_cert(std::ostream& out, int32_t j) {
  assert(j >= 0 && j < MAX_PVC_ITERATION);
  hsher.reset();
  uint8_t buf0[128];
  /* NOTE(review): assumes a serialized ECDSA verification key fits in
     128 bytes -- confirm against ecdsa_serialize_ver_key. */
  int32_t key_len = ecdsa_serialize_ver_key(buf0, 128, ver_key_);
  /* Alice's pk */
  out.write((const char *)&key_len, sizeof(int32_t));
  out.write((const char *)buf0, key_len);
  /* index */
  out.write((const char *)&j, sizeof(int32_t));
  /* ecdsa signature */
  out.write((const char *)&tx_sign_[j]->sig_len, sizeof(int32_t));
  out.write((const char *)&tx_sign_[j]->payload[0], tx_sign_[j]->sig_len);
  /* GC commitment */
  out.write(rev_com_[j], sizeof(Com));
  /* hashed OT transcript */
  int32_t len = rev_ot_tx_[j].size();
  out.write((const char *) &len, sizeof(int32_t));
  out.write((const char *) rev_ot_tx_[j].data(), len);
  /* seedB */
  out.write((const char *)&seeds_B_[j], sizeof(block));
  /* seed OT transcript */
  len = tx_sd_ot_[j].size();
  out.write((const char *) &len, sizeof(int32_t));
  out.write((const char *) tx_sd_ot_[j].data(), len);
}
template <class RT>
void judge_cert(typename TPC<RT>::T const& circ) {
  /* Feed the certificate file to the judge and report its verdict. */
  PVCJudge judge;
  const int verdict = judge.judge<RT>("cheated.cert", circ);
  printf("Judge: %d\n", verdict);
}
/* Write the cheating certificate for iteration j to "cheated.cert";
   if the file cannot be opened, fall back to stdout. */
void create_cheated_cert(int32_t j) {
  std::ofstream fout("cheated.cert");
  if (!fout.is_open()) {
    do_create_cheated_cert(std::cout, j);
    return;
  }
  do_create_cheated_cert(fout, j);
  fout.close();
}
};
}
|
GB_unop__atanh_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__atanh_fc64_fc64)
// op(A') function: GB (_unop_tran__atanh_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = catanh (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = catanh (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = catanh (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ATANH || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Generated kernel: Cx [p] = catanh (Ax [p]) for every entry of A.
// (This file is auto-generated; code is kept byte-identical, comments only.)
GrB_Info GB (_unop_apply__atanh_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,                // number of entries (or bitmap size)
    int nthreads                // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // sparse/hyper/full case: every position 0..anz-1 holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = catanh (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;   // skip positions with no entry
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = catanh (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Generated kernel: C = catanh (A'), i.e. transpose + apply in one pass.
// The loop body lives in GB_unop_transpose.c, which expands the GB_*
// macros defined at the top of this file; names here must not change.
GrB_Info GB (_unop_tran__atanh_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // per-thread transpose workspaces
    const int64_t *restrict A_slice,    // how A's vectors are split over threads
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
nvector_openmp.c | /* -----------------------------------------------------------------
* Programmer(s): David J. Gardner and Carol S. Woodward @ LLNL
* -----------------------------------------------------------------
* Acknowledgements: This NVECTOR module is based on the NVECTOR
* Serial module by Scott D. Cohen, Alan C.
* Hindmarsh, Radu Serban, and Aaron Collier
* @ LLNL
* -----------------------------------------------------------------
* LLNS Copyright Start
* Copyright (c) 2014, Lawrence Livermore National Security
* This work was performed under the auspices of the U.S. Department
* of Energy by Lawrence Livermore National Laboratory in part under
* Contract W-7405-Eng-48 and in part under Contract DE-AC52-07NA27344.
* Produced at the Lawrence Livermore National Laboratory.
* All rights reserved.
* For details, see the LICENSE file.
* LLNS Copyright End
* -----------------------------------------------------------------
* This is the implementation file for an OpenMP implementation
* of the NVECTOR module.
* -----------------------------------------------------------------*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <nvector/nvector_openmp.h>
#include <sundials/sundials_math.h>
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define ONEPT5 RCONST(1.5)
/* Private function prototypes */
/* z=x */
static void VCopy_OpenMP(N_Vector x, N_Vector z);
/* z=x+y */
static void VSum_OpenMP(N_Vector x, N_Vector y, N_Vector z);
/* z=x-y */
static void VDiff_OpenMP(N_Vector x, N_Vector y, N_Vector z);
/* z=-x */
static void VNeg_OpenMP(N_Vector x, N_Vector z);
/* z=c(x+y) */
static void VScaleSum_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z);
/* z=c(x-y) */
static void VScaleDiff_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z);
/* z=ax+y */
static void VLin1_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z);
/* z=ax-y */
static void VLin2_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z);
/* y <- ax+y */
static void Vaxpy_OpenMP(realtype a, N_Vector x, N_Vector y);
/* x <- ax */
static void VScaleBy_OpenMP(realtype a, N_Vector x);
/*
* -----------------------------------------------------------------
* exported functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------
* Returns vector type ID. Used to identify vector implementation
* from abstract N_Vector interface.
*/
N_Vector_ID N_VGetVectorID_OpenMP(N_Vector v)
{
  /* All OpenMP vectors report the same implementation ID; the argument
     is required by the generic N_Vector interface but unused here. */
  return(SUNDIALS_NVEC_OPENMP);
}
/* ----------------------------------------------------------------------------
* Function to create a new empty vector
*/
N_Vector N_VNewEmpty_OpenMP(sunindextype length, int num_threads)
{
  /* Allocate an N_Vector with its ops table and content filled in but
     NO data array attached (data == NULL, own_data == SUNFALSE).
     Returns NULL on any allocation failure, freeing partial results. */
  N_Vector v;
  N_Vector_Ops ops;
  N_VectorContent_OpenMP content;

  /* Create vector */
  v = NULL;
  v = (N_Vector) malloc(sizeof *v);
  if (v == NULL) return(NULL);

  /* Create vector operation structure; every op points at this
     module's implementation (order mirrors _generic_N_Vector_Ops) */
  ops = NULL;
  ops = (N_Vector_Ops) malloc(sizeof(struct _generic_N_Vector_Ops));
  if (ops == NULL) { free(v); return(NULL); }

  ops->nvgetvectorid = N_VGetVectorID_OpenMP;
  ops->nvclone = N_VClone_OpenMP;
  ops->nvcloneempty = N_VCloneEmpty_OpenMP;
  ops->nvdestroy = N_VDestroy_OpenMP;
  ops->nvspace = N_VSpace_OpenMP;
  ops->nvgetarraypointer = N_VGetArrayPointer_OpenMP;
  ops->nvsetarraypointer = N_VSetArrayPointer_OpenMP;
  ops->nvlinearsum = N_VLinearSum_OpenMP;
  ops->nvconst = N_VConst_OpenMP;
  ops->nvprod = N_VProd_OpenMP;
  ops->nvdiv = N_VDiv_OpenMP;
  ops->nvscale = N_VScale_OpenMP;
  ops->nvabs = N_VAbs_OpenMP;
  ops->nvinv = N_VInv_OpenMP;
  ops->nvaddconst = N_VAddConst_OpenMP;
  ops->nvdotprod = N_VDotProd_OpenMP;
  ops->nvmaxnorm = N_VMaxNorm_OpenMP;
  ops->nvwrmsnormmask = N_VWrmsNormMask_OpenMP;
  ops->nvwrmsnorm = N_VWrmsNorm_OpenMP;
  ops->nvmin = N_VMin_OpenMP;
  ops->nvwl2norm = N_VWL2Norm_OpenMP;
  ops->nvl1norm = N_VL1Norm_OpenMP;
  ops->nvcompare = N_VCompare_OpenMP;
  ops->nvinvtest = N_VInvTest_OpenMP;
  ops->nvconstrmask = N_VConstrMask_OpenMP;
  ops->nvminquotient = N_VMinQuotient_OpenMP;

  /* Create content */
  content = NULL;
  content = (N_VectorContent_OpenMP) malloc(sizeof(struct _N_VectorContent_OpenMP));
  if (content == NULL) { free(ops); free(v); return(NULL); }

  content->length = length;
  content->num_threads = num_threads;
  content->own_data = SUNFALSE;   /* no data attached yet */
  content->data = NULL;

  /* Attach content and ops */
  v->content = content;
  v->ops = ops;

  return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a new vector
*/
/* Create a vector that owns a freshly allocated data array of `length`
   realtype entries; zero-length vectors carry no data. NULL on failure. */
N_Vector N_VNew_OpenMP(sunindextype length, int num_threads)
{
  N_Vector v;
  realtype *data;

  v = N_VNewEmpty_OpenMP(length, num_threads);
  if (v == NULL) return(NULL);

  if (length <= 0) return(v);   /* nothing to allocate */

  data = (realtype *) malloc(length * sizeof(realtype));
  if (data == NULL) { N_VDestroy_OpenMP(v); return(NULL); }

  NV_OWN_DATA_OMP(v) = SUNTRUE;   /* destructor will free data */
  NV_DATA_OMP(v)     = data;

  return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a vector with user data component
*/
/* Create a vector wrapping a user-supplied data array; the vector does
   NOT take ownership, so the caller must keep v_data alive and free it. */
N_Vector N_VMake_OpenMP(sunindextype length, realtype *v_data, int num_threads)
{
  N_Vector v;

  v = N_VNewEmpty_OpenMP(length, num_threads);
  if (v == NULL) return(NULL);

  if (length <= 0) return(v);   /* zero-length: leave data NULL */

  NV_OWN_DATA_OMP(v) = SUNFALSE;  /* borrowed, never freed by us */
  NV_DATA_OMP(v)     = v_data;

  return(v);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors.
*/
/* Create an array of `count` full clones of w (each with its own data).
   Returns NULL on failure, destroying any clones already created. */
N_Vector *N_VCloneVectorArray_OpenMP(int count, N_Vector w)
{
  N_Vector *vs;
  int j;

  if (count <= 0) return(NULL);

  vs = NULL;
  vs = (N_Vector *) malloc(count * sizeof(N_Vector));
  if(vs == NULL) return(NULL);

  for (j = 0; j < count; j++) {
    vs[j] = NULL;
    vs[j] = N_VClone_OpenMP(w);
    if (vs[j] == NULL) {
      /* BUGFIX: j vectors (indices 0..j-1) were created before the
         failure; the old code passed j-1 and leaked vs[j-1]. */
      N_VDestroyVectorArray_OpenMP(vs, j);
      return(NULL);
    }
  }

  return(vs);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors with NULL data array.
*/
/* Create an array of `count` empty clones of w (no data arrays).
   Returns NULL on failure, destroying any clones already created. */
N_Vector *N_VCloneVectorArrayEmpty_OpenMP(int count, N_Vector w)
{
  N_Vector *vs;
  int j;

  if (count <= 0) return(NULL);

  vs = NULL;
  vs = (N_Vector *) malloc(count * sizeof(N_Vector));
  if(vs == NULL) return(NULL);

  for (j = 0; j < count; j++) {
    vs[j] = NULL;
    vs[j] = N_VCloneEmpty_OpenMP(w);
    if (vs[j] == NULL) {
      /* BUGFIX: j vectors (indices 0..j-1) were created before the
         failure; the old code passed j-1 and leaked vs[j-1]. */
      N_VDestroyVectorArray_OpenMP(vs, j);
      return(NULL);
    }
  }

  return(vs);
}
/* ----------------------------------------------------------------------------
* Function to free an array created with N_VCloneVectorArray_OpenMP
*/
/* Destroy `count` vectors of vs and the array itself.
   Robustness: a NULL array is a no-op (matches free(NULL) semantics). */
void N_VDestroyVectorArray_OpenMP(N_Vector *vs, int count)
{
  int j;

  if (vs == NULL) return;

  for (j = 0; j < count; j++) N_VDestroy_OpenMP(vs[j]);

  free(vs); vs = NULL;

  return;
}
/* ----------------------------------------------------------------------------
* Function to return number of vector elements
*/
sunindextype N_VGetLength_OpenMP(N_Vector v)
{
  /* The length is stored in the vector content; plain read, no locking. */
  return(NV_LENGTH_OMP(v));
}
/* ----------------------------------------------------------------------------
* Function to print a vector to stdout
*/
void N_VPrint_OpenMP(N_Vector x)
{
  /* Convenience wrapper: print the vector to standard output. */
  N_VPrintFile_OpenMP(x, stdout);
  return;
}
/* ----------------------------------------------------------------------------
* Function to print a vector to outfile
*/
void N_VPrintFile_OpenMP(N_Vector x, FILE *outfile)
{
  /* Print one component per line to outfile, followed by a blank line.
     The format depends on the build-time realtype precision; note the
     double and single precision branches intentionally share "%11.8g". */
  sunindextype i, N;
  realtype *xd;

  xd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);

  for (i = 0; i < N; i++) {
#if defined(SUNDIALS_EXTENDED_PRECISION)
    fprintf(outfile, "%11.8Lg\n", xd[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
    fprintf(outfile, "%11.8g\n", xd[i]);
#else
    fprintf(outfile, "%11.8g\n", xd[i]);
#endif
  }
  fprintf(outfile, "\n");

  return;
}
/*
* -----------------------------------------------------------------
* implementation of vector operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Create new vector from existing vector without attaching data
*/
N_Vector N_VCloneEmpty_OpenMP(N_Vector w)
{
  /* Create a new vector with w's ops and metadata (length, thread count)
     but no data array. Copying ops from w (rather than this module's
     functions) preserves any overrides installed by the user. */
  N_Vector v;
  N_Vector_Ops ops;
  N_VectorContent_OpenMP content;

  if (w == NULL) return(NULL);

  /* Create vector */
  v = NULL;
  v = (N_Vector) malloc(sizeof *v);
  if (v == NULL) return(NULL);

  /* Create vector operation structure */
  ops = NULL;
  ops = (N_Vector_Ops) malloc(sizeof(struct _generic_N_Vector_Ops));
  if (ops == NULL) { free(v); return(NULL); }

  ops->nvgetvectorid = w->ops->nvgetvectorid;
  ops->nvclone = w->ops->nvclone;
  ops->nvcloneempty = w->ops->nvcloneempty;
  ops->nvdestroy = w->ops->nvdestroy;
  ops->nvspace = w->ops->nvspace;
  ops->nvgetarraypointer = w->ops->nvgetarraypointer;
  ops->nvsetarraypointer = w->ops->nvsetarraypointer;
  ops->nvlinearsum = w->ops->nvlinearsum;
  ops->nvconst = w->ops->nvconst;
  ops->nvprod = w->ops->nvprod;
  ops->nvdiv = w->ops->nvdiv;
  ops->nvscale = w->ops->nvscale;
  ops->nvabs = w->ops->nvabs;
  ops->nvinv = w->ops->nvinv;
  ops->nvaddconst = w->ops->nvaddconst;
  ops->nvdotprod = w->ops->nvdotprod;
  ops->nvmaxnorm = w->ops->nvmaxnorm;
  ops->nvwrmsnormmask = w->ops->nvwrmsnormmask;
  ops->nvwrmsnorm = w->ops->nvwrmsnorm;
  ops->nvmin = w->ops->nvmin;
  ops->nvwl2norm = w->ops->nvwl2norm;
  ops->nvl1norm = w->ops->nvl1norm;
  ops->nvcompare = w->ops->nvcompare;
  ops->nvinvtest = w->ops->nvinvtest;
  ops->nvconstrmask = w->ops->nvconstrmask;
  ops->nvminquotient = w->ops->nvminquotient;

  /* Create content: same length/threads as w, but no data attached */
  content = NULL;
  content = (N_VectorContent_OpenMP) malloc(sizeof(struct _N_VectorContent_OpenMP));
  if (content == NULL) { free(ops); free(v); return(NULL); }

  content->length = NV_LENGTH_OMP(w);
  content->num_threads = NV_NUM_THREADS_OMP(w);
  content->own_data = SUNFALSE;
  content->data = NULL;

  /* Attach content and ops */
  v->content = content;
  v->ops = ops;

  return(v);
}
/* ----------------------------------------------------------------------------
* Create new vector from existing vector and attach data
*/
/* Create a full clone of w: same ops and metadata plus a newly allocated
   (owned) data array. Data VALUES are not copied. NULL on failure. */
N_Vector N_VClone_OpenMP(N_Vector w)
{
  N_Vector v;
  realtype *data;
  sunindextype length;

  v = N_VCloneEmpty_OpenMP(w);
  if (v == NULL) return(NULL);

  length = NV_LENGTH_OMP(w);
  if (length <= 0) return(v);   /* zero-length clone carries no data */

  data = (realtype *) malloc(length * sizeof(realtype));
  if (data == NULL) { N_VDestroy_OpenMP(v); return(NULL); }

  NV_OWN_DATA_OMP(v) = SUNTRUE;
  NV_DATA_OMP(v)     = data;

  return(v);
}
/* ----------------------------------------------------------------------------
* Destroy vector and free vector memory
*/
/* Free the vector, its ops/content structures, and the data array if
   (and only if) this vector owns it.
   Robustness: destroying NULL is a no-op. */
void N_VDestroy_OpenMP(N_Vector v)
{
  if (v == NULL) return;

  if (NV_OWN_DATA_OMP(v) == SUNTRUE) {
    free(NV_DATA_OMP(v));
    NV_DATA_OMP(v) = NULL;
  }
  free(v->content); v->content = NULL;
  free(v->ops); v->ops = NULL;
  free(v); v = NULL;

  return;
}
/* ----------------------------------------------------------------------------
* Get storage requirement for N_Vector
*/
void N_VSpace_OpenMP(N_Vector v, sunindextype *lrw, sunindextype *liw)
{
  /* Report storage: one integer word (the length) and N real words. */
  *liw = 1;
  *lrw = NV_LENGTH_OMP(v);
  return;
}
/* ----------------------------------------------------------------------------
* Get vector data pointer
*/
realtype *N_VGetArrayPointer_OpenMP(N_Vector v)
{
  /* Non-owning pointer to the raw data array; caller must not free it. */
  realtype *data = (realtype *) NV_DATA_OMP(v);
  return(data);
}
/* ----------------------------------------------------------------------------
* Set vector data pointer
*/
void N_VSetArrayPointer_OpenMP(realtype *v_data, N_Vector v)
{
  /* Zero-length vectors never hold data, so leave them untouched. */
  if (NV_LENGTH_OMP(v) <= 0) return;
  NV_DATA_OMP(v) = v_data;
  return;
}
/* ----------------------------------------------------------------------------
* Compute linear combination z[i] = a*x[i]+b*y[i]
*/
void N_VLinearSum_OpenMP(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z)
{
  /* z = a*x + b*y. The cascade below dispatches the common coefficient
     patterns (a or b equal to +/-1, a==b, a==-b, in-place axpy) to
     specialized kernels; only the fully general case falls through to
     the loop at the bottom. The dispatch order matters: earlier cases
     subsume later ones for aliased operands. */
  sunindextype i, N;
  realtype c, *xd, *yd, *zd;
  N_Vector v1, v2;
  booleantype test;

  xd = yd = zd = NULL;

  if ((b == ONE) && (z == y)) {    /* BLAS usage: axpy y <- ax+y */
    Vaxpy_OpenMP(a,x,y);
    return;
  }

  if ((a == ONE) && (z == x)) {    /* BLAS usage: axpy x <- by+x */
    Vaxpy_OpenMP(b,y,x);
    return;
  }

  /* Case: a == b == 1.0 */
  if ((a == ONE) && (b == ONE)) {
    VSum_OpenMP(x, y, z);
    return;
  }

  /* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */
  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    v1 = test ? y : x;    /* v1 is the negated operand */
    v2 = test ? x : y;
    VDiff_OpenMP(v2, v1, z);
    return;
  }

  /* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */
  /* if a or b is 0.0, then user should have called N_VScale */
  if ((test = (a == ONE)) || (b == ONE)) {
    c = test ? b : a;
    v1 = test ? y : x;    /* v1 is the scaled operand */
    v2 = test ? x : y;
    VLin1_OpenMP(c, v1, v2, z);
    return;
  }

  /* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */
  if ((test = (a == -ONE)) || (b == -ONE)) {
    c = test ? b : a;
    v1 = test ? y : x;
    v2 = test ? x : y;
    VLin2_OpenMP(c, v1, v2, z);
    return;
  }

  /* Case: a == b */
  /* catches case both a and b are 0.0 - user should have called N_VConst */
  if (a == b) {
    VScaleSum_OpenMP(a, x, y, z);
    return;
  }

  /* Case: a == -b */
  if (a == -b) {
    VScaleDiff_OpenMP(a, x, y, z);
    return;
  }

  /* Do all cases not handled above:
     (1) a == other, b == 0.0 - user should have called N_VScale
     (2) a == 0.0, b == other - user should have called N_VScale
     (3) a,b == other, a !=b, a != -b */

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  yd = NV_DATA_OMP(y);
  zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,a,b,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = (a*xd[i])+(b*yd[i]);

  return;
}
/* ----------------------------------------------------------------------------
* Assigns constant value to all vector elements, z[i] = c
*/
void N_VConst_OpenMP(realtype c, N_Vector z)
{
  /* Fill: z[i] = c, statically split over z's configured thread count. */
  sunindextype i, N;
  realtype *zd;

  zd = NULL;

  N  = NV_LENGTH_OMP(z);
  zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,c,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(z))
  for (i = 0; i < N; i++) zd[i] = c;

  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise product z[i] = x[i]*y[i]
*/
void N_VProd_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  /* Elementwise product: z[i] = x[i]*y[i]. Aliasing z with x or y is
     safe since each index is written by exactly one thread. */
  sunindextype i, N;
  realtype *xd, *yd, *zd;

  xd = yd = zd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  yd = NV_DATA_OMP(y);
  zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = xd[i]*yd[i];

  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise division z[i] = x[i]/y[i]
*/
void N_VDiv_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  /* Elementwise quotient: z[i] = x[i]/y[i].
     No zero check on y: by the N_Vector contract the caller must
     guarantee all y[i] != 0 (see N_VInvTest for the checked variant). */
  sunindextype i, N;
  realtype *xd, *yd, *zd;

  xd = yd = zd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  yd = NV_DATA_OMP(y);
  zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = xd[i]/yd[i];

  return;
}
/* ----------------------------------------------------------------------------
* Compute scaler multiplication z[i] = c*x[i]
*/
void N_VScale_OpenMP(realtype c, N_Vector x, N_Vector z)
{
  /* z = c*x, with fast paths for in-place scaling (z aliases x),
     plain copy (c == 1) and negation (c == -1). */
  sunindextype i, N;
  realtype *xd, *zd;

  xd = zd = NULL;

  if (z == x) {       /* BLAS usage: scale x <- cx */
    VScaleBy_OpenMP(c, x);
    return;
  }

  if (c == ONE) {
    VCopy_OpenMP(x, z);
  } else if (c == -ONE) {
    VNeg_OpenMP(x, z);
  } else {
    N  = NV_LENGTH_OMP(x);
    xd = NV_DATA_OMP(x);
    zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,c,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
    for (i = 0; i < N; i++)
      zd[i] = c*xd[i];
  }

  return;
}
/* ----------------------------------------------------------------------------
* Compute absolute value of vector components z[i] = SUNRabs(x[i])
*/
/* Elementwise absolute value: z[i] = |x[i]|. */
void N_VAbs_OpenMP(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd, *zd;

  xd = zd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  zd = NV_DATA_OMP(z);

  /* CONSISTENCY FIX: this was the only kernel in the file without
     explicit default(none) data-sharing clauses; the clauses below
     match the implicit sharing of the old pragma exactly (the loop
     index of a parallel for is private by the OpenMP spec). */
#pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = SUNRabs(xd[i]);

  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = 1 / x[i]
*/
void N_VInv_OpenMP(N_Vector x, N_Vector z)
{
  /* Elementwise reciprocal: z[i] = 1/x[i]. Unchecked; the caller must
     guarantee x[i] != 0 (N_VInvTest is the checked variant). */
  sunindextype i, N;
  realtype *xd, *zd;

  xd = zd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = ONE/xd[i];

  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise addition of a scaler to a vector z[i] = x[i] + b
*/
void N_VAddConst_OpenMP(N_Vector x, realtype b, N_Vector z)
{
  /* Shift: z[i] = x[i] + b. */
  sunindextype i, N;
  realtype *xd, *zd;

  xd = zd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,b,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = xd[i]+b;

  return;
}
/* ----------------------------------------------------------------------------
* Computes the dot product of two vectors, a = sum(x[i]*y[i])
*/
realtype N_VDotProd_OpenMP(N_Vector x, N_Vector y)
{
  /* Dot product sum(x[i]*y[i]) via an OpenMP sum reduction.
     Note: the parallel reduction order differs from a serial sum, so
     the floating-point result may differ in the last bits run-to-run
     with different thread counts. */
  sunindextype i, N;
  realtype sum, *xd, *yd;

  sum = ZERO;
  xd = yd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  yd = NV_DATA_OMP(y);

#pragma omp parallel for default(none) private(i) shared(N,xd,yd) \
  reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    sum += xd[i]*yd[i];
  }

  return(sum);
}
/* ----------------------------------------------------------------------------
* Computes max norm of a vector
*/
realtype N_VMaxNorm_OpenMP(N_Vector x)
{
  /* Max norm: max_i |x[i]|. Each thread accumulates a private maximum
     (tmax) and merges it into the shared result inside a critical
     section, avoiding any need for a max reduction clause. */
  sunindextype i, N;
  realtype tmax, max, *xd;

  max = ZERO;
  xd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);

#pragma omp parallel default(none) private(i,tmax) shared(N,max,xd) \
  num_threads(NV_NUM_THREADS_OMP(x))
  {
    tmax = ZERO;
#pragma omp for schedule(static)
    for (i = 0; i < N; i++) {
      if (SUNRabs(xd[i]) > tmax) tmax = SUNRabs(xd[i]);
    }
    /* merge thread-local maxima one at a time */
#pragma omp critical
    {
      if (tmax > max)
        max = tmax;
    }
  }
  return(max);
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a vector
*/
realtype N_VWrmsNorm_OpenMP(N_Vector x, N_Vector w)
{
  /* Weighted RMS norm: sqrt( sum((x[i]*w[i])^2) / N ).
     NOTE(review): no guard against N == 0 (division by zero) --
     callers are expected to pass non-empty vectors. */
  sunindextype i, N;
  realtype sum, *xd, *wd;

  sum = ZERO;
  xd = wd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  wd = NV_DATA_OMP(w);

#pragma omp parallel for default(none) private(i) shared(N,xd,wd) \
  reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    sum += SUNSQR(xd[i]*wd[i]);
  }

  return(SUNRsqrt(sum/N));
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a masked vector
*/
realtype N_VWrmsNormMask_OpenMP(N_Vector x, N_Vector w, N_Vector id)
{
  /* Masked weighted RMS norm: like N_VWrmsNorm but only entries with
     id[i] > 0 contribute to the sum; the divisor is still the full N. */
  sunindextype i, N;
  realtype sum, *xd, *wd, *idd;

  sum = ZERO;
  xd = wd = idd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd  = NV_DATA_OMP(x);
  wd  = NV_DATA_OMP(w);
  idd = NV_DATA_OMP(id);

#pragma omp parallel for default(none) private(i) shared(N,xd,wd,idd) \
  reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    if (idd[i] > ZERO) {
      sum += SUNSQR(xd[i]*wd[i]);
    }
  }

  return(SUNRsqrt(sum / N));
}
/* ----------------------------------------------------------------------------
* Finds the minimun component of a vector
*/
realtype N_VMin_OpenMP(N_Vector x)
{
  /* Minimum component. Each thread reduces into a private tmin starting
     from xd[0]; thread-local results merge under a critical section.
     NOTE(review): the unsynchronized `tmin < min` pre-check before the
     critical section reads `min` without a lock -- presumably a benign
     contention optimization, but technically a data race; confirm
     against the project's threading policy.
     Assumes N >= 1 (xd[0] is read unconditionally). */
  sunindextype i, N;
  realtype min, *xd;
  realtype tmin;

  xd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);

  min = xd[0];

#pragma omp parallel default(none) private(i,tmin) shared(N,min,xd) \
  num_threads(NV_NUM_THREADS_OMP(x))
  {
    tmin = xd[0];
#pragma omp for schedule(static)
    for (i = 1; i < N; i++) {
      if (xd[i] < tmin) tmin = xd[i];
    }
    if (tmin < min) {
#pragma omp critical
      {
        if (tmin < min) min = tmin;
      }
    }
  }

  return(min);
}
/* ----------------------------------------------------------------------------
* Computes weighted L2 norm of a vector
*/
realtype N_VWL2Norm_OpenMP(N_Vector x, N_Vector w)
{
  /* Weighted L2 norm: sqrt( sum((x[i]*w[i])^2) ) -- no division by N,
     which is the only difference from N_VWrmsNorm. */
  sunindextype i, N;
  realtype sum, *xd, *wd;

  sum = ZERO;
  xd = wd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  wd = NV_DATA_OMP(w);

#pragma omp parallel for default(none) private(i) shared(N,xd,wd) \
  reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    sum += SUNSQR(xd[i]*wd[i]);
  }

  return(SUNRsqrt(sum));
}
/* ----------------------------------------------------------------------------
* Computes L1 norm of a vector
*/
realtype N_VL1Norm_OpenMP(N_Vector x)
{
  /* L1 norm: sum_i |x[i]| via an OpenMP sum reduction. */
  sunindextype i, N;
  realtype sum, *xd;

  sum = ZERO;
  xd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);

#pragma omp parallel for default(none) private(i) shared(N,xd) \
  reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i<N; i++)
    sum += SUNRabs(xd[i]);

  return(sum);
}
/* ----------------------------------------------------------------------------
* Compare vector component values to a scaler
*/
void N_VCompare_OpenMP(realtype c, N_Vector x, N_Vector z)
{
  /* Threshold test: z[i] = 1 if |x[i]| >= c, else 0. */
  sunindextype i, N;
  realtype *xd, *zd;

  xd = zd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,c,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    zd[i] = (SUNRabs(xd[i]) >= c) ? ONE : ZERO;
  }

  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = ONE/x[i] and checks if x[i] == ZERO
*/
/* Checked elementwise reciprocal: z[i] = 1/x[i] where x[i] != 0.
   Returns SUNTRUE iff no component of x was zero; z entries whose
   x[i] == 0 are left unmodified. */
booleantype N_VInvTest_OpenMP(N_Vector x, N_Vector z)
{
  sunindextype i, N;
  realtype *xd, *zd, val;

  xd = zd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  zd = NV_DATA_OMP(z);

  /* RACE FIX: val used to be a shared flag written concurrently by
     multiple threads without synchronization (undefined behavior).
     It is now a zero-entry counter combined with a sum reduction;
     the final `val > ZERO` test is unchanged. */
  val = ZERO;
#pragma omp parallel for default(none) private(i) shared(N,xd,zd) \
  reduction(+:val) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    if (xd[i] == ZERO)
      val += ONE;
    else
      zd[i] = ONE/xd[i];
  }

  if (val > ZERO)
    return (SUNFALSE);
  else
    return (SUNTRUE);
}
/* ----------------------------------------------------------------------------
* Compute constraint mask of a vector
*/
/* Constraint test: for each i, c[i] encodes the constraint on x[i]
   (|c|>1.5: strict sign, |c|>0.5: non-strict sign, 0: none).
   m[i] is set to 1 where the constraint is violated, else 0.
   Returns SUNTRUE iff all constraints hold. */
booleantype N_VConstrMask_OpenMP(N_Vector c, N_Vector x, N_Vector m)
{
  sunindextype i, N;
  realtype temp;
  realtype *cd, *xd, *md;

  cd = xd = md = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  cd = NV_DATA_OMP(c);
  md = NV_DATA_OMP(m);

  /* RACE FIX: temp used to be a shared flag stored concurrently by
     multiple threads without synchronization (undefined behavior).
     A min reduction (OpenMP >= 3.1) now combines the thread-local
     values: temp stays ONE when all constraints hold, ZERO otherwise. */
  temp = ONE;
#pragma omp parallel for default(none) private(i) shared(N,xd,cd,md) \
  reduction(min:temp) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    md[i] = ZERO;
    if (cd[i] == ZERO) continue;                 /* no constraint */
    if (cd[i] > ONEPT5 || cd[i] < -ONEPT5) {     /* c = +/-2: strict */
      if ( xd[i]*cd[i] <= ZERO) { temp = ZERO; md[i] = ONE; }
      continue;
    }
    if ( cd[i] > HALF || cd[i] < -HALF) {        /* c = +/-1: non-strict */
      if (xd[i]*cd[i] < ZERO ) { temp = ZERO; md[i] = ONE; }
    }
  }

  if (temp == ONE) return (SUNTRUE);
  else return(SUNFALSE);
}
/* ----------------------------------------------------------------------------
* Compute minimum componentwise quotient
*/
realtype N_VMinQuotient_OpenMP(N_Vector num, N_Vector denom)
{
  /* Minimum of num[i]/denom[i] over all i with denom[i] != 0; returns
     BIG_REAL if every denominator is zero. Thread-local minima (tmin)
     are merged under a critical section.
     NOTE(review): the unsynchronized `tmin < min` pre-check reads
     `min` without a lock -- same pattern as N_VMin; presumably a
     benign contention optimization, confirm. */
  sunindextype i, N;
  realtype *nd, *dd, min, tmin, val;

  nd = dd = NULL;

  N  = NV_LENGTH_OMP(num);
  nd = NV_DATA_OMP(num);
  dd = NV_DATA_OMP(denom);

  min = BIG_REAL;

#pragma omp parallel default(none) private(i,tmin,val) shared(N,min,nd,dd) \
  num_threads(NV_NUM_THREADS_OMP(num))
  {
    tmin = BIG_REAL;
#pragma omp for schedule(static)
    for (i = 0; i < N; i++) {
      if (dd[i] != ZERO) {
        val = nd[i]/dd[i];
        if (val < tmin) tmin = val;
      }
    }
    if (tmin < min) {
#pragma omp critical
      {
        if (tmin < min) min = tmin;
      }
    }
  }

  return(min);
}
/*
* -----------------------------------------------------------------
* private functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Copy vector components into a second vector
*/
static void VCopy_OpenMP(N_Vector x, N_Vector z)
{
  /* z[i] = x[i] (used by N_VScale for c == 1). */
  sunindextype i, N;
  realtype *xd, *zd;

  xd = zd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = xd[i];

  return;
}
/* ----------------------------------------------------------------------------
* Compute vector sum
*/
static void VSum_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  /* z[i] = x[i] + y[i] (N_VLinearSum fast path, a == b == 1). */
  sunindextype i, N;
  realtype *xd, *yd, *zd;

  xd = yd = zd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  yd = NV_DATA_OMP(y);
  zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = xd[i]+yd[i];

  return;
}
/* ----------------------------------------------------------------------------
* Compute vector difference
*/
static void VDiff_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
  /* z[i] = x[i] - y[i] (N_VLinearSum fast path, {a,b} = {1,-1}). */
  sunindextype i, N;
  realtype *xd, *yd, *zd;

  xd = yd = zd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  yd = NV_DATA_OMP(y);
  zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = xd[i]-yd[i];

  return;
}
/* ----------------------------------------------------------------------------
* Compute the negative of a vector
*/
static void VNeg_OpenMP(N_Vector x, N_Vector z)
{
  /* z[i] = -x[i] (used by N_VScale for c == -1). */
  sunindextype i, N;
  realtype *xd, *zd;

  xd = zd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = -xd[i];

  return;
}
/* ----------------------------------------------------------------------------
* Compute scaled vector sum
*/
static void VScaleSum_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  /* z[i] = c*(x[i] + y[i]) (N_VLinearSum fast path, a == b). */
  sunindextype i, N;
  realtype *xd, *yd, *zd;

  xd = yd = zd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  yd = NV_DATA_OMP(y);
  zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,c,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = c*(xd[i]+yd[i]);

  return;
}
/* ----------------------------------------------------------------------------
* Compute scaled vector difference
*/
static void VScaleDiff_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  /* z[i] = c*(x[i] - y[i]) (N_VLinearSum fast path, a == -b). */
  sunindextype i, N;
  realtype *xd, *yd, *zd;

  xd = yd = zd = NULL;

  N  = NV_LENGTH_OMP(x);
  xd = NV_DATA_OMP(x);
  yd = NV_DATA_OMP(y);
  zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,c,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++)
    zd[i] = c*(xd[i]-yd[i]);

  return;
}
/* ----------------------------------------------------------------------------
* Compute vector sum z[i] = a*x[i]+y[i]
*/
/* Linear combination z = a*x + y, parallelized with OpenMP using the
 * thread count stored in x. */
static void VLin1_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i;
  sunindextype N  = NV_LENGTH_OMP(x);
  realtype    *xd = NV_DATA_OMP(x);
  realtype    *yd = NV_DATA_OMP(y);
  realtype    *zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,a,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    zd[i] = (a*xd[i]) + yd[i];
  }
}
/* ----------------------------------------------------------------------------
* Compute vector difference z[i] = a*x[i]-y[i]
*/
/* Linear combination z = a*x - y, parallelized with OpenMP using the
 * thread count stored in x. */
static void VLin2_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype i;
  sunindextype N  = NV_LENGTH_OMP(x);
  realtype    *xd = NV_DATA_OMP(x);
  realtype    *yd = NV_DATA_OMP(y);
  realtype    *zd = NV_DATA_OMP(z);

#pragma omp parallel for default(none) private(i) shared(N,a,xd,yd,zd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    zd[i] = (a*xd[i]) - yd[i];
  }
}
/* ----------------------------------------------------------------------------
* Compute special cases of linear sum
*/
/* AXPY update y <- y + a*x, with specialized (multiply-free) loops for
 * the common cases a == 1 and a == -1. Parallelized with OpenMP using
 * the thread count stored in x. */
static void Vaxpy_OpenMP(realtype a, N_Vector x, N_Vector y)
{
  sunindextype i;
  sunindextype N  = NV_LENGTH_OMP(x);
  realtype    *xd = NV_DATA_OMP(x);
  realtype    *yd = NV_DATA_OMP(y);

  if (a == ONE) {
    /* y <- y + x */
#pragma omp parallel for default(none) private(i) shared(N,xd,yd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
    for (i = 0; i < N; i++)
      yd[i] += xd[i];
  } else if (a == -ONE) {
    /* y <- y - x */
#pragma omp parallel for default(none) private(i) shared(N,xd,yd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
    for (i = 0; i < N; i++)
      yd[i] -= xd[i];
  } else {
    /* general case */
#pragma omp parallel for default(none) private(i) shared(N,a,xd,yd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
    for (i = 0; i < N; i++)
      yd[i] += a*xd[i];
  }
}
/* ----------------------------------------------------------------------------
* Compute scaled vector x[i] = a*x[i]
*/
/* In-place scaling x <- a*x, parallelized with OpenMP using the thread
 * count stored in x. */
static void VScaleBy_OpenMP(realtype a, N_Vector x)
{
  sunindextype i;
  sunindextype N  = NV_LENGTH_OMP(x);
  realtype    *xd = NV_DATA_OMP(x);

#pragma omp parallel for default(none) private(i) shared(N,a,xd) schedule(static) \
  num_threads(NV_NUM_THREADS_OMP(x))
  for (i = 0; i < N; i++) {
    xd[i] *= a;
  }
}
|
GB_extract_vector_list.c | //------------------------------------------------------------------------------
// GB_extract_vector_list: extract vector indices for all entries in a matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Constructs a list of vector indices for each entry in a matrix. Creates
// the output J for GB_extractTuples, and I for GB_transpose when the qsort
// method is used.
#include "GB_ek_slice.h"
#define GB_FREE_WORK \
GB_ek_slice_free (&pstart_slice, &kfirst_slice, &klast_slice, ntasks) ;
bool GB_extract_vector_list // true if successful, false if out of memory
(
// output:
int64_t *GB_RESTRICT J, // size nnz(A) or more
// input:
const GrB_Matrix A,
int nthreads
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (J != NULL) ;
ASSERT (A != NULL) ;
ASSERT (nthreads >= 1) ;
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
// Ap: vector pointers; Ah: hypersparse vector names (NULL if standard)
const int64_t *GB_RESTRICT Ap = A->p ;
const int64_t *GB_RESTRICT Ah = A->h ;
//--------------------------------------------------------------------------
// determine the # of tasks to use
//--------------------------------------------------------------------------
int64_t anz = GB_NNZ (A) ;
// one task when single-threaded, otherwise two tasks per thread, clamped
// to the range [1, nnz(A)]
int ntasks = (nthreads == 1) ? 1 : (2 * nthreads) ;
ntasks = GB_IMIN (ntasks, anz) ;
ntasks = GB_IMAX (ntasks, 1) ;
//--------------------------------------------------------------------------
// slice the entries for each task
//--------------------------------------------------------------------------
// Task tid does entries pstart_slice [tid] to pstart_slice [tid+1]-1 and
// vectors kfirst_slice [tid] to klast_slice [tid]. The first and last
// vectors may be shared with prior slices and subsequent slices.
int64_t *pstart_slice = NULL, *kfirst_slice = NULL, *klast_slice = NULL ;
if (!GB_ek_slice (&pstart_slice, &kfirst_slice, &klast_slice, A, ntasks))
{
// out of memory
return (false) ;
}
//--------------------------------------------------------------------------
// extract the vector index for each entry
//--------------------------------------------------------------------------
int tid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
for (tid = 0 ; tid < ntasks ; tid++)
{
// if kfirst > klast then task tid does no work at all
int64_t kfirst = kfirst_slice [tid] ;
int64_t klast = klast_slice [tid] ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// find the part of A(:,k) to be operated on by this task
//------------------------------------------------------------------
int64_t j = (Ah == NULL) ? k : Ah [k] ;
int64_t pA_start, pA_end ;
GB_get_pA_and_pC (&pA_start, &pA_end, NULL,
tid, k, kfirst, klast, pstart_slice, NULL, NULL, Ap) ;
//------------------------------------------------------------------
// extract vector indices of A(:,j)
//------------------------------------------------------------------
// every entry p in this task's part of A(:,j) gets vector index j
for (int64_t p = pA_start ; p < pA_end ; p++)
{
J [p] = j ;
}
}
}
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK ;
return (true) ;
}
|
rf_tron.h | #ifndef RF_TRON_H
#define RF_TRON_H
#include <stdarg.h>
#include <stddef.h>
#include "rf_matrix.h" // to include BLAS/LAPACK header
enum {GD_LS=0, TRON_TR=1, TRON_LS=2};
static void default_print(const char *buf);
// Abstract objective interface consumed by TRON: supplies the objective
// value, gradient, and Hessian-vector products over a flat array of
// variables (passed as void* and interpreted by the implementation).
class function { // {{{
public:
// Objective value at w.
virtual double fun(void *w) = 0 ;
// Gradient of the objective at w, written into g.
virtual void grad(void *w, void *g) = 0 ;
// Hessian-vector product Hs = H*s at the current point.
virtual void Hv(void *s, void *Hs) = 0 ;
// Optional line search along direction s starting from w with gradient g;
// returns the accepted step size (0 on failure) and the new objective in
// *fnew. Only meaningful when line_search_supported() returns true.
virtual double line_search(void *s, void *w, void *g,
double init_step_size, double *fnew, bool do_update=true) { return 0; }
virtual bool line_search_supported() { return false; }
// Number of optimization variables.
virtual int get_nr_variable(void) = 0 ;
virtual ~function(void){}
virtual void init(){}
}; // }}}
// Trust-Region Newton solver (TRON) with optional line-search variants.
// val_type is the floating type of the variable arrays; tolerances are
// kept in double. Work arrays are allocated once in the constructor.
template<typename val_type>
class TRON { // {{{
public:
TRON(const function *fun_obj, double eps = 0.1, double eps_cg = 0.1, size_t max_iter = 100, size_t max_cg_iter = 20, bool pure_cg = false);
~TRON();
// Entry point: dispatches to one of the solvers below according to
// solver_descend_type (TRON_TR, TRON_LS, or GD_LS).
void tron(val_type *w, bool set_w_to_zero = true, int solver_descend_type = TRON_TR);
void tron_trustregion(val_type *w, bool set_w_to_zero = true);
void tron_linesearch(val_type *w, bool set_w_to_zero = true);
void gd_linesearch(val_type *w, bool set_w_to_zero = true);
void set_print_string(void (*i_print) (const char *buf));
// Outer (eps) and inner-CG (eps_cg) relative stopping tolerances.
void set_eps(val_type eps, val_type eps_cg = 0.1) {this->eps = eps; this->eps_cg = eps_cg;}
private:
// Truncated conjugate gradient for the trust-region subproblem; returns
// the number of CG iterations and the final residual norm in *cg_rnorm.
int trcg(double delta, val_type *g, val_type *s, val_type *r, double *cg_rnorm);
// Infinity norm of x (length n).
double norm_inf(size_t n, val_type *x);
double eps;
double eps_cg;
size_t max_iter;
size_t max_cg_iter;
bool pure_cg;
// Non-owning pointer to the objective supplied at construction.
function *fun_obj;
void info(const char *fmt,...);
void (*tron_print_string)(const char *buf);
// local variables for tron
val_type *s, *r, *w_new, *g;
// local variables for trcg
val_type *d, *Hd;
}; // }}}
// ------------- Implementation ------------------------
// Default message sink: write the text to stdout and flush immediately so
// solver progress appears as it is produced.
static void default_print(const char *msg) { // {{{
	fputs(msg, stdout);
	fflush(stdout);
} // }}}
// Format a progress message and forward it to the installed print callback.
// Uses vsnprintf (bounded) instead of vsprintf so a long message cannot
// overflow the fixed-size buffer; over-long output is truncated.
template<typename val_type>
void TRON<val_type>::info(const char *fmt,...) { // {{{
	char buf[BUFSIZ];
	va_list ap;
	va_start(ap, fmt);
	vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);
	(*tron_print_string)(buf);
} // }}}
// Construct a solver bound to fun_obj (held non-owning; const_cast is used
// so the non-const fun/grad/Hv members can be called) and allocate the
// length-n work arrays: s, r, w_new, g for the outer iterations and d, Hd
// for trcg. Arrays are zero-initialized via the project's CALLOC macro and
// released with free() in the destructor.
template<typename val_type>
TRON<val_type>::TRON(const function *fun_obj, double eps, double eps_cg, size_t max_iter, size_t max_cg_iter, bool pure_cg) { // {{{
this->fun_obj=const_cast<function *>(fun_obj);
this->eps=eps;
this->eps_cg=eps_cg;
this->max_iter=max_iter;
this->max_cg_iter = max_cg_iter;
this->pure_cg = pure_cg;
tron_print_string = default_print;
ptrdiff_t n = this->fun_obj->get_nr_variable();
// NOTE(review): CALLOC results are not checked for NULL — allocation
// failure here would surface later as a crash; confirm this is acceptable.
s = CALLOC(val_type, n);
r = CALLOC(val_type, n);
w_new = CALLOC(val_type, n);
g = CALLOC(val_type, n);
d = CALLOC(val_type, n);
Hd = CALLOC(val_type, n);
/*
s = new val_type[n];
r = new val_type[n];
w_new = new val_type[n];
g = new val_type[n];
d = new val_type[n];
Hd = new val_type[n];
*/
} // }}}
// Release the work arrays; free() matches the CALLOC allocation used in
// the constructor. fun_obj is non-owning and is not released here.
template<typename val_type>
TRON<val_type>::~TRON() { // {{{
free(s);
free(r);
free(w_new);
free(g);
free(d);
free(Hd);
/*
delete[] g;
delete[] r;
delete[] w_new;
delete[] s;
delete[] d;
delete[] Hd;
*/
} // }}}
// Solver dispatch: run the descend method selected by solver_descend_type.
// The trust-region solver is the fallback whenever the objective does not
// support a line search (line_search_supported() == false).
template<typename val_type>
void TRON<val_type>::tron(val_type *w, bool set_w_to_zero, int solver_descend_type) { // {{{
	// tron_obj->tron(w, true);// zero-initization for w
	if (solver_descend_type == TRON_TR || !fun_obj->line_search_supported()) {
		TRON<val_type>::tron_trustregion(w, set_w_to_zero);
		return;
	}
	switch (solver_descend_type) {
		case TRON_LS:
			TRON<val_type>::tron_linesearch(w, set_w_to_zero);
			break;
		case GD_LS:
			TRON<val_type>::gd_linesearch(w, set_w_to_zero);
			break;
	}
} // }}}
// Trust-region Newton solver: starting from w (optionally zero-initialized),
// each outer iteration solves the trust-region subproblem with truncated CG
// (trcg), then accepts or rejects the step and adapts the radius delta from
// the ratio of actual to predicted reduction.
template<typename val_type>
void TRON<val_type>::tron_trustregion(val_type *w, bool set_w_to_zero) { // {{{
// Parameters for updating the iterates.
double eta0 = 1e-4, eta1 = 0.25, eta2 = 0.75;
// Parameters for updating the trust region size delta.
double sigma1 = 0.25, sigma2 = 0.5, sigma3 = 4;
ptrdiff_t n = fun_obj->get_nr_variable();
int i, cg_iter;
double delta, snorm;
val_type one=1.0;
double alpha, f, fnew, prered, actred, gs;
size_t search = 1, iter = 1;
ptrdiff_t inc = 1;
if (set_w_to_zero)
for (i=0; i<n; i++)
w[i] = 0;
f = fun_obj->fun(w);
//fprintf(stderr, "fun is done\n");
fun_obj->grad(w, g);
//fprintf(stderr, "grad is done\n");
//fprintf(stderr, "TRON23: max_iter %ld cg_iter %ld, eps %g, eps_cg %g\n", this->max_iter, this->max_cg_iter, this->eps, this->eps_cg);
//fprintf(stderr, "n %ld max_iter %ld max_cg_iter %ld\n", n, max_iter, max_cg_iter);
//delta = dnrm2_(&n, g, &inc);
//delta = sqrt(ddot_(&n, g, &inc, g, &inc));
// initial trust-region radius = ||g||
delta = sqrt(dot(&n, g, &inc, g, &inc));
double gnorm1 = delta;
double gnorm = gnorm1;
//if (gnorm <= eps*gnorm1 || gnorm1 < eps) {
// since gnorm == gnorm1 here, this only fires for a zero gradient
if (gnorm <= eps*gnorm1) {
search = 0;
}
iter = 1;
while (iter <= max_iter && search)
{
double cg_rnorm=0;
cg_iter = trcg(delta, g, s, r, &cg_rnorm);
//memcpy(w_new, w, sizeof(double)*n);
//dcopy_(&n, w, &inc, w_new, &inc);
// trial point w_new = w + s
copy(&n, w, &inc, w_new, &inc);
//daxpy_(&n, &one, s, &inc, w_new, &inc);
axpy(&n, &one, s, &inc, w_new, &inc);
//gs = ddot_(&n, g, &inc, s, &inc);
gs = dot(&n, g, &inc, s, &inc);
//prered = -0.5*(gs-ddot_(&n, s, &inc, r, &inc));
// predicted reduction of the quadratic model
prered = -0.5*(gs-dot(&n, s, &inc, r, &inc));
fnew = fun_obj->fun(w_new);
// Compute the actual reduction.
actred = f - fnew;
// On the first iteration, adjust the initial step bound.
//snorm = dnrm2_(&n, s, &inc);
//snorm = sqrt(ddot_(&n, s, &inc, s, &inc));
snorm = sqrt(dot(&n, s, &inc, s, &inc));
if (iter == 1)
delta = std::min(delta, snorm);
// Compute prediction alpha*snorm of the step.
if (fnew - f - gs <= 0)
alpha = sigma3;
else
alpha = std::max(sigma1, -0.5*(gs/(fnew - f - gs)));
// Update the trust region bound according to the ratio of actual to predicted reduction.
if (actred < eta0*prered)
delta = std::min(std::max(alpha, sigma1)*snorm, sigma2*delta);
else if (actred < eta1*prered)
delta = std::max(sigma1*delta, std::min(alpha*snorm, sigma2*delta));
else if (actred < eta2*prered)
delta = std::max(sigma1*delta, std::min(alpha*snorm, sigma3*delta));
else
delta = std::max(delta, std::min(alpha*snorm, sigma3*delta));
info("iter %2d act %5.3e pre %5.3e delta %5.3e f %5.3e |g| %5.3e CG %3d |g| %5.3e\n", iter, actred, prered, delta, f, gnorm, cg_iter, cg_rnorm);
//info("iter %2d act %5.3e pre %5.3e delta %5.3e f %.17g |g| %.17g CG %3d |g| %5.3e\n", iter, actred, prered, delta, f, gnorm, cg_iter, cg_rnorm);
// accept the step only when the actual reduction is sufficient
if (actred > eta0*prered)
{
iter++;
//memcpy(w, w_new, sizeof(double)*n);
//dcopy_(&n, w_new, &inc, w, &inc);
copy(&n, w_new, &inc, w, &inc);
f = fnew;
fun_obj->grad(w, g);
//gnorm = dnrm2_(&n, g, &inc);
//gnorm = sqrt(ddot_(&n, g, &inc, g, &inc));
gnorm = sqrt(dot(&n, g, &inc, g, &inc));
// relative gradient-norm stopping test against the initial norm
if (gnorm <= eps*gnorm1)
break;
}
if (f < -1.0e+32)
{
info("WARNING: f < -1.0e+32\n");
break;
}
if (fabs(actred) <= 0 && prered <= 0)
{
info("WARNING: actred and prered <= 0\n");
break;
}
if (fabs(actred) <= 1.0e-12*fabs(f) &&
fabs(prered) <= 1.0e-12*fabs(f))
{
info("WARNING: actred and prered too small\n");
break;
}
}
} // }}}
// Truncated-Newton solver with line search: each iteration computes a CG
// direction s (trcg with delta = 0, i.e. plain CG) and steps along it using
// the objective's own line_search(). Requires fun_obj to support line search.
template<typename val_type>
void TRON<val_type>::tron_linesearch(val_type *w, bool set_w_to_zero) { // {{{
ptrdiff_t n = fun_obj->get_nr_variable();
int i;
val_type step_size=1.0;
double f, fnew, actred;
double init_step_size = 1;
const double delta=0; // delta = 0 => trcg reduces to standard CG
//val_type one=1.0;
size_t search = 1, iter = 1, cg_iter = 0;
ptrdiff_t inc = 1;
if (set_w_to_zero)
for (i=0; i<n; i++)
w[i] = 0;
// calculate gradient norm at w=0 for stopping condition
// (gnorm0 is always evaluated at the origin, even when warm-starting)
#pragma omp parallel for schedule(static)
for(i=0;i<n;i++)
w_new[i] = 0;
f = fun_obj->fun(w_new);
fun_obj->grad(w_new, g);
double gnorm0 = sqrt(dot(&n, g, &inc, g, &inc));
// when warm-starting, recompute f and g at the supplied w
if (!set_w_to_zero) {
f = fun_obj->fun(w);
fun_obj->grad(w, g);
}
double gnorm = sqrt(dot(&n, g, &inc, g, &inc));
//if (gnorm <= eps*gnorm1 || gnorm1 < eps)
if (gnorm <= eps*gnorm0)
search = 0;
iter = 1;
bool do_update = true; // perform w/grad/Hv updates inside line_search
while (iter <= max_iter && search)
{
double cg_rnorm=0;
cg_iter = trcg(delta, g, s, r, &cg_rnorm);
step_size = fun_obj->line_search(s, w, g, init_step_size, &fnew, do_update);
actred = f - fnew;
if(step_size == 0) {
info("WARNING: line search fails\n");
break;
}
// if line_search did not update w itself, apply w += step_size * s here
if(!do_update) {
//daxpy_(&n, &step_size, s, &inc, w, &inc);
axpy(&n, &step_size, s, &inc, w, &inc);
}
info("iter %2d f %5.3e |g| %5.3e CG %3d step_size %5.3e |g| %5.3e\n", iter, f, gnorm, cg_iter, step_size, cg_rnorm);
f = fnew;
iter++;
if(!do_update) fun_obj->fun(w);
fun_obj->grad(w, g);
gnorm = sqrt(dot(&n, g, &inc, g, &inc));
if (gnorm <= eps*gnorm0)
break;
if (f < -1.0e+32) {
info("WARNING: f < -1.0e+32\n");
break;
}
if (fabs(actred) <= 1.0e-12*fabs(f)) {
info("WARNING: actred too small\n");
break;
}
}
} // }}}
// Gradient descent with line search: the search direction is simply -g
// (no CG), stepped with the objective's line_search(). The iteration budget
// is max_iter*max_cg_iter to be comparable with the Newton variants.
template<typename val_type>
void TRON<val_type>::gd_linesearch(val_type *w, bool set_w_to_zero) { // {{{
ptrdiff_t n = fun_obj->get_nr_variable();
int i;
val_type step_size=1.0;
double f, fnew, actred;
double init_step_size = 1;
//const double delta=0; // delta = 0 => trcg reduces to standard CG
//val_type one=1.0;
size_t search = 1, iter = 1;
ptrdiff_t inc = 1;
if (set_w_to_zero)
for (i=0; i<n; i++)
w[i] = 0;
// calculate gradient norm at w=0 for stopping condition
// (gnorm0 is always evaluated at the origin, even when warm-starting)
#pragma omp parallel for schedule(static)
for(i=0;i<n;i++)
w_new[i] = 0;
f = fun_obj->fun(w_new);
fun_obj->grad(w_new, g);
double gnorm0 = sqrt(dot(&n, g, &inc, g, &inc));
// when warm-starting, recompute f and g at the supplied w
if (!set_w_to_zero) {
f = fun_obj->fun(w);
fun_obj->grad(w, g);
}
double gnorm = sqrt(dot(&n, g, &inc, g, &inc));
//if (gnorm <= eps*gnorm1 || gnorm1 < eps)
if (gnorm <= eps*gnorm0)
search = 0;
iter = 1;
bool do_update = true; // perform w/grad/Hv updates inside line_search
while (iter <= (max_iter*max_cg_iter) && search)
{
//double cg_rnorm=0;
//cg_iter = trcg(delta, g, s, r, &cg_rnorm);
// steepest-descent direction s = -g
#pragma omp parallel for
for (i=0; i<n; i++)
s[i] = -g[i];
step_size = fun_obj->line_search(s, w, g, init_step_size, &fnew, do_update);
actred = f - fnew;
if(step_size == 0) {
info("WARNING: line search fails\n");
break;
}
// if line_search did not update w itself, apply w += step_size * s here
if(!do_update)
axpy(&n, &step_size, s, &inc, w, &inc);
info("iter %2d f %5.3e |g| %5.3e step_size %5.3e\n", iter, f, gnorm, step_size);
f = fnew;
iter++;
if(!do_update) fun_obj->fun(w);
fun_obj->grad(w, g);
gnorm = sqrt(dot(&n, g, &inc, g, &inc));
if (gnorm <= eps*gnorm0)
break;
if (f < -1.0e+32) {
info("WARNING: f < -1.0e+32\n");
break;
}
if (fabs(actred) <= 1.0e-12*fabs(f)) {
info("WARNING: actred too small\n");
break;
}
}
} // }}}
// Truncated conjugate gradient for the trust-region subproblem
// min_s g's + 0.5 s'Hs subject to ||s|| <= delta.
// On return: s is the (possibly boundary-truncated) step, r the residual,
// *cg_rnorm its norm; returns the number of CG iterations performed.
// With delta == 0 (or pure_cg) the boundary check is skipped and this is
// plain CG truncated at max_cg_iter.
template<typename val_type>
int TRON<val_type>::trcg(double delta, val_type *g, val_type *s, val_type *r, double *cg_rnorm) { // {{{
int i;
ptrdiff_t n = fun_obj->get_nr_variable();
ptrdiff_t inc = 1;
val_type one = 1;
/*
double *d = new double[n];
double *Hd = new double[n];
*/
val_type rTr, rnewTrnew, alpha, beta, cgtol;
// initialize: s = 0, residual r = -g, first search direction d = r
#pragma omp parallel for
for (i=0; i<n; i++)
{
s[i] = 0;
r[i] = -g[i];
d[i] = r[i];
}
//cgtol = 0.1*dnrm2_(&n, g, &inc);
//cgtol = 0.1*sqrt(ddot_(&n, g, &inc, g, &inc));
//cgtol = eps*sqrt(ddot_(&n, g, &inc, g, &inc));
cgtol = eps_cg*sqrt(dot(&n, g, &inc, g, &inc));
//cgtol = eps*sqrt(dot(&n, g, &inc, g, &inc));
size_t cg_iter = 0;
//rTr = ddot_(&n, r, &inc, r, &inc);
rTr = dot(&n, r, &inc, r, &inc);
//double rTr_init = rTr;
while (1)
{
//*cg_rnorm = sqrt(ddot_(&n, r, &inc, r, &inc));
*cg_rnorm = sqrt(dot(&n, r, &inc, r, &inc));
if (*cg_rnorm <= cgtol)
break;
/*
*cg_rnorm = sqrt(rTr);
if((rTr < eps_cg * rTr_init) && (rTr < eps_cg))
break;
*/
if (max_cg_iter > 0 && cg_iter >= max_cg_iter)
break;
cg_iter++;
fun_obj->Hv(d, Hd);
//alpha = rTr/ddot_(&n, d, &inc, Hd, &inc);
alpha = rTr/dot(&n, d, &inc, Hd, &inc);
//daxpy_(&n, &alpha, d, &inc, s, &inc);
axpy(&n, &alpha, d, &inc, s, &inc);
//if (sqrt(ddot_(&n, s, &inc, s, &inc)) > delta)
// step left the trust region: back up and take the exact step to the
// boundary by solving ||s + alpha*d|| = delta for alpha
if (!pure_cg && delta > 0 && sqrt(dot(&n, s, &inc, s, &inc)) > delta)
{
info("cg reaches trust region boundary\n");
alpha = -alpha;
//daxpy_(&n, &alpha, d, &inc, s, &inc);
axpy(&n, &alpha, d, &inc, s, &inc);
//double std = ddot_(&n, s, &inc, d, &inc);
// NOTE(review): local "std" shadows the std namespace name in this scope
double std = dot(&n, s, &inc, d, &inc);
//double sts = ddot_(&n, s, &inc, s, &inc);
double sts = dot(&n, s, &inc, s, &inc);
//double dtd = ddot_(&n, d, &inc, d, &inc);
double dtd = dot(&n, d, &inc, d, &inc);
double dsq = delta*delta;
double rad = sqrt(std*std + dtd*(dsq-sts));
if (std >= 0)
alpha = (dsq - sts)/(std + rad);
else
alpha = (rad - std)/dtd;
//daxpy_(&n, &alpha, d, &inc, s, &inc);
axpy(&n, &alpha, d, &inc, s, &inc);
alpha = -alpha;
//daxpy_(&n, &alpha, Hd, &inc, r, &inc);
axpy(&n, &alpha, Hd, &inc, r, &inc);
break;
}
alpha = -alpha;
//daxpy_(&n, &alpha, Hd, &inc, r, &inc);
axpy(&n, &alpha, Hd, &inc, r, &inc);
//rnewTrnew = ddot_(&n, r, &inc, r, &inc);
rnewTrnew = dot(&n, r, &inc, r, &inc);
beta = rnewTrnew/rTr;
//dscal_(&n, &beta, d, &inc);
// d = beta*d + r, done with two axpys: first d += (beta-1)*d, then d += r
val_type tmp = beta - (val_type)1.0;
//daxpy_(&n, &tmp, d, &inc, d, &inc);
axpy(&n, &tmp, d, &inc, d, &inc);
//daxpy_(&n, &one, r, &inc, d, &inc);
axpy(&n, &one, r, &inc, d, &inc);
rTr = rnewTrnew;
}
return(cg_iter);
} // }}}
// Infinity norm: max over i of |x[i]|. Returns 0 for an empty vector
// (the previous version read x[0] unconditionally, which is undefined for
// n == 0, and used an int loop index against a size_t bound — a
// signed/unsigned mismatch that overflows for very large n).
template<typename val_type>
double TRON<val_type>::norm_inf(size_t n, val_type *x) { // {{{
	double dmax = 0.0;
	for (size_t i = 0; i < n; i++) {
		double a = fabs((double)x[i]);
		if (a > dmax)
			dmax = a;
	}
	return dmax;
} // }}}
// Install a custom sink for the solver's progress messages (replaces the
// stdout default installed by the constructor).
template<typename val_type>
void TRON<val_type>::set_print_string(void (*print_fn) (const char *buf)) { // {{{
	tron_print_string = print_fn;
} // }}}
#endif // end of RF_TRON_H
|
GB_unop__one_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__one_fc64_fc64
// op(A') function: GB_unop_tran__one_fc64_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: ;
// unaryop: cij = GxB_CMPLX(1,0)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GxB_CMPLX(1,0) ;
// casting
#define GB_CAST(z, aij) \
; ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
; ; \
/* Cx [pC] = op (cast (aij)) */ \
; ; \
Cx [pC] = GxB_CMPLX(1,0) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the ONE operator: every output entry is the complex constant 1+0i;
// the values of A are never read (only its entry count matters).
GrB_Info GB_unop_apply__one_fc64_fc64
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// the bare " ; ;" statements are residue of the generator's GETA/CAST
// macros, which expand to nothing for this operator
; ;
; ;
Cx [p] = GxB_CMPLX(1,0) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the actual work is done by the shared transpose
// template GB_unop_transpose.c, specialized via the GB_* macros above.
GrB_Info GB_unop_tran__one_fc64_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__identity_fp32_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp32_bool
// op(A') function: GB_tran__identity_fp32_bool
// C type: float
// A type: bool
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
bool
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the IDENTITY operator with typecast: Cx [p] = (float) Ax [p] for
// all anz entries, parallelized statically over nthreads.
GrB_Info GB_unop__identity_fp32_bool
(
float *restrict Cx,
const bool *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// expands to: aij = Ax [p] ; Cx [p] = (float) aij
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the actual work is done by the shared transpose
// template GB_unaryop_transpose.c, specialized via the GB_* macros above.
GrB_Info GB_tran__identity_fp32_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
|
omp_dem_search.h | //
// Project Name: Kratos
// Last Modified by: $Author: clabra $
// Date: $Date: 2007-03-29 19:37:47 $
// Revision: $Revision: 1.2 $
//
//
#if !defined(KRATOS_OMP_DEM_SEARCH_H_INCLUDED )
#define KRATOS_OMP_DEM_SEARCH_H_INCLUDED
// System includes
#include <string>
#include <iostream>
// include kratos definitions
#include "includes/define.h"
// Project includes
#include "spatial_containers/dem_search.h"
#include "utilities/openmp_utils.h"
// Configures
#include "discrete_particle_configure.h"
#include "geometrical_object_configure.h"
#include "node_configure.h"
// Search
#include "spatial_containers/bins_dynamic_objects.h"
#include "spatial_containers/bins_dynamic.h"
#include "custom_search/bins_dynamic_objects_periodic.h"
// External includes
/* Timer defines */
#include "utilities/timer.h"
#ifdef CUSTOMTIMER
#define KRATOS_TIMER_START(t) Timer::Start(t);
#define KRATOS_TIMER_STOP(t) Timer::Stop(t);
#else
#define KRATOS_TIMER_START(t)
#define KRATOS_TIMER_STOP(t)
#endif
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/** Detail class definition.
*/
class OMP_DEMSearch : public DEMSearch<OMP_DEMSearch>
{
public:
///@name Type Definitions
///@{
/// Pointer definition of OMP_DEMSearch
KRATOS_CLASS_POINTER_DEFINITION(OMP_DEMSearch);
typedef PointType* PtrPointType;
typedef std::vector<PtrPointType>* PointVector;
typedef std::vector<PtrPointType>::iterator PointIterator;
typedef double* DistanceVector;
typedef double* DistanceIterator;
//Configure Types
typedef DiscreteParticleConfigure<3> ElementConfigureType; //Element
typedef NodeConfigure<3> NodeConfigureType; //Node
typedef GeometricalConfigure<3> GeometricalConfigureType; //Generic Geometry
//Bin Types
typedef BinsObjectDynamic<ElementConfigureType> BinsType;
typedef BinsObjectDynamicPeriodic<ElementConfigureType> BinsTypePeriodic;
typedef std::unique_ptr<BinsType> BinsUniquePointerType;
typedef BinsObjectDynamic<NodeConfigureType> NodeBinsType;
typedef BinsObjectDynamicPeriodic<NodeConfigureType> NodeBinsTypePeriodic;
typedef std::unique_ptr<NodeBinsType> NodeBinsUniquePointerType;
typedef BinsObjectDynamic<GeometricalConfigureType> GeometricalBinsType;
//GeoimetricalObject
typedef PointerVectorSet<GeometricalObject, IndexedObject> GeometricalObjectType;
///@}
///@name Life Cycle
///@{
/// Default constructor.
OMP_DEMSearch(const double domain_min_x = 0.0, const double domain_min_y = 0.0, const double domain_min_z = 0.0,
              const double domain_max_x = -1.0, const double domain_max_y = -1.0, const double domain_max_z = -1.0)
{
    // Periodicity is enabled iff a non-degenerate domain box was supplied;
    // with the defaults (min 0 > max -1) it stays disabled. Only the
    // x-extent is inspected here — presumably callers pass consistent
    // bounds for all three axes; TODO confirm.
    mDomainPeriodicity = (domain_min_x <= domain_max_x);
}
/// Destructor.
~OMP_DEMSearch()
{
    // Nothing to release: the search bins are created per call inside the
    // search methods (held by unique_ptr) and clean themselves up.
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
// For each element in rElements, find all neighbouring elements of
// rStructureElements within that particle's own search radius (the Radius
// argument is NOT used — each particle's GetSearchRadius() is queried
// instead). "Exclusive" search: results are written to rResults and their
// distances to rResultsDistance, indexed like rElements.
void SearchElementsInRadiusExclusiveImplementation (
ElementsContainerType const& rStructureElements,
ElementsContainerType const& rElements,
const RadiusArrayType & Radius,
VectorResultElementsContainerType& rResults,
VectorDistanceType& rResultsDistance )
{
// KRATOS_TRY
//
// int MaxNumberOfElements = rStructureElements.size();
//
// ElementsContainerType::ContainerType& elements_bins = const_cast<ElementsContainerType::ContainerType&>(rStructureElements.GetContainer());
// ElementsContainerType::ContainerType& elements_sear = const_cast<ElementsContainerType::ContainerType&>(rElements.GetContainer());
//
// GeometricalObjectType::ContainerType SearElementPointerToGeometricalObjecPointerTemporalVector;
// GeometricalObjectType::ContainerType BinsElementPointerToGeometricalObjecPointerTemporalVector;
//
// BinsElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_bins.size());
// SearElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_sear.size());
//
// for (ElementsContainerType::ContainerType::iterator it = elements_bins.begin(); it != elements_bins.end(); it++)
// BinsElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);
//
// for (ElementsContainerType::ContainerType::iterator it = elements_sear.begin(); it != elements_sear.end(); it++)
// SearElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);
//
// GeometricalBinsType bins(BinsElementPointerToGeometricalObjecPointerTemporalVector.begin(), BinsElementPointerToGeometricalObjecPointerTemporalVector.end());
//
// #pragma omp parallel
// {
// GeometricalObjectType::ContainerType localResults(MaxNumberOfElements);
// DistanceType localResultsDistances(MaxNumberOfElements);
// std::size_t NumberOfResults = 0;
//
// #pragma omp for
// for (std::size_t i = 0; i < elements_sear.size(); ++i)
// {
// GeometricalObjectType::ContainerType::iterator ResultsPointer = localResults.begin();
// DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();
//
// NumberOfResults = bins.SearchObjectsInRadiusExclusive(SearElementPointerToGeometricalObjecPointerTemporalVector[i],Radius[i],ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);
//
// rResults[i].reserve(NumberOfResults);
//
// for (GeometricalObjectType::ContainerType::iterator it = localResults.begin(); it != localResults.begin() + NumberOfResults; it++)
// {
// Element::Pointer elem = dynamic_pointer_cast<Element>(*it);
// rResults[i].push_back(elem);
// rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
// }
// }
// }
//
// KRATOS_CATCH("")
KRATOS_TRY
int MaxNumberOfElements = rStructureElements.size();
ElementsContainerType::ContainerType& elements_array = const_cast<ElementsContainerType::ContainerType&>(rElements.GetContainer());
ElementsContainerType::ContainerType& elements_ModelPart = const_cast<ElementsContainerType::ContainerType&>(rStructureElements.GetContainer());
// build the bins over the structure elements (periodic or not, per GetBins)
BinsUniquePointerType p_bins = GetBins(elements_ModelPart);
#pragma omp parallel
{
// per-thread scratch buffers, sized for the worst case
ResultElementsContainerType localResults(MaxNumberOfElements);
DistanceType localResultsDistances(MaxNumberOfElements);
std::size_t NumberOfResults = 0;
#pragma omp for schedule(dynamic, 100) //schedule(guided)
for (int i = 0; i < static_cast<int>(elements_array.size()); ++i){
ResultElementsContainerType::iterator ResultsPointer = localResults.begin();
DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();
SphericParticle* p_particle = dynamic_cast<SphericParticle*>(&*elements_array[i]);
const double radius = p_particle->GetSearchRadius();
NumberOfResults = p_bins->SearchObjectsInRadiusExclusive(elements_array[i],radius,ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);
rResults[i].insert(rResults[i].begin(),localResults.begin(),localResults.begin()+NumberOfResults);
rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
}
}
//MAJOR TODO: creating and destroying (when leaving the function) this BINS is not parallel and takes a significant time if we search at every time step. Can we re-use a bins and avoid allocation and deallocation?? MA
KRATOS_CATCH("")
}
// Inclusive variant of the radius search: same structure as the exclusive
// version above but calls SearchObjectsInRadius. As there, the Radius
// argument is NOT used — each particle's GetSearchRadius() is queried.
void SearchElementsInRadiusInclusiveImplementation (
ElementsContainerType const& rStructureElements,
ElementsContainerType const& rElements,
const RadiusArrayType& Radius,
VectorResultElementsContainerType& rResults,
VectorDistanceType& rResultsDistance )
{
KRATOS_TRY
int MaxNumberOfElements = rStructureElements.size();
ElementsContainerType::ContainerType& elements_array = const_cast<ElementsContainerType::ContainerType&>(rElements.GetContainer());
ElementsContainerType::ContainerType& elements_ModelPart = const_cast<ElementsContainerType::ContainerType&>(rStructureElements.GetContainer());
BinsUniquePointerType p_bins = GetBins(elements_ModelPart);
#pragma omp parallel
{
// per-thread scratch buffers, sized for the worst case
ResultElementsContainerType localResults(MaxNumberOfElements);
DistanceType localResultsDistances(MaxNumberOfElements);
std::size_t NumberOfResults = 0;
#pragma omp for
for (int i = 0; i < static_cast<int>(elements_array.size()); ++i){
ResultElementsContainerType::iterator ResultsPointer = localResults.begin();
DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();
SphericParticle* p_particle = dynamic_cast<SphericParticle*>(&*elements_array[i]);
const double radius = p_particle->GetSearchRadius();
NumberOfResults = p_bins->SearchObjectsInRadius(elements_array[i],radius,ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);
rResults[i].insert(rResults[i].begin(),localResults.begin(),localResults.begin()+NumberOfResults);
rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
}
}
KRATOS_CATCH("")
}
void SearchElementsInRadiusExclusiveImplementation (
    ElementsContainerType const& rStructureElements,
    ElementsContainerType const& rElements,
    const RadiusArrayType & Radius,
    VectorResultElementsContainerType& rResults )
{
    // For each element of rElements, collects the elements of
    // rStructureElements lying within the particle's own search radius,
    // excluding the searched object itself (no distances returned).
    KRATOS_TRY

    const int max_results = rStructureElements.size();

    ElementsContainerType::ContainerType& search_elements = const_cast<ElementsContainerType::ContainerType&>(rElements.GetContainer());
    ElementsContainerType::ContainerType& bin_elements    = const_cast<ElementsContainerType::ContainerType&>(rStructureElements.GetContainer());

    // Periodic or plain bins, depending on the domain settings.
    BinsUniquePointerType bins_ptr = GetBins(bin_elements);

    #pragma omp parallel
    {
        // Per-thread scratch buffer, sized for the worst case.
        ResultElementsContainerType thread_results(max_results);

        #pragma omp for
        for (int idx = 0; idx < static_cast<int>(search_elements.size()); ++idx) {
            ResultElementsContainerType::iterator results_it = thread_results.begin();

            SphericParticle* particle = dynamic_cast<SphericParticle*>(&*search_elements[idx]);
            const double search_radius = particle->GetSearchRadius();

            const std::size_t n_found = bins_ptr->SearchObjectsInRadiusExclusive(search_elements[idx], search_radius, results_it, max_results);

            rResults[idx].insert(rResults[idx].begin(), thread_results.begin(), thread_results.begin() + n_found);
        }
    }

    KRATOS_CATCH("")
}
void SearchElementsInRadiusInclusiveImplementation (
ElementsContainerType const& rStructureElements,
ElementsContainerType const& rElements,
const RadiusArrayType & Radius,
VectorResultElementsContainerType& rResults )
{
// Inclusive element search (no distances returned): for every element of
// rElements, gathers the elements of rStructureElements within the
// particle's own search radius, including the object itself.
// NOTE(review): unlike the sibling overloads, this method builds a plain
// BinsType directly instead of calling GetBins(), so domain periodicity
// (mDomainPeriodicity) is NOT honoured here -- confirm whether this is
// intentional or an oversight.
KRATOS_TRY
int MaxNumberOfElements = rStructureElements.size();
ElementsContainerType::ContainerType& elements_array = const_cast<ElementsContainerType::ContainerType&>(rElements.GetContainer());
ElementsContainerType::ContainerType& elements_ModelPart = const_cast<ElementsContainerType::ContainerType&>(rStructureElements.GetContainer());
BinsType bins(elements_ModelPart.begin(), elements_ModelPart.end());
#pragma omp parallel
{
// Thread-local result buffer sized for the worst case.
ResultElementsContainerType localResults(MaxNumberOfElements);
std::size_t NumberOfResults = 0;
#pragma omp for
for (int i = 0; i < static_cast<int>(elements_array.size()); ++i){
ResultElementsContainerType::iterator ResultsPointer = localResults.begin();
SphericParticle* p_particle = dynamic_cast<SphericParticle*>(&*elements_array[i]);
const double radius = p_particle->GetSearchRadius();
NumberOfResults = bins.SearchObjectsInRadius(elements_array[i],radius,ResultsPointer,MaxNumberOfElements);
rResults[i].insert(rResults[i].begin(),localResults.begin(),localResults.begin()+NumberOfResults);
}
}
KRATOS_CATCH("")
}
void SearchNodesInRadiusExclusiveImplementation (
NodesContainerType const& rStructureNodes,
NodesContainerType const& rNodes,
const RadiusArrayType & Radius,
VectorResultNodesContainerType& rResults,
VectorDistanceType& rResultsDistance )
{
// Exclusive node search with distances: finds neighbouring nodes within
// Radius[i] of each searched node (the node itself is excluded).
// NOTE(review): here the bins are built from rNodes while the search loop
// runs over rStructureNodes -- the opposite of the Inclusive sibling below,
// which builds bins from rStructureNodes and iterates rNodes; also
// MaxNumberOfNodes is taken from rNodes rather than the structure set.
// Confirm this asymmetry is intentional.
KRATOS_TRY
int MaxNumberOfNodes = rNodes.size();
NodesContainerType::ContainerType& nodes_ModelPart = const_cast<NodesContainerType::ContainerType&>(rNodes.GetContainer());
NodesContainerType::ContainerType& nodes_array = const_cast<NodesContainerType::ContainerType&>(rStructureNodes.GetContainer());
// NodeBinsType bins(nodes_ModelPart.begin(), nodes_ModelPart.end());
// GetBins() returns a periodic or plain bins object per mDomainPeriodicity.
NodeBinsUniquePointerType p_bins = GetBins(nodes_ModelPart);
#pragma omp parallel
{
// Thread-local scratch buffers sized for the worst case.
ResultNodesContainerType localResults(MaxNumberOfNodes);
DistanceType localResultsDistances(MaxNumberOfNodes);
std::size_t NumberOfResults = 0;
#pragma omp for
for (int i = 0; i < static_cast<int>(nodes_array.size()); ++i){
ResultNodesContainerType::iterator ResultsPointer = localResults.begin();
DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();
NumberOfResults = p_bins->SearchObjectsInRadiusExclusive(nodes_array[i], Radius[i], ResultsPointer, ResultsDistancesPointer, MaxNumberOfNodes);
rResults[i].insert(rResults[i].begin(),localResults.begin(),localResults.begin()+NumberOfResults);
rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
}
}
KRATOS_CATCH("")
}
void SearchNodesInRadiusInclusiveImplementation (
    NodesContainerType const& rStructureNodes,
    NodesContainerType const& rNodes,
    const RadiusArrayType & Radius,
    VectorResultNodesContainerType& rResults,
    VectorDistanceType& rResultsDistance )
{
    // Inclusive node search with distances: for every node of rNodes,
    // collects the nodes of rStructureNodes within Radius[i] together with
    // their distances.
    KRATOS_TRY

    const int max_results = rStructureNodes.size();

    NodesContainerType::ContainerType& search_nodes = const_cast<NodesContainerType::ContainerType&>(rNodes.GetContainer());
    NodesContainerType::ContainerType& bin_nodes    = const_cast<NodesContainerType::ContainerType&>(rStructureNodes.GetContainer());

    NodeBinsType bins(bin_nodes.begin(), bin_nodes.end());

    #pragma omp parallel
    {
        // Per-thread scratch buffers, sized for the worst case.
        ResultNodesContainerType thread_results(max_results);
        DistanceType thread_distances(max_results);

        #pragma omp for
        for (int idx = 0; idx < static_cast<int>(search_nodes.size()); ++idx) {
            ResultNodesContainerType::iterator results_it = thread_results.begin();
            DistanceType::iterator distances_it = thread_distances.begin();

            const std::size_t n_found = bins.SearchObjectsInRadius(search_nodes[idx], Radius[idx], results_it, distances_it, max_results);

            rResults[idx].insert(rResults[idx].begin(), thread_results.begin(), thread_results.begin() + n_found);
            rResultsDistance[idx].insert(rResultsDistance[idx].begin(), thread_distances.begin(), thread_distances.begin() + n_found);
        }
    }

    KRATOS_CATCH("")
}
void SearchNodesInRadiusExclusiveImplementation (
    NodesContainerType const& rStructureNodes,
    NodesContainerType const& rNodes,
    const RadiusArrayType & Radius,
    VectorResultNodesContainerType& rResults )
{
    // Exclusive node search without distances: for every node of rNodes,
    // collects the nodes of rStructureNodes within Radius[i], leaving out
    // the searched node itself.
    KRATOS_TRY

    const int max_results = rStructureNodes.size();

    NodesContainerType::ContainerType& search_nodes = const_cast<NodesContainerType::ContainerType&>(rNodes.GetContainer());
    NodesContainerType::ContainerType& bin_nodes    = const_cast<NodesContainerType::ContainerType&>(rStructureNodes.GetContainer());

    NodeBinsType bins(bin_nodes.begin(), bin_nodes.end());

    #pragma omp parallel
    {
        // Per-thread scratch buffer, sized for the worst case.
        ResultNodesContainerType thread_results(max_results);

        #pragma omp for
        for (int idx = 0; idx < static_cast<int>(search_nodes.size()); ++idx) {
            ResultNodesContainerType::iterator results_it = thread_results.begin();

            const std::size_t n_found = bins.SearchObjectsInRadiusExclusive(search_nodes[idx], Radius[idx], results_it, max_results);

            rResults[idx].insert(rResults[idx].begin(), thread_results.begin(), thread_results.begin() + n_found);
        }
    }

    KRATOS_CATCH("")
}
void SearchNodesInRadiusInclusiveImplementation (
    NodesContainerType const& rStructureNodes,
    NodesContainerType const& rNodes,
    const RadiusArrayType & Radius,
    VectorResultNodesContainerType& rResults )
{
    // Inclusive node search without distances: for every node of rNodes,
    // collects the nodes of rStructureNodes within Radius[i] (the searched
    // node itself may be among the results).
    KRATOS_TRY

    const int max_results = rStructureNodes.size();

    NodesContainerType::ContainerType& search_nodes = const_cast<NodesContainerType::ContainerType&>(rNodes.GetContainer());
    NodesContainerType::ContainerType& bin_nodes    = const_cast<NodesContainerType::ContainerType&>(rStructureNodes.GetContainer());

    NodeBinsType bins(bin_nodes.begin(), bin_nodes.end());

    #pragma omp parallel
    {
        // Per-thread scratch buffer, sized for the worst case.
        ResultNodesContainerType thread_results(max_results);

        #pragma omp for
        for (int idx = 0; idx < static_cast<int>(search_nodes.size()); ++idx) {
            ResultNodesContainerType::iterator results_it = thread_results.begin();

            const std::size_t n_found = bins.SearchObjectsInRadius(search_nodes[idx], Radius[idx], results_it, max_results);

            rResults[idx].insert(rResults[idx].begin(), thread_results.begin(), thread_results.begin() + n_found);
        }
    }

    KRATOS_CATCH("")
}
void SearchGeometricalInRadiusExclusiveImplementation (
    ElementsContainerType const& rStructureElements,
    ConditionsContainerType const& rElements,
    const RadiusArrayType & Radius,
    VectorResultConditionsContainerType& rResults,
    VectorDistanceType& rResultsDistance )
{
    // For every condition in rElements, searches the elements of
    // rStructureElements within Radius[i] (exclusive: the searched object
    // itself is omitted), filling rResults[i] with the matches cast back to
    // Condition pointers and rResultsDistance[i] with their distances.
    KRATOS_TRY

    int MaxNumberOfElements = rStructureElements.size();

    ElementsContainerType::ContainerType& elements_bins = const_cast<ElementsContainerType::ContainerType&> (rStructureElements.GetContainer());
    ConditionsContainerType::ContainerType& elements_sear = const_cast<ConditionsContainerType::ContainerType&>(rElements.GetContainer());

    // The bins operate on GeometricalObject pointers, so both containers are
    // first copied into temporary GeometricalObject vectors.
    GeometricalObjectType::ContainerType SearElementPointerToGeometricalObjecPointerTemporalVector;
    GeometricalObjectType::ContainerType BinsElementPointerToGeometricalObjecPointerTemporalVector;

    SearElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_sear.size());
    BinsElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_bins.size());

    for (ElementsContainerType::ContainerType::iterator it = elements_bins.begin(); it != elements_bins.end(); it++)
        BinsElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);

    for (ConditionsContainerType::ContainerType::iterator it = elements_sear.begin(); it != elements_sear.end(); it++)
        SearElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);

    GeometricalBinsType bins(BinsElementPointerToGeometricalObjecPointerTemporalVector.begin(), BinsElementPointerToGeometricalObjecPointerTemporalVector.end());

    #pragma omp parallel
    {
        // Thread-local scratch buffers sized for the worst case.
        GeometricalObjectType::ContainerType localResults(MaxNumberOfElements);
        DistanceType localResultsDistances(MaxNumberOfElements);
        std::size_t NumberOfResults = 0;

        #pragma omp for
        for (int i = 0; i < static_cast<int>(elements_sear.size()); ++i){
            GeometricalObjectType::ContainerType::iterator ResultsPointer = localResults.begin();
            DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();

            NumberOfResults = bins.SearchObjectsInRadiusExclusive(SearElementPointerToGeometricalObjecPointerTemporalVector[i],Radius[i],ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);

            rResults[i].reserve(NumberOfResults);
            for (GeometricalObjectType::ContainerType::iterator it = localResults.begin(); it != localResults.begin() + NumberOfResults; it++)
            {
                Condition::Pointer elem = dynamic_pointer_cast<Condition>(*it);
                rResults[i].push_back(elem);
            }
            // BUGFIX: the distance insertion used to sit inside the loop
            // above, inserting the whole distance range once per found
            // result and thus duplicating it NumberOfResults times. The
            // distances are inserted exactly once here instead.
            rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
        }
    }

    KRATOS_CATCH("")
}
void SearchGeometricalInRadiusInclusiveImplementation (
    ElementsContainerType const& rStructureElements,
    ConditionsContainerType const& rElements,
    const RadiusArrayType& Radius,
    VectorResultConditionsContainerType& rResults,
    VectorDistanceType& rResultsDistance )
{
    // For every condition in rElements, searches the elements of
    // rStructureElements within Radius[i] (inclusive: the searched object
    // may appear in its own results), filling rResults[i] with the matches
    // cast back to Condition pointers and rResultsDistance[i] with their
    // distances.
    KRATOS_TRY

    int MaxNumberOfElements = rStructureElements.size();

    ElementsContainerType::ContainerType& elements_bins = const_cast<ElementsContainerType::ContainerType&> (rStructureElements.GetContainer());
    ConditionsContainerType::ContainerType& elements_sear = const_cast<ConditionsContainerType::ContainerType&>(rElements.GetContainer());

    // The bins operate on GeometricalObject pointers, so both containers are
    // first copied into temporary GeometricalObject vectors.
    GeometricalObjectType::ContainerType SearElementPointerToGeometricalObjecPointerTemporalVector;
    GeometricalObjectType::ContainerType BinsElementPointerToGeometricalObjecPointerTemporalVector;

    SearElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_sear.size());
    BinsElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_bins.size());

    for (ElementsContainerType::ContainerType::iterator it = elements_bins.begin(); it != elements_bins.end(); it++)
        BinsElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);

    for (ConditionsContainerType::ContainerType::iterator it = elements_sear.begin(); it != elements_sear.end(); it++)
        SearElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);

    GeometricalBinsType bins(BinsElementPointerToGeometricalObjecPointerTemporalVector.begin(), BinsElementPointerToGeometricalObjecPointerTemporalVector.end());

    #pragma omp parallel
    {
        // Thread-local scratch buffers sized for the worst case.
        GeometricalObjectType::ContainerType localResults(MaxNumberOfElements);
        DistanceType localResultsDistances(MaxNumberOfElements);
        std::size_t NumberOfResults = 0;

        #pragma omp for
        for (int i = 0; i < static_cast<int>(elements_sear.size()); ++i){
            GeometricalObjectType::ContainerType::iterator ResultsPointer = localResults.begin();
            DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();

            NumberOfResults = bins.SearchObjectsInRadius(SearElementPointerToGeometricalObjecPointerTemporalVector[i],Radius[i],ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);

            rResults[i].reserve(NumberOfResults);
            for (GeometricalObjectType::ContainerType::iterator it = localResults.begin(); it != localResults.begin() + NumberOfResults; it++)
            {
                Condition::Pointer elem = dynamic_pointer_cast<Condition>(*it);
                rResults[i].push_back(elem);
            }
            // BUGFIX: previously this insert ran once per found result
            // (inside the loop above), duplicating the whole distance range
            // NumberOfResults times. Insert the distances exactly once.
            rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
        }
    }

    KRATOS_CATCH("")
}
void SearchGeometricalInRadiusExclusiveImplementation (
    ConditionsContainerType const& rStructureElements,
    ElementsContainerType const& rElements,
    const RadiusArrayType & Radius,
    VectorResultElementsContainerType& rResults,
    VectorDistanceType& rResultsDistance )
{
    // For every element in rElements, searches the conditions of
    // rStructureElements within Radius[i] (exclusive: the searched object
    // itself is omitted), filling rResults[i] with the matches cast back to
    // Element pointers and rResultsDistance[i] with their distances.
    KRATOS_TRY

    int MaxNumberOfElements = rStructureElements.size();

    ConditionsContainerType::ContainerType& elements_bins = const_cast<ConditionsContainerType::ContainerType&>(rStructureElements.GetContainer());
    ElementsContainerType::ContainerType& elements_sear = const_cast<ElementsContainerType::ContainerType&> (rElements.GetContainer());

    // The bins operate on GeometricalObject pointers, so both containers are
    // first copied into temporary GeometricalObject vectors.
    GeometricalObjectType::ContainerType SearElementPointerToGeometricalObjecPointerTemporalVector;
    GeometricalObjectType::ContainerType BinsElementPointerToGeometricalObjecPointerTemporalVector;

    SearElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_sear.size());
    BinsElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_bins.size());

    for (ElementsContainerType::ContainerType::iterator it = elements_sear.begin(); it != elements_sear.end(); it++)
        SearElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);

    for (ConditionsContainerType::ContainerType::iterator it = elements_bins.begin(); it != elements_bins.end(); it++)
        BinsElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);

    GeometricalBinsType bins(BinsElementPointerToGeometricalObjecPointerTemporalVector.begin(), BinsElementPointerToGeometricalObjecPointerTemporalVector.end());

    #pragma omp parallel
    {
        // Thread-local scratch buffers sized for the worst case.
        GeometricalObjectType::ContainerType localResults(MaxNumberOfElements);
        DistanceType localResultsDistances(MaxNumberOfElements);
        std::size_t NumberOfResults = 0;

        #pragma omp for
        for (int i = 0; i < static_cast<int>(elements_sear.size()); ++i){
            GeometricalObjectType::ContainerType::iterator ResultsPointer = localResults.begin();
            DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();

            NumberOfResults = bins.SearchObjectsInRadiusExclusive(SearElementPointerToGeometricalObjecPointerTemporalVector[i],Radius[i],ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);

            rResults[i].reserve(NumberOfResults);
            for (GeometricalObjectType::ContainerType::iterator it = localResults.begin(); it != localResults.begin() + NumberOfResults; it++)
            {
                Element::Pointer elem = dynamic_pointer_cast<Element>(*it);
                rResults[i].push_back(elem);
            }
            // BUGFIX: previously this insert ran once per found result
            // (inside the loop above), duplicating the whole distance range
            // NumberOfResults times. Insert the distances exactly once.
            rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
        }
    }

    KRATOS_CATCH("")
}
void SearchGeometricalInRadiusInclusiveImplementation (
    ConditionsContainerType const& rStructureElements,
    ElementsContainerType const& rElements,
    const RadiusArrayType& Radius,
    VectorResultElementsContainerType& rResults,
    VectorDistanceType& rResultsDistance )
{
    // For every element in rElements, searches the conditions of
    // rStructureElements within Radius[i] (inclusive: the searched object
    // may appear in its own results), filling rResults[i] with the matches
    // cast back to Element pointers and rResultsDistance[i] with their
    // distances.
    KRATOS_TRY

    int MaxNumberOfElements = rStructureElements.size();

    ConditionsContainerType::ContainerType& elements_bins = const_cast<ConditionsContainerType::ContainerType&>(rStructureElements.GetContainer());
    ElementsContainerType::ContainerType& elements_sear = const_cast<ElementsContainerType::ContainerType&> (rElements.GetContainer());

    // The bins operate on GeometricalObject pointers, so both containers are
    // first copied into temporary GeometricalObject vectors.
    GeometricalObjectType::ContainerType SearElementPointerToGeometricalObjecPointerTemporalVector;
    GeometricalObjectType::ContainerType BinsElementPointerToGeometricalObjecPointerTemporalVector;

    SearElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_sear.size());
    BinsElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_bins.size());

    for (ElementsContainerType::ContainerType::iterator it = elements_sear.begin(); it != elements_sear.end(); it++)
        SearElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);

    for (ConditionsContainerType::ContainerType::iterator it = elements_bins.begin(); it != elements_bins.end(); it++)
        BinsElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);

    GeometricalBinsType bins(BinsElementPointerToGeometricalObjecPointerTemporalVector.begin(), BinsElementPointerToGeometricalObjecPointerTemporalVector.end());

    #pragma omp parallel
    {
        // Thread-local scratch buffers sized for the worst case.
        GeometricalObjectType::ContainerType localResults(MaxNumberOfElements);
        DistanceType localResultsDistances(MaxNumberOfElements);
        std::size_t NumberOfResults = 0;

        #pragma omp for
        for (int i = 0; i < static_cast<int>(elements_sear.size()); ++i){
            GeometricalObjectType::ContainerType::iterator ResultsPointer = localResults.begin();
            DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();

            NumberOfResults = bins.SearchObjectsInRadius(SearElementPointerToGeometricalObjecPointerTemporalVector[i],Radius[i],ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);

            rResults[i].reserve(NumberOfResults);
            for (GeometricalObjectType::ContainerType::iterator it = localResults.begin(); it != localResults.begin() + NumberOfResults; it++)
            {
                Element::Pointer elem = dynamic_pointer_cast<Element>(*it);
                rResults[i].push_back(elem);
            }
            // BUGFIX: previously this insert ran once per found result
            // (inside the loop above), duplicating the whole distance range
            // NumberOfResults times. Insert the distances exactly once.
            rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
        }
    }

    KRATOS_CATCH("")
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const override
{
    return "OpenMPDemSearch";
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const override
{
    rOStream << "OpenMPDemSearch";
}
/// Print object's data. Intentionally empty: this search object carries no
/// printable data of its own.
virtual void PrintData(std::ostream& rOStream) const override {}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///
// Builds the element bins used by the element searches, honouring domain
// periodicity: a BinsTypePeriodic spanning [mDomainMin, mDomainMax] is
// created when mDomainPeriodicity is set, a plain BinsType otherwise.
// NOTE(review): the periodic branch destroys a BinsTypePeriodic through a
// unique_ptr<BinsType>; this is only well-defined if BinsType has a virtual
// destructor -- confirm in the spatial-containers headers.
BinsUniquePointerType GetBins(ElementsContainerType::ContainerType& r_model_part_container)
{
if (mDomainPeriodicity){
return std::unique_ptr<BinsType>(new BinsTypePeriodic(r_model_part_container.begin(), r_model_part_container.end(), this->mDomainMin, this->mDomainMax));
}
else {
return std::unique_ptr<BinsType>(new BinsType(r_model_part_container.begin(), r_model_part_container.end()));
}
}
// Node-container overload of GetBins(): returns periodic node bins when
// mDomainPeriodicity is set, plain node bins otherwise.
// NOTE(review): same caveat as the element overload -- destroying a
// NodeBinsTypePeriodic through unique_ptr<NodeBinsType> requires a virtual
// destructor in NodeBinsType.
NodeBinsUniquePointerType GetBins(NodesContainerType::ContainerType& r_model_part_container)
{
if (mDomainPeriodicity){
return std::unique_ptr<NodeBinsType>(new NodeBinsTypePeriodic(r_model_part_container.begin(), r_model_part_container.end(), this->mDomainMin, this->mDomainMax));
}
else {
return std::unique_ptr<NodeBinsType>(new NodeBinsType(r_model_part_container.begin(), r_model_part_container.end()));
}
}
///@}
///@name Un accessible methods
///@{
/// Assignment operator. Private and deliberately a no-op: the class holds
/// no copyable state of its own, and keeping it private prevents clients
/// from assigning search objects.
OMP_DEMSearch& operator=(OMP_DEMSearch const& rOther)
{
return *this;
}
/// Copy constructor. Private so clients cannot copy search objects; it
/// delegates to the (no-op) private assignment operator.
OMP_DEMSearch(OMP_DEMSearch const& rOther)
{
*this = rOther;
}
///@}
}; // Class DEMSearch
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
// inline std::istream& operator >> (std::istream& rIStream,
// DEMSearch& rThis){return rIStream;}
//
// /// output stream function
// inline std::ostream& operator << (std::ostream& rOStream,
// const DEMSearch& rThis)
// {
// rThis.PrintInfo(rOStream);
// rOStream << std::endl;
// rThis.PrintData(rOStream);
//
// return rOStream;
// }
///@}
///@} addtogroup block
} // namespace Kratos.
#endif // KRATOS_DEM_SEARCH_H_INCLUDED defined
|
GB_subassign_16.c | //------------------------------------------------------------------------------
// GB_subassign_16: C(I,J)<!M> += A ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// Method 16: C(I,J)<!M> += A ; using S
// M: present
// Mask_comp: true
// C_replace: false
// accum: present
// A: matrix
// S: constructed
#define GB_FREE_WORK GB_FREE_TWO_SLICE
#include "GB_subassign_methods.h"
GrB_Info GB_subassign_16            // C(I,J)<!M> += A ; using S
(
GrB_Matrix C,                       // matrix being assigned into (in place)
// input:
const GrB_Index *I,                 // row index list of the C submatrix
const int64_t nI,                   // number of row indices
const int Ikind,                    // encoding of I (list, range, colon, ...)
const int64_t Icolon [3],           // begin:inc:end when I is a colon expression
const GrB_Index *J,                 // column index list of the C submatrix
const int64_t nJ,                   // number of column indices
const int Jkind,                    // encoding of J
const int64_t Jcolon [3],           // begin:inc:end when J is a colon expression
const GrB_Matrix M,                 // mask, applied complemented (!M)
const bool Mask_struct, // if true, use only the structure of M, not its values
const GrB_BinaryOp accum,           // accumulator operator for C += A
const GrB_Matrix A,                 // right-hand-side matrix
const GrB_Matrix S,                 // S: symbolic extraction of C(I,J)
GB_Context Context
)
{
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GB_GET_C ;
GB_GET_MASK ;
const bool M_is_hyper = M->is_hyper ;
const int64_t Mnvec = M->nvec ;
const int64_t mvlen = M->vlen ;
GB_GET_A ;
GB_GET_S ;
GB_GET_ACCUM ;
//--------------------------------------------------------------------------
// Method 16: C(I,J)<!M> += A ; using S
//--------------------------------------------------------------------------
// Time: Close to optimal. All entries in A+S must be traversed.
// Compare with Method 04.
//--------------------------------------------------------------------------
// Parallel: Z=A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
//--------------------------------------------------------------------------
// Slice the work on the union pattern of A and S into ntasks tasks.
GB_SUBASSIGN_TWO_SLICE (A, S) ;
//--------------------------------------------------------------------------
// phase 1: create zombies, update entries, and count pending tuples
//--------------------------------------------------------------------------
int taskid ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE1 ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get A(:,j) and S(:,j)
//------------------------------------------------------------------
int64_t j = (Zh == NULL) ? k : Zh [k] ;
GB_GET_MAPPED_VECTOR (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X) ;
GB_GET_MAPPED_VECTOR (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S) ;
//------------------------------------------------------------------
// get M(:,j)
//------------------------------------------------------------------
int64_t pM_start, pM_end ;
GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ;
bool mjdense = (pM_end - pM_start) == mvlen ;
//------------------------------------------------------------------
// do a 2-way merge of S(:,j) and A(:,j)
//------------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
// int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and A (:,j) have entries
while (pS < pS_end && pA < pA_end)
{
int64_t iS = Si [pS] ;
int64_t iA = Ai [pA] ;
if (iS < iA)
{
// S (i,j) is present but A (i,j) is not
// ----[C . 1] or [X . 1]-------------------------------
// [C . 1]: action: ( C ): no change, with accum
// [X . 1]: action: ( X ): still a zombie
// ----[C . 0] or [X . 0]-------------------------------
// [C . 0]: action: ( C ): no change, with accum
// [X . 0]: action: ( X ): still a zombie
GB_NEXT (S) ;
}
else if (iA < iS)
{
// S (i,j) is not present, A (i,j) is present
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
// complement the mask: act only where M(i,j) is false
mij = !mij ;
if (mij)
{
// ----[. A 1]------------------------------------------
// [. A 1]: action: ( insert )
// a new entry: counted now, inserted in phase 2
task_pending++ ;
}
GB_NEXT (A) ;
}
else
{
// both S (i,j) and A (i,j) present
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
// complement the mask: act only where M(i,j) is false
mij = !mij ;
if (mij)
{
// ----[C A 1] or [X A 1]-------------------------------
// [C A 1]: action: ( =A ): A to C no accum
// [C A 1]: action: ( =C+A ): apply accum
// [X A 1]: action: ( undelete ): zombie lives
GB_C_S_LOOKUP ;
GB_withaccum_C_A_1_matrix ;
}
GB_NEXT (S) ;
GB_NEXT (A) ;
}
}
// ignore the remainder of S(:,j)
// while list A (:,j) has entries. List S (:,j) exhausted
while (pA < pA_end)
{
// S (i,j) is not present, A (i,j) is present
int64_t iA = Ai [pA] ;
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
// complement the mask: act only where M(i,j) is false
mij = !mij ;
if (mij)
{
// ----[. A 1]----------------------------------------------
// [. A 1]: action: ( insert )
task_pending++ ;
}
GB_NEXT (A) ;
}
}
GB_PHASE1_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// phase 2: insert pending tuples
//--------------------------------------------------------------------------
// Phase 2 repeats the same merge, this time actually inserting the
// entries that phase 1 only counted (task_pending).
GB_PENDING_CUMSUM ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE2 ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get A(:,j) and S(:,j)
//------------------------------------------------------------------
int64_t j = (Zh == NULL) ? k : Zh [k] ;
GB_GET_MAPPED_VECTOR (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X) ;
GB_GET_MAPPED_VECTOR (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S) ;
//------------------------------------------------------------------
// get M(:,j)
//------------------------------------------------------------------
int64_t pM_start, pM_end ;
GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ;
bool mjdense = (pM_end - pM_start) == mvlen ;
//------------------------------------------------------------------
// do a 2-way merge of S(:,j) and A(:,j)
//------------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and A (:,j) have entries
while (pS < pS_end && pA < pA_end)
{
int64_t iS = Si [pS] ;
int64_t iA = Ai [pA] ;
if (iS < iA)
{
// S (i,j) is present but A (i,j) is not
GB_NEXT (S) ;
}
else if (iA < iS)
{
// S (i,j) is not present, A (i,j) is present
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
// complement the mask: act only where M(i,j) is false
mij = !mij ;
if (mij)
{
// ----[. A 1]------------------------------------------
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT (Ax +(pA*asize)) ;
}
GB_NEXT (A) ;
}
else
{
// both S (i,j) and A (i,j) present
// already handled in phase 1; nothing to insert here
GB_NEXT (S) ;
GB_NEXT (A) ;
}
}
// while list A (:,j) has entries. List S (:,j) exhausted
while (pA < pA_end)
{
// S (i,j) is not present, A (i,j) is present
int64_t iA = Ai [pA] ;
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
// complement the mask: act only where M(i,j) is false
mij = !mij ;
if (mij)
{
// ----[. A 1]----------------------------------------------
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT (Ax +(pA*asize)) ;
}
GB_NEXT (A) ;
}
}
GB_PHASE2_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// finalize the matrix and return result
//--------------------------------------------------------------------------
GB_SUBASSIGN_WRAPUP ;
}
|
preGraphConstruction.c | /*
Copyright 2007, 2008 Daniel Zerbino (zerbino@ebi.ac.uk)
This file is part of Velvet.
Velvet is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Velvet is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Velvet; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "globals.h"
#include "preGraph.h"
#include "recycleBin.h"
#include "roadMap.h"
#include "readSet.h"
#include "concatenatedPreGraph.h"
#include "utility.h"
#include "kmer.h"
#include "tightString.h"
#include "binarySequences.h"
#define ADENINE 0
#define CYTOSINE 1
#define GUANINE 2
#define THYMINE 3
#ifdef _OPENMP
// Per-sequence offsets into the shared annotation array; presumably indexed
// by sequence ID during the parallel build -- confirm at the use sites.
Coordinate *annotationOffset = NULL;
// One OpenMP lock per preNode, used to serialise concurrent updates.
static omp_lock_t *nodeLocks = NULL;
// (Re)allocates and initialises one OpenMP lock per preNode of the preGraph.
// The array is sized preNodeCount + 1 so preNode IDs can index it directly.
// NOTE(review): a previous lock array, if any, is freed without calling
// omp_destroy_lock() on its elements (the old count is no longer known
// here); on some runtimes this leaks lock resources -- confirm.
static void createNodeLocks(PreGraph *preGraph)
{
IDnum nbNodes;
IDnum nodeIndex;
nbNodes = preNodeCount_pg(preGraph) + 1;
if (nodeLocks)
free (nodeLocks);
nodeLocks = mallocOrExit(nbNodes, omp_lock_t);
#pragma omp parallel for
for (nodeIndex = 0; nodeIndex < nbNodes; nodeIndex++)
omp_init_lock(nodeLocks + nodeIndex);
}
/* Block until the lock guarding the given preNode ID is acquired. */
static void lockNode(IDnum preNodeID)
{
	omp_set_lock(&nodeLocks[preNodeID]);
}
/* Release the lock held on the given preNode ID. */
static void unLockNode(IDnum preNodeID)
{
	omp_unset_lock(&nodeLocks[preNodeID]);
}
/* Acquire the locks of two preNodes (IDs may be negative: their absolute
 * value is used). The lower ID is always taken first so that concurrent
 * callers locking the same pair in opposite order cannot deadlock; when
 * both IDs coincide the lock is acquired only once. */
static void lockTwoNodes(IDnum preNodeID, IDnum preNode2ID)
{
	IDnum lower, higher;

	if (preNodeID < 0)
		preNodeID = -preNodeID;
	if (preNode2ID < 0)
		preNode2ID = -preNode2ID;

	lower = (preNodeID < preNode2ID) ? preNodeID : preNode2ID;
	higher = (preNodeID < preNode2ID) ? preNode2ID : preNodeID;

	omp_set_lock(&nodeLocks[lower]);
	if (higher != lower)
		omp_set_lock(&nodeLocks[higher]);
}
/* Release the locks taken by lockTwoNodes (IDs taken by absolute value);
 * releases only one lock when both IDs refer to the same preNode. */
static void unLockTwoNodes(IDnum preNodeID, IDnum preNode2ID)
{
	IDnum first = (preNodeID < 0) ? -preNodeID : preNodeID;
	IDnum second = (preNode2ID < 0) ? -preNode2ID : preNode2ID;

	omp_unset_lock(&nodeLocks[first]);
	if (first != second)
		omp_unset_lock(&nodeLocks[second]);
}
#endif
// Internal structure used to mark the ends of an Annotation
struct insertionMarker_st {
	Annotation *annot;	// annotation this marker delimits
	boolean isStart;	// true: marks the annotation start; false: its finish
} ATTRIBUTE_PACKED;
/* Position of a marker on its host sequence: the annotation's start
 * coordinate for a start marker, its finish coordinate otherwise. */
Coordinate getInsertionMarkerPosition(InsertionMarker * marker)
{
	return marker->isStart ? getStart(marker->annot)
			       : getFinish(marker->annot);
}
/* qsort comparator: orders insertion markers by position (ascending). */
int compareInsertionMarkers(const void *A, const void *B)
{
	Coordinate posA = getInsertionMarkerPosition((InsertionMarker *) A);
	Coordinate posB = getInsertionMarkerPosition((InsertionMarker *) B);

	/* sign of (posA - posB) without overflow risk */
	return (posA > posB) - (posA < posB);
}
// Applies mergeSort to each insertion marker list (in order of position)
static void
orderInsertionMarkers(InsertionMarker ** insMarkers,
IDnum * markerCounters, RoadMapArray * rdmaps)
{
IDnum sequenceIndex;
IDnum sequenceCounter = rdmaps->length;
velvetLog("Ordering insertion markers\n");
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (sequenceIndex = 1; sequenceIndex <= sequenceCounter;
sequenceIndex++) {
qsort(insMarkers[sequenceIndex],
markerCounters[sequenceIndex],
sizeof(InsertionMarker), compareInsertionMarkers);
}
}
// Creates insertion marker lists
static void
setInsertionMarkers(RoadMapArray * rdmaps,
IDnum * markerCounters,
InsertionMarker ** veryLastMarker,
InsertionMarker ** insertionMarkers)
{
IDnum sequenceCounter = rdmaps->length;
IDnum sequenceIndex, sequenceIndex2;
Coordinate totalCount = 0;
RoadMap *rdmap;
Annotation *annot = rdmaps->annotations;
InsertionMarker *nextMarker, *newMarker;
IDnum annotIndex, lastAnnotIndex;
InsertionMarker **insMarkers =
callocOrExit(rdmaps->length + 1, InsertionMarker *);
// Counting insertion markers
for (sequenceIndex = 1; sequenceIndex < sequenceCounter + 1;
sequenceIndex++) {
//velvetLog("Going through sequence %d\n", sequenceIndex);
rdmap = getRoadMapInArray(rdmaps, sequenceIndex - 1);
lastAnnotIndex = getAnnotationCount(rdmap);
// Set insertion markers in previous sequences :
for (annotIndex = 0; annotIndex < lastAnnotIndex;
annotIndex++) {
if (getAnnotSequenceID(annot) > 0) {
markerCounters[getAnnotSequenceID(annot)]
+= 2;
} else {
markerCounters[-getAnnotSequenceID(annot)]
+= 2;
}
totalCount += 2;
annot = getNextAnnotation(annot);
}
}
// Allocating space
*insertionMarkers = callocOrExit(totalCount, InsertionMarker);
*veryLastMarker = *insertionMarkers + totalCount;
// Pointing each node to its space
nextMarker = *insertionMarkers;
for (sequenceIndex = 1; sequenceIndex < sequenceCounter + 1;
sequenceIndex++) {
insMarkers[sequenceIndex] = nextMarker;
nextMarker = nextMarker + markerCounters[sequenceIndex];
markerCounters[sequenceIndex] = 0;
}
// Filling up space with data
annot = rdmaps->annotations;
for (sequenceIndex = 1; sequenceIndex < sequenceCounter + 1;
sequenceIndex++) {
//velvetLog("Going through sequence %d\n", sequenceIndex);
rdmap = getRoadMapInArray(rdmaps, sequenceIndex - 1);
lastAnnotIndex = getAnnotationCount(rdmap);
// Set insertion markers in previous sequences :
for (annotIndex = 0; annotIndex < lastAnnotIndex;
annotIndex++) {
sequenceIndex2 = getAnnotSequenceID(annot);
if (sequenceIndex2 > 0) {
newMarker =
insMarkers[sequenceIndex2] +
(markerCounters[sequenceIndex2])++;
newMarker->annot = annot;
newMarker->isStart = true;
newMarker =
insMarkers[sequenceIndex2] +
(markerCounters[sequenceIndex2])++;
newMarker->annot = annot;
newMarker->isStart = false;
} else {
incrementAnnotationCoordinates(annot);
newMarker =
insMarkers[-sequenceIndex2] +
(markerCounters[-sequenceIndex2])++;
newMarker->annot = annot;
newMarker->isStart = true;
newMarker =
insMarkers[-sequenceIndex2] +
(markerCounters[-sequenceIndex2])++;
newMarker->annot = annot;
newMarker->isStart = false;
}
annot = getNextAnnotation(annot);
}
}
orderInsertionMarkers(insMarkers, markerCounters, rdmaps);
free(insMarkers);
}
// Counts how many preNodes are to be created to allocate appropriate memory
static void
countPreNodes(RoadMapArray * rdmaps, PreGraph * preGraph,
IDnum * markerCounters, InsertionMarker * insertionMarkers,
InsertionMarker * veryLastMarker)
{
Annotation *annot = rdmaps->annotations;
InsertionMarker *currentMarker = insertionMarkers;
IDnum markerIndex, lastMarkerIndex;
IDnum sequenceIndex;
Coordinate currentPosition, nextStop;
IDnum preNodeCounter = 0;
RoadMap *rdmap;
IDnum annotIndex, lastAnnotIndex;
// Now that we have read all of the annotations, we go on to create the preNodes and tie them up
for (sequenceIndex = 1;
sequenceIndex <= sequenceCount_pg(preGraph);
sequenceIndex++) {
rdmap = getRoadMapInArray(rdmaps, sequenceIndex - 1);
annotIndex = 0;
lastAnnotIndex = getAnnotationCount(rdmap);
markerIndex = 0;
lastMarkerIndex = markerCounters[sequenceIndex];
currentPosition = 0;
while (annotIndex < lastAnnotIndex) {
if (markerIndex == lastMarkerIndex
|| getPosition(annot) <=
getInsertionMarkerPosition(currentMarker))
nextStop = getPosition(annot);
else
nextStop =
getInsertionMarkerPosition
(currentMarker);
if (currentPosition != nextStop) {
preNodeCounter++;
currentPosition = nextStop;
}
while (markerIndex < lastMarkerIndex
&& getInsertionMarkerPosition(currentMarker)
== currentPosition) {
currentMarker++;
markerIndex++;
}
while (annotIndex < lastAnnotIndex
&& getPosition(annot) == currentPosition) {
annot = getNextAnnotation(annot);
annotIndex++;
}
}
while (markerIndex < lastMarkerIndex) {
if (currentPosition ==
getInsertionMarkerPosition(currentMarker)) {
currentMarker++;
markerIndex++;
} else {
preNodeCounter++;
currentPosition =
getInsertionMarkerPosition
(currentMarker);
}
}
}
allocatePreNodeSpace_pg(preGraph, preNodeCounter);
}
// Rewrites the start/finish IDs stored on each annotation from the
// per-sequence provisional values set during preNode creation into final
// preNode IDs, using chains[] (first preNode ID of each sequence's chain).
// Negative annotation sequence IDs get negated node IDs. Frees the marker
// array once done.
static void convertInsertionMarkers(InsertionMarker * insertionMarkers,
				    InsertionMarker * veryLastMarker,
				    IDnum * chains)
{
	InsertionMarker *marker;
	Annotation *annot;

	for (marker = insertionMarkers; marker != veryLastMarker; marker++) {
		annot = marker->annot;

		if (getAnnotSequenceID(annot) > 0) {
			if (marker->isStart) {
				// 0 means "before the first node of the chain"
				if (getStartID(annot) == 0)
					setStartID(annot,
						   chains
						   [getAnnotSequenceID
						    (annot)]);
				else
					setStartID(annot,
						   getStartID(annot) + 1);
			}
		} else {
			if (marker->isStart)
				setStartID(annot, -getStartID(annot));
			else {
				if (getFinishID(annot) == 0)
					setFinishID(annot,
						    -chains
						    [-getAnnotSequenceID
						     (annot)]);
				else
					setFinishID(annot,
						    -getFinishID(annot) -
						    1);
			}
		}
	}

	free(insertionMarkers);
}
/* Record nodeID on whichever end of the annotation this marker designates. */
static void convertMarker(InsertionMarker * marker, IDnum nodeID)
{
	if (!marker->isStart)
		setFinishID(marker->annot, nodeID);
	else
		setStartID(marker->annot, nodeID);
}
// Creates the preNode using insertion marker and annotation lists for each sequence
static void
// Creates the preNodes using the insertion marker and annotation lists of
// each sequence. Walks every sequence in lockstep with its roadmap (exactly
// as countPreNodes did), reading the nucleotides either from the Sequences
// text file or from the in-memory binary sequence store, and cuts the
// sequence into preNodes at every annotation/marker boundary. chains[] is
// filled with the first preNode ID of each sequence.
createPreNodes(RoadMapArray * rdmaps, PreGraph * preGraph,
	       IDnum * markerCounters, InsertionMarker * insertionMarkers,
	       InsertionMarker * veryLastMarker, IDnum * chains,
	       SequencesReader *seqReadInfo, int WORDLENGTH)
{
	char *sequenceFilename = seqReadInfo->m_seqFilename;
	Annotation *annot = rdmaps->annotations;
	IDnum latestPreNodeID;
	InsertionMarker *currentMarker = insertionMarkers;
	IDnum sequenceIndex;
	Coordinate currentPosition, nextStop;
	IDnum preNodeCounter = 1;	// preNode IDs start at 1
	FILE *file = NULL;
	char line[50000];
	int lineLength = 50000;
	Coordinate readIndex;
	boolean tooShort;
	Kmer initialKmer;
	char c;
	RoadMap *rdmap;
	IDnum annotIndex, lastAnnotIndex;
	IDnum markerIndex, lastMarkerIndex;

	if (!seqReadInfo->m_bIsBinary) {
		file = fopen(sequenceFilename, "r");
		if (file == NULL)
			exitErrorf(EXIT_FAILURE, true, "Could not read %s", sequenceFilename);
		// Reading sequence descriptor in first line
		if (sequenceCount_pg(preGraph) > 0 && !fgets(line, lineLength, file))
			exitErrorf(EXIT_FAILURE, true, "%s incomplete.", sequenceFilename);
		seqReadInfo->m_pFile = file;
	}

	// Now that we have read all of the annotations, we go on to create the preNodes and tie them up
	for (sequenceIndex = 1;
	     sequenceIndex <= sequenceCount_pg(preGraph);
	     sequenceIndex++) {
		if (sequenceIndex % 1000000 == 0)
			velvetLog("Sequence %li / %li\n", (long) sequenceIndex,
				  (long) sequenceCount_pg(preGraph));

		// advance the text cursor to this sequence's FASTA header
		if (!seqReadInfo->m_bIsBinary) {
			while (line[0] != '>')
				if (!fgets(line, lineLength, file))
					exitErrorf(EXIT_FAILURE, true, "%s incomplete.", sequenceFilename);
		}

		rdmap = getRoadMapInArray(rdmaps, sequenceIndex - 1);
		annotIndex = 0;
		lastAnnotIndex = getAnnotationCount(rdmap);
		markerIndex = 0;
		lastMarkerIndex = markerCounters[sequenceIndex];
		currentPosition = 0;

		// Reading first (k-1) nucleotides to prime the kmer
		tooShort = false;
		clearKmer(&initialKmer);
		//velvetLog("Initial kmer: ");
		TightString *tString = NULL;
		char *strString = NULL;
		if (seqReadInfo->m_bIsBinary) {
			tString = getTightStringInArray(seqReadInfo->m_sequences->tSequences, sequenceIndex - 1);
			strString = readTightString(tString);
		}
		for (readIndex = 0; readIndex < WORDLENGTH - 1;
		     readIndex++) {
			if (seqReadInfo->m_bIsBinary) {
				if (readIndex >= tString->length) {
					tooShort = true;
					break;
				}
				c = strString[readIndex];
			} else {
				c = getc(file);
				while (c == '\n' || c == '\r')
					c = getc(file);
				// '>' = next record, 'M' = map line, EOF: read too short
				if (c == '>' || c == 'M' || c == EOF) {
					ungetc(c, file);
					tooShort = true;
					break;
				}
			}
			switch (c) {
			case 'A':
			case 'N':	// Ns are coerced to A
				pushNucleotide(&initialKmer, ADENINE);
				break;
			case 'C':
				pushNucleotide(&initialKmer, CYTOSINE);
				break;
			case 'G':
				pushNucleotide(&initialKmer, GUANINE);
				break;
			case 'T':
				pushNucleotide(&initialKmer, THYMINE);
				break;
			default:
				velvetLog
				    ("Irregular sequence file: are you sure your Sequence and Roadmap file come from the same source?\n");
				fflush(stdout);
				abort();
			}
		}

		// sequences shorter than k-1 produce no preNode at all
		if (tooShort) {
			//velvetLog("Skipping short read.. %d\n", sequenceIndex);
			chains[sequenceIndex] = preNodeCounter;
			if (seqReadInfo->m_bIsBinary) {
				free(strString);
			} else {
				if (!fgets(line, lineLength, file) && sequenceIndex < sequenceCount_pg(preGraph))
					exitErrorf(EXIT_FAILURE, true, "%s incomplete.", sequenceFilename);
			}
			continue;
		}

		// binary path: expose the read cursor to addPreNodeToPreGraph_pg
		char *currString = NULL;
		if (seqReadInfo->m_bIsBinary) {
			currString = &strString[readIndex];
			seqReadInfo->m_ppCurrString = &currString;
		}
		latestPreNodeID = 0;
		while (annotIndex < lastAnnotIndex) {
			// next stop: nearer of next annotation / next marker
			if (markerIndex == lastMarkerIndex
			    || getPosition(annot) <=
			    getInsertionMarkerPosition(currentMarker))
				nextStop = getPosition(annot);
			else {
				nextStop =
				    getInsertionMarkerPosition
				    (currentMarker);
			}

			// non-empty interval -> emit one preNode
			if (currentPosition != nextStop) {
				if (seqReadInfo->m_bIsBinary) {
					if (readIndex >= tString->length) {
						velvetLog("readIndex %ld beyond string len %ld\n", (uint64_t) readIndex, (uint64_t) tString->length);
						exit(1);
					}
				}
				//if (sequenceIndex == 481)
				//	velvetLog("Adding pre nodes from %lli to %lli\n", (long long) currentPosition, (long long) nextStop);
				addPreNodeToPreGraph_pg(preGraph,
							currentPosition,
							nextStop,
							seqReadInfo,
							&initialKmer,
							preNodeCounter);
				if (latestPreNodeID == 0) {
					chains[sequenceIndex] =
					    preNodeCounter;
				}
				latestPreNodeID = preNodeCounter++;
				currentPosition = nextStop;
			}

			// markers at this stop point to the node just created
			while (markerIndex < lastMarkerIndex
			       && getInsertionMarkerPosition(currentMarker)
			       == nextStop) {
				convertMarker(currentMarker,
					      latestPreNodeID);
				currentMarker++;
				markerIndex++;
			}

			// annotations at this stop: skip their nucleotides,
			// keeping the rolling kmer up to date
			while (annotIndex < lastAnnotIndex
			       && getPosition(annot) == nextStop) {
				for (readIndex = 0;
				     readIndex <
				     getAnnotationLength(annot);
				     readIndex++) {
					if (seqReadInfo->m_bIsBinary) {
						c = *currString;
						currString += 1;	// increment the pointer
					} else {
						c = getc(file);
						while (!isalpha(c))
							c = getc(file);
					}

					//if (sequenceIndex == 481)
					//	velvetLog("(%c)", c);
					switch (c) {
					case 'A':
					case 'N':
						pushNucleotide(&initialKmer, ADENINE);
						break;
					case 'C':
						pushNucleotide(&initialKmer, CYTOSINE);
						break;
					case 'G':
						pushNucleotide(&initialKmer, GUANINE);
						break;
					case 'T':
						pushNucleotide(&initialKmer, THYMINE);
						break;
					default:
						velvetLog
						    ("Irregular sequence file: are you sure your Sequence and Roadmap file come from the same source?\n");
						fflush(stdout);
#ifdef DEBUG
						abort();
#endif
						exit(1);
					}
				}

				annot = getNextAnnotation(annot);
				annotIndex++;
			}
		}

		// markers past the last annotation
		while (markerIndex < lastMarkerIndex) {
			if (currentPosition ==
			    getInsertionMarkerPosition(currentMarker)) {
				convertMarker(currentMarker,
					      latestPreNodeID);
				currentMarker++;
				markerIndex++;
			} else {
				nextStop =
				    getInsertionMarkerPosition
				    (currentMarker);
				//if (sequenceIndex == 481)
				//	velvetLog("Adding pre nodes from %lli to %lli\n", (long long) currentPosition, (long long) nextStop);
				addPreNodeToPreGraph_pg(preGraph,
							currentPosition,
							nextStop, seqReadInfo,
							&initialKmer,
							preNodeCounter);
				if (latestPreNodeID == 0)
					chains[sequenceIndex] =
					    preNodeCounter;
				latestPreNodeID = preNodeCounter++;
				currentPosition =
				    getInsertionMarkerPosition
				    (currentMarker);
			}
		}

		if (seqReadInfo->m_bIsBinary) {
			free(strString);
		} else {
			// End of sequence
			if (!fgets(line, lineLength, file) && sequenceIndex < sequenceCount_pg(preGraph))
				exitErrorf(EXIT_FAILURE, true, "%s incomplete.", sequenceFilename);
			//velvetLog(" \n");
		}

		// sequence produced no node: chain points past the last ID
		if (latestPreNodeID == 0)
			chains[sequenceIndex] = preNodeCounter;
	}

	free(markerCounters);
	if (!seqReadInfo->m_bIsBinary) {
		fclose(file);
	}
}
/* Advance the walk from *currentPreNodeID to nextPreNodeID (no-op when the
 * latter is 0). Reference sequences only bump the target's reference-marker
 * count; ordinary reads create a preArc between the two nodes. The cursor
 * node and running position are updated on exit. */
static void connectPreNodeToTheNext(IDnum * currentPreNodeID,
				    IDnum nextPreNodeID,
				    Coordinate * currentPosition,
				    IDnum sequenceIndex,
				    boolean isReference,
				    PreGraph * preGraph)
{
	if (nextPreNodeID == 0)
		return;

#ifdef _OPENMP
	lockTwoNodes(*currentPreNodeID, nextPreNodeID);
#endif
	if (isReference) {
		incrementNodeReferenceMarkerCount_pg(preGraph, nextPreNodeID);
	} else if (*currentPreNodeID != 0) {
		createPreArc_pg(*currentPreNodeID, nextPreNodeID, preGraph);
	}
#ifdef _OPENMP
	unLockTwoNodes(*currentPreNodeID, nextPreNodeID);
#endif

	*currentPreNodeID = nextPreNodeID;
	*currentPosition += getPreNodeLength_pg(nextPreNodeID, preGraph);
}
/* Next preNode within this sequence's own chain, or 0 when the chain is
 * exhausted (the candidate belongs to the next sequence's chain) or the
 * preNode array itself is. */
static IDnum chooseNextInternalPreNode(IDnum currentPreNodeID,
				       IDnum sequenceIndex,
				       PreGraph * preGraph, IDnum * chains)
{
	IDnum candidate = currentPreNodeID + 1;

	if (currentPreNodeID >= preNodeCount_pg(preGraph))
		return 0;
	if (sequenceIndex < sequenceCount_pg(preGraph)
	    && candidate >= chains[sequenceIndex + 1])
		return 0;
	return candidate;
}
/* Thread the walk through all preNodes covered by one annotation: jump to
 * its start node, then step node by node until the finish node is reached. */
static void connectAnnotation(IDnum * currentPreNodeID, Annotation * annot,
			      Coordinate * currentPosition,
			      IDnum sequenceIndex, boolean isReference,
			      PreGraph * preGraph)
{
	connectPreNodeToTheNext(currentPreNodeID, getStartID(annot),
				currentPosition, sequenceIndex, isReference,
				preGraph);

	while (*currentPreNodeID != getFinishID(annot))
		connectPreNodeToTheNext(currentPreNodeID,
					*currentPreNodeID + 1,
					currentPosition, sequenceIndex,
					isReference, preGraph);
}
// Second pass over an annotation for reference sequences: instead of arcs,
// append a PreMarker to every preNode the annotation covers, chaining each
// new marker onto *previous. Each node is locked individually while its
// marker list is extended.
static void reConnectAnnotation(IDnum * currentPreNodeID, Annotation * annot,
				Coordinate * currentPosition,
				IDnum sequenceIndex,
				PreGraph * preGraph,
				PreMarker ** previous)
{
	IDnum nextPreNodeID = getStartID(annot);

#ifdef _OPENMP
	lockNode(nextPreNodeID);
#endif
	*previous = addPreMarker_pg(preGraph,
				    nextPreNodeID,
				    sequenceIndex,
				    currentPosition,
				    *previous);
#ifdef _OPENMP
	unLockNode(nextPreNodeID);
#endif

	// walk consecutive node IDs up to the annotation's finish node
	while (*currentPreNodeID != getFinishID(annot)) {
		nextPreNodeID = (*currentPreNodeID) + 1;
#ifdef _OPENMP
		lockNode(nextPreNodeID);
#endif
		*previous = addPreMarker_pg(preGraph,
					    nextPreNodeID,
					    sequenceIndex,
					    currentPosition,
					    *previous);
#ifdef _OPENMP
		unLockNode(nextPreNodeID);
#endif
		*currentPreNodeID = nextPreNodeID;
	}
}
// Lays down PreMarkers for every reference sequence by replaying its roadmap
// walk: internal chain nodes and annotated nodes are visited in position
// order, and a PreMarker is appended to each preNode touched.
//
// Fix: the address-of expressions had been corrupted by an HTML-entity
// mangling ("&current..." -> "¤t..."), which broke compilation; the
// intended "&currentPosition" / "&currentPreNodeID" arguments are restored.
static void createPreMarkers(RoadMapArray * rdmaps, PreGraph * preGraph,
			     IDnum * chains)
{
	IDnum sequenceIndex;
	IDnum referenceCount = rdmaps->referenceCount;
#ifndef _OPENMP
	Annotation *annot = rdmaps->annotations;
#endif

#ifdef _OPENMP
	// cap threads: beyond ~8 the per-node locking dominates
	int threads = omp_get_max_threads();
	if (threads > 8)
		threads = 8;

#pragma omp parallel for num_threads(threads)
#endif
	for (sequenceIndex = 1;
	     sequenceIndex <= referenceCount;
	     sequenceIndex++) {
#ifdef _OPENMP
		// each thread seeks its own annotation cursor via the
		// precomputed per-sequence offsets
		Annotation *annot = getAnnotationInArray(rdmaps->annotations, annotationOffset[sequenceIndex - 1]);
#endif
		RoadMap *rdmap;
		Coordinate currentPosition, currentInternalPosition;
		IDnum currentPreNodeID, nextInternalPreNodeID;
		IDnum annotIndex, lastAnnotIndex;
		PreMarker * previous;

		if (sequenceIndex % 1000000 == 0)
			velvetLog("Connecting %li / %li\n", (long) sequenceIndex,
				  (long) sequenceCount_pg(preGraph));

		rdmap = getRoadMapInArray(rdmaps, sequenceIndex - 1);
		annotIndex = 0;
		lastAnnotIndex = getAnnotationCount(rdmap);
		nextInternalPreNodeID = chooseNextInternalPreNode
		    (chains[sequenceIndex] - 1, sequenceIndex,
		     preGraph, chains);
		previous = NULL;
		currentPosition = 0;
		currentInternalPosition = 0;
		currentPreNodeID = 0;

		// Recursion up to last annotation
		while (annotIndex < lastAnnotIndex
		       || nextInternalPreNodeID != 0) {
			if (annotIndex == lastAnnotIndex
			    || (nextInternalPreNodeID != 0
				&& currentInternalPosition <
				getPosition(annot))) {
				// next stop is an internal chain node
#ifdef _OPENMP
				lockNode(nextInternalPreNodeID);
#endif
				previous = addPreMarker_pg(preGraph,
							   nextInternalPreNodeID,
							   sequenceIndex,
							   &currentPosition,
							   previous);
#ifdef _OPENMP
				unLockNode(nextInternalPreNodeID);
#endif
				currentPreNodeID = nextInternalPreNodeID;
				nextInternalPreNodeID =
				    chooseNextInternalPreNode
				    (currentPreNodeID, sequenceIndex,
				     preGraph, chains);
				currentInternalPosition +=
				    getPreNodeLength_pg(currentPreNodeID,
							preGraph);
			} else {
				// next stop is an annotated stretch
				reConnectAnnotation(&currentPreNodeID, annot,
						    &currentPosition,
						    sequenceIndex,
						    preGraph,
						    &previous);
				annot = getNextAnnotation(annot);
				annotIndex++;
			}
		}
	}
}
// Threads each sequences and creates preArcs according to road map indications
static void connectPreNodes(RoadMapArray * rdmaps, PreGraph * preGraph,
IDnum * chains)
{
IDnum sequenceIndex;
IDnum referenceCount = rdmaps->referenceCount;
#ifdef _OPENMP
annotationOffset = mallocOrExit(rdmaps->length + 1, Coordinate);
annotationOffset[0] = 0;
for (sequenceIndex = 1; sequenceIndex <= rdmaps->length; sequenceIndex++)
annotationOffset[sequenceIndex] = annotationOffset[sequenceIndex - 1] +
getAnnotationCount(getRoadMapInArray(rdmaps, sequenceIndex - 1));
#else
Annotation *annot = rdmaps->annotations;
#endif
if (rdmaps->referenceCount > 0)
allocatePreMarkerCountSpace_pg(preGraph);
#ifdef _OPENMP
int threads = omp_get_max_threads();
if (threads > 8)
threads = 8;
#pragma omp parallel for num_threads(threads)
#endif
for (sequenceIndex = 1;
sequenceIndex <= sequenceCount_pg(preGraph);
sequenceIndex++) {
#ifdef _OPENMP
Annotation *annot = getAnnotationInArray(rdmaps->annotations, annotationOffset[sequenceIndex - 1]);
#endif
RoadMap *rdmap;
Coordinate currentPosition, currentInternalPosition;
IDnum currentPreNodeID, nextInternalPreNodeID;
IDnum annotIndex, lastAnnotIndex;
boolean isReference;
if (sequenceIndex % 1000000 == 0)
velvetLog("Connecting %li / %li\n", (long) sequenceIndex,
(long) sequenceCount_pg(preGraph));
rdmap = getRoadMapInArray(rdmaps, sequenceIndex - 1);
annotIndex = 0;
lastAnnotIndex = getAnnotationCount(rdmap);
nextInternalPreNodeID = chooseNextInternalPreNode
(chains[sequenceIndex] - 1, sequenceIndex,
preGraph, chains);
isReference = (sequenceIndex <= referenceCount);
currentPosition = 0;
currentInternalPosition = 0;
currentPreNodeID = 0;
// Recursion up to last annotation
while (annotIndex < lastAnnotIndex
|| nextInternalPreNodeID != 0) {
if (annotIndex == lastAnnotIndex
|| (nextInternalPreNodeID != 0
&& currentInternalPosition <
getPosition(annot))) {
connectPreNodeToTheNext(¤tPreNodeID,
nextInternalPreNodeID,
¤tPosition,
sequenceIndex,
isReference,
preGraph);
nextInternalPreNodeID =
chooseNextInternalPreNode
(currentPreNodeID, sequenceIndex,
preGraph, chains);
currentInternalPosition +=
getPreNodeLength_pg(currentPreNodeID,
preGraph);
} else {
connectAnnotation(¤tPreNodeID, annot,
¤tPosition,
sequenceIndex, isReference,
preGraph);
annot = getNextAnnotation(annot);
annotIndex++;
}
}
}
if (rdmaps->referenceCount > 0) {
allocatePreMarkerSpace_pg(preGraph);
createPreMarkers(rdmaps, preGraph, chains);
}
#ifdef _OPENMP
free(annotationOffset);
annotationOffset = NULL;
#endif
}
// Post construction memory deallocation routine (of sorts, could certainly be optimized)
static void
cleanUpMemory(PreGraph * preGraph, RoadMapArray * rdmaps, IDnum * chains)
{
	destroyRoadMapArray(rdmaps);	// roadmaps are no longer needed
	free(chains);			// per-sequence chain markers
}
// The full monty, wrapped up in one function
// The full preGraph construction pipeline: build insertion markers, count
// and create the preNodes, resolve marker IDs, then connect the nodes with
// preArcs (and PreMarkers for references). Returns the finished preGraph.
PreGraph *newPreGraph_pg(RoadMapArray * rdmapArray, SequencesReader *seqReadInfo)
{
	int WORDLENGTH = rdmapArray->WORDLENGTH;
	IDnum sequenceCount = rdmapArray->length;
	// both arrays are 1-indexed by sequence ID (slot 0 unused)
	IDnum *markerCounters = callocOrExit(sequenceCount + 1, IDnum);
	IDnum *chains = callocOrExit(sequenceCount + 1, IDnum);
	InsertionMarker *insertionMarkers;
	InsertionMarker *veryLastMarker;

	PreGraph *preGraph =
	    emptyPreGraph_pg(sequenceCount, rdmapArray->referenceCount, rdmapArray->WORDLENGTH, rdmapArray->double_strand);

	velvetLog("Creating insertion markers\n");
	setInsertionMarkers(rdmapArray, markerCounters, &veryLastMarker,
			    &insertionMarkers);

	velvetLog("Counting preNodes\n");
	countPreNodes(rdmapArray, preGraph, markerCounters,
		      insertionMarkers, veryLastMarker);

	velvetLog("%li preNodes counted, creating them now\n",
		  (long) preNodeCount_pg(preGraph));
	// createPreNodes frees markerCounters
	createPreNodes(rdmapArray, preGraph, markerCounters,
		       insertionMarkers, veryLastMarker, chains,
		       seqReadInfo, WORDLENGTH);

	velvetLog("Adjusting marker info...\n");
	// convertInsertionMarkers frees insertionMarkers
	convertInsertionMarkers(insertionMarkers, veryLastMarker, chains);

#ifdef _OPENMP
	createNodeLocks(preGraph);
#endif
	velvetLog("Connecting preNodes\n");
	connectPreNodes(rdmapArray, preGraph, chains);

	velvetLog("Cleaning up memory\n");
	cleanUpMemory(preGraph, rdmapArray, chains);
#ifdef _OPENMP
	free(nodeLocks);
	nodeLocks = NULL;
#endif
	velvetLog("Done creating preGraph\n");

	return preGraph;
}
|
GB_binop__min_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__min_fp64)
// A.*B function (eWiseMult):         GB (_AemultB_01__min_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__min_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__min_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__min_fp64)
// A*D function (colscale): GB (_AxD__min_fp64)
// D*A function (rowscale): GB (_DxB__min_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__min_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__min_fp64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_fp64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_fp64)
// C=scalar+B GB (_bind1st__min_fp64)
// C=scalar+B' GB (_bind1st_tran__min_fp64)
// C=A+scalar GB (_bind2nd__min_fp64)
// C=A'+scalar GB (_bind2nd_tran__min_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = fmin (aij, bij)
// Type- and operator-specific macros consumed by the generic templates
// #include'd by the functions below (generated specialization for MIN/fp64).
#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    double bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = fmin (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
// (set via GraphBLAS compile-time operator/type selection flags)
#define GB_DISABLE \
    (GxB_NO_MIN || GxB_NO_FP64 || GxB_NO_MIN_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__min_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // all work is done by the shared template, specialized by the macros above
    #include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__min_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__min_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__min_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    // operator compiled out: caller falls back to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE: unreachable (the block above always returns) — artifact of the
    // code generator; kept as-is since this file is auto-generated.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__min_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the same pattern as A; only its values are computed here
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__min_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C has the same pattern as B; only its values are computed here
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__min_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspace for slicing M, A, and B; released by GB_FREE_WORK
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__min_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__min_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for MIN (commutative), so only the #else branch
    // below is compiled for this specialization.
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__min_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__min_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__min_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries absent from B's bitmap (GBB is true for full matrices)
        if (!GBB (Bb, p)) continue ;
        double bij = Bx [p] ;
        Cx [p] = fmin (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = fmin (Ax [p], y) for every entry present in A (Ab is A's bitmap,
// or NULL when A is full).  No typecasting: all values are fp64.
GrB_Info GB (_bind2nd__min_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // only touch positions that hold an entry of A
        if (GBB (Ab, k))
        {
            Cx [k] = fmin (Ax [k], y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = fmin (x, aij) ; \
}
// C = min (x, A'): the scalar x is bound as the first operand; the transpose
// template below applies GB_CAST_OP to each entry while transposing A.
GrB_Info GB (_bind1st_tran__min_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for subsequent generated code (identical here, by design)
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = fmin (aij, y) ; \
}
// C = min (A', y): the scalar y is bound as the second operand; the transpose
// template below applies GB_CAST_OP to each entry while transposing A.
GrB_Info GB (_bind2nd_tran__min_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// kernel compiled out via GB_control.h; caller falls back to generic code
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__minv_fp32_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp32_int8
// op(A') function: GB_tran__minv_fp32_int8
// C type: float
// A type: int8_t
// cast: float cij = (float) aij
// unaryop: cij = (1.0F)/aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = (1.0F)/x ;
// casting
#define GB_CASTING(z, aij) \
float z = (float) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = 1/(float) Ax [p] for all anz entries: cast int8 to fp32, then
// apply the MINV (multiplicative inverse) operator.  Cx and Ax may be
// aliased; the macros GB_GETA / GB_CASTING / GB_OP defined above are
// written out inline here for readability.
GrB_Info GB_unop__minv_fp32_int8
(
    float *Cx,          // Cx and Ax may be aliased
    int8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // expansion of GB_CAST_OP (p, p):
        int8_t aij = Ax [p] ;       // aij = Ax [p]
        float z = (float) aij ;     // typecast int8 -> fp32
        Cx [p] = (1.0F)/z ;         // cij = minv (z)
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transposed variant of GB_unop__minv_fp32_int8; the loop structure comes
// from GB_unaryop_transpose.c, specialized by the GB_* macros above.
GrB_Info GB_tran__minv_fp32_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
// operator/type combination disabled at compile time (see GB_DISABLE above)
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
spinful_fermion_basis_core.h | #ifndef _SPINFUL_FERMION_BASIS_CORE_OP_H
#define _SPINFUL_FERMION_BASIS_CORE_OP_H
#include <complex>
#include "general_basis_core.h"
#include "local_pcon_basis_core.h"
#include "spinless_fermion_basis_core.h"
#include "numpy/ndarraytypes.h"
// Apply a symmetry map to a spinful-fermion Fock state.
//
// s    : input state; the low 2*N bits hold the occupations (N sites per
//        spin species).
// map  : one entry per bit position (length 2*N); map[i] >= 0 sends site i
//        to site map[i], while a negative entry encodes the mapped site as
//        -(j)-type convention together with an occupation flip (see the
//        (n^1) branch below).
// N    : number of sites per spin species.
// sign : multiplied by the fermionic sign (+1/-1) picked up by permuting
//        the occupied creation operators into canonical order.
//
// Returns the mapped state.
template<class I>
I inline spinful_fermion_map_bits(I s,const int map[],const int N,int &sign){
I ss = 0;
int pos_list[64];   // new bit positions of the occupied sites (at most 64)
int np = 0;
bool f_count = 0;   // running sign parity
// The state has exactly 2*N bits, so the last valid map index is 2*N-1.
// (Starting at i=2*N would read map[2*N] out of bounds and pair every bit
// with the wrong map entry.)
for(int i=2*N-1;i>=0;i--){
int j = map[i];
int n = (s&1);
// The mapped bit position must match the shift used for ss below:
// 2*N+j for j<0 and 2*N-j-1 for j>=0.  Using N here instead of 2*N
// would scramble the relative order of negative and non-negative map
// entries and corrupt the permutation parity.
if(n){pos_list[np]=( j<0 ? 2*N + j : 2*N - j - 1); ++np;}
ss ^= ( j<0 ? (n^1)<<(2*N+j) : n<<(2*N-j-1) );
// extra sign for occupied odd bits under occupation-flipping maps
f_count ^= (n && (i&1)) && (j<0);
s >>= 1;
}
//starting at 2nd element as first element is already sorted.
//Loop Invariant - left part of the array is already sorted.
// Insertion sort counting swaps: each adjacent transposition of two
// occupied positions flips the fermionic sign once.
if(np > 1){
for (int i = 1; i < np; i++) {
int moveMe = pos_list[i];
int j = i;
while (j > 0 && moveMe > pos_list[j - 1]) {
//Move element
pos_list[j] = pos_list[j - 1];
--j;
//increase the count as element swap is happend
f_count ^= 1;
}
pos_list[j] = moveMe;
}
}
sign *= (f_count ? -1 : 1);
return ss;
}
// Basis core for spinful fermions: builds on the particle-conserving local
// basis core and adds fermionic symmetry mapping and operator-string
// application.  I is the unsigned integer type holding the Fock states.
template<class I>
class spinful_fermion_basis_core : public local_pcon_basis_core<I>
{
public:
spinful_fermion_basis_core(const int _N) : \
local_pcon_basis_core<I>::local_pcon_basis_core(_N) {}
spinful_fermion_basis_core(const int _N,const int _nt,const int _maps[], \
const int _pers[], const int _qs[]) : \
local_pcon_basis_core<I>::local_pcon_basis_core(_N,_nt,_maps,_pers,_qs) {}
~spinful_fermion_basis_core() {}
// Apply symmetry map n_map to state s, accumulating the fermionic sign.
// NOTE(review): the map stride n and the N passed to
// spinful_fermion_map_bits are both general_basis_core<I>::N here; the
// map-bits routine treats its N as sites-per-species (2*N bits) —
// confirm the stored maps use the same convention.
I map_state(I s,int n_map,int &sign){
if(general_basis_core<I>::nt<=0){
return s;
}
const int n = general_basis_core<I>::N;
return spinful_fermion_map_bits(s,&general_basis_core<I>::maps[n_map*n],n,sign);
}
// Vectorized map_state over M states; per-state signs are read from and
// written back to the signed-char array sign[].
void map_state(I s[],npy_intp M,int n_map,signed char sign[]){
if(general_basis_core<I>::nt<=0){
return;
}
const int n = general_basis_core<I>::N;
const int * map = &general_basis_core<I>::maps[n_map*n];
#pragma omp for schedule(static,1)
for(npy_intp i=0;i<M;i++){
int temp_sign = sign[i];
s[i] = spinful_fermion_map_bits(s[i],map,n,temp_sign);
sign[i] = temp_sign;
}
}
// Apply the operator string opstr (acting right-to-left) at sites indx[]
// to state r, accumulating the matrix element in m.  Returns 0 on
// success, -1 on an unrecognized operator character.
int op(I &r,std::complex<double> &m,const int n_op,const char opstr[],const int indx[]){
I s = r;
I one = 1;
for(int j=n_op-1;j>-1;j--){
// site index -> bit position (site 0 is the most significant bit)
int ind = 2*general_basis_core<I>::N-indx[j]-1;
// fermionic sign from anticommuting past occupied modes;
// NOTE(review): assumes bit_count(r,ind) returns the number of set
// bits of r below/above position ind — confirm against the helper
// in spinless_fermion_basis_core.h.
I f_count = bit_count(r,ind);
m *= std::complex<double>((f_count&1)?-1:1);
I b = (one << ind);
bool a = bool((r >> ind)&one);
char op = opstr[j];
switch(op){
case 'z':
// z = n - 1/2: +1/2 if occupied, -1/2 if empty
m *= (a?0.5:-0.5);
break;
case 'n':
// number operator: annihilates empty states
m *= (a?1:0);
break;
case '+':
// creation: vanishes on occupied modes, else flips the bit
m *= (a?0:1);
r ^= b;
break;
case '-':
// annihilation: vanishes on empty modes, else flips the bit
m *= (a?1:0);
r ^= b;
break;
case 'I':
break;
default:
return -1;
}
// matrix element vanished: restore the input state and stop early
if(std::abs(m)==0){
r = s;
break;
}
}
return 0;
}
};
#endif
|
entrega-malloc.c | #include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include "ctimer.h"
/* C = A + B for little-endian base-10 digit arrays of length N (one decimal
   digit per int, least-significant digit first).  Any carry out of the top
   digit is dropped; callers size N so the true sum fits.  C may alias A/B. */
void add (int A[], int B[], int C[], int N) {
    int carry = 0;
    for (int pos = 0; pos < N; pos++) {
        int digit = A[pos] + B[pos] + carry;
        if (digit >= 10) {
            digit -= 10;
            carry = 1;
        } else {
            carry = 0;
        }
        C[pos] = digit;
    }
}
/* B = n * A for a little-endian base-10 digit array A of length N, where n
   is a single decimal digit.  Carries propagate upward; any carry out of
   the top digit is dropped (callers size N accordingly). */
void multiply_one_digit (int A[], int B[], int n, int N) {
    int carry = 0;
    for (int pos = 0; pos < N; pos++) {
        int prod = n * A[pos] + carry;
        if (prod >= 10) {
            carry = prod / 10;
            prod %= 10;
        } else {
            carry = 0;
        }
        B[pos] = prod;
    }
}
/* Shift the digit array A (length N) up by n positions — i.e. multiply by
   10^n — discarding the n most-significant digits and zero-filling the
   vacated low-order positions. */
void shift_left (int A[], int n, int N) {
    int pos;
    /* move digits toward the top, starting from the most-significant end */
    for (pos = N - 1; pos >= n; pos--) {
        A[pos] = A[pos - n];
    }
    /* zero the freed low-order slots */
    while (pos >= 0) {
        A[pos] = 0;
        pos--;
    }
}
/* Schoolbook multiplication of little-endian digit arrays: accumulates
   A * B into C (length N).  For each digit of A, forms the single-digit
   partial product of B, shifts it into place, and adds it to C.  The
   caller must zero-initialize C before calling. */
void multiply (int A[], int B[], int C[], int N) {
    int digit;
    int partial[N];     /* scratch for one shifted partial product */
    for (digit = 0; digit < N; digit++) {
        multiply_one_digit (B, partial, A[digit], N);
        shift_left (partial, digit, N);
        add (C, partial, C, N);
    }
}
/*
 * Long-number multiplication demo: multiplies the two decimal numbers given
 * as argv[1] and argv[2] (base-10 digit arrays, least-significant digit
 * first), first sequentially and then in parallel with OpenMP (4 threads,
 * digits of A distributed round-robin), printing both results and timings.
 *
 * Fixes over the previous revision: explicit `int main` return type
 * (implicit int is invalid in C99+), copy buffers sized len+1 so strcpy's
 * terminating '\0' no longer overflows them, an argc guard, and an explicit
 * return value.
 */
int main(int argc, char **argv)
{
    /* VARIABLE DECLARATIONS */
    double t1, t2, tucpu, tscpu;

    /* Two operands are required; without this guard strlen(argv[1]) would
       dereference a null pointer. */
    if (argc < 3) {
        fprintf(stderr, "usage: %s <number> <number>\n", argv[0]);
        return 1;
    }

    int len1 = strlen(argv[1]);
    int len2 = strlen(argv[2]);
    int N = len1 + len2;      /* a product never needs more digits than this */
    int A[N], B[N], C[N];
    for (int i = 0; i < N; i++) {
        A[i] = 0;
        B[i] = 0;
        C[i] = 0;
    }

    /* FILL THE DIGIT ARRAYS (least-significant digit at index 0).
       The buffers need len+1 bytes: strcpy also writes the trailing '\0'. */
    char k[len1 + 1];
    strcpy(k, argv[1]);
    for (int i = 0; i < len1; i++) {
        A[i] = k[len1 - 1 - i] - '0';
    }
    char l[len2 + 1];
    strcpy(l, argv[2]);
    for (int i = 0; i < len2; i++) {
        B[i] = l[len2 - 1 - i] - '0';
    }

    /* SEQUENTIAL VERSION */
    ctimer(&t1, &tucpu, &tscpu);
    multiply(A, B, C, N);
    ctimer(&t2, &tucpu, &tscpu);
    printf("---SECUENCIAL---\n");
    printf("A [ ");
    for (int loop = N - 1; loop >= 0; loop--)
        printf("%d ", A[loop]);
    printf("]\n");
    printf("B [ ");
    for (int loop = N - 1; loop >= 0; loop--)
        printf("%d ", B[loop]);
    printf("]\nC [ ");
    for (int loop = N - 1; loop >= 0; loop--)
        printf("%d ", C[loop]);
    printf("]\n");
    printf(" ------- \n");
    printf("Tiempo %f segundos \n", (float)(t2 - t1));
    printf(" ------- \n");

    /* PARALLEL VERSION: each thread accumulates the partial products of the
       digits of A it owns into its private slice of D; thread 0 then folds
       the per-thread slices into E. */
    printf("---PARALELO---\n");
    omp_set_num_threads(4);
    int D[4 * N];                 /* one N-digit accumulator per thread */
    int n, i, carry, j, sum, P[N], tid, nthreads;
    int E[N];
    for (int i = 0; i < N; i++)
        E[i] = 0;
    ctimer(&t1, &tucpu, &tscpu);
    #pragma omp parallel shared (B, A) private(i, n, carry, j, sum, P, tid)
    {
        nthreads = omp_get_num_threads();
        /* every thread writes the same zeros into D: benign data race */
        for (i = 0; i < N * nthreads; i++) {
            D[i] = 0;
        }
        #pragma omp barrier
        tid = omp_get_thread_num();
        for (i = tid; i < len1; i = i + nthreads) {
            /* single-digit product P = A[i] * B */
            n = A[i];
            carry = 0;
            for (j = 0; j < N; j++) {
                P[j] = n * B[j];
                P[j] += carry;
                if (P[j] >= 10) {
                    carry = P[j] / 10;
                    P[j] %= 10;
                } else
                    carry = 0;
            }
            /* SHIFT: move the partial product up i digit positions */
            for (j = N - 1; j >= i; j--) P[j] = P[j - i];
            while (j >= 0) P[j--] = 0;
            /* ADD AND ACCUMULATE INTO THIS THREAD'S SLICE OF D */
            carry = 0;
            sum = 0;
            for (j = 0; j < N; j++) {
                sum = D[tid * N + j] + P[j] + carry;
                if (sum >= 10) {
                    carry = 1;
                    sum -= 10;
                } else
                    carry = 0;
                D[tid * N + j] = sum;
            }
        }
        #pragma omp barrier
        /* REDUCTION: thread 0 adds every per-thread partial sum into E */
        if (tid == 0) {
            for (int t = 0; t < nthreads; t++) {
                carry = 0;
                sum = 0;
                for (j = 0; j < N; j++) {
                    sum = E[j] + D[t * N + j] + carry;
                    if (sum >= 10) {
                        carry = 1;
                        sum -= 10;
                    } else
                        carry = 0;
                    E[j] = sum;
                }
            }
        }
    }
    ctimer(&t2, &tucpu, &tscpu);
    printf("A [ ");
    for (int loop = N - 1; loop >= 0; loop--)
        printf("%d ", A[loop]);
    printf("]\n");
    printf("B [ ");
    for (int loop = N - 1; loop >= 0; loop--)
        printf("%d ", B[loop]);
    printf("]\n");
    printf("C [ ");
    for (int loop = N - 1; loop >= 0; loop--)
        printf("%d ", E[loop]);
    printf("]\n");
    printf(" ------- \n");
    printf("Tiempo %f segundos \n", (float)(t2 - t1));
    printf(" ------- \n");
    return 0;
}
par_fsai_setup.c | /******************************************************************************
* Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "_hypre_blas.h"
#include "_hypre_lapack.h"
#define DEBUG 0
/*****************************************************************************
*
* Routine for driving the setup phase of FSAI
*
******************************************************************************/
/*--------------------------------------------------------------------------
* hypre_CSRMatrixExtractDenseMat
*
* Extract A[P, P] into a dense matrix.
*
* Parameters:
* - A: The hypre_CSRMatrix whose submatrix will be extracted.
* - A_sub: A patt_size^2 - sized array to hold the lower triangular of
* the symmetric submatrix A[P, P].
* - pattern: A patt_size - sized array to hold the wanted rows/cols.
* - marker: A work array of length equal to the number of columns in A.
* All values should be -1.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixExtractDenseMat( hypre_CSRMatrix *A,
                                hypre_Vector    *A_sub,
                                HYPRE_Int       *pattern,
                                HYPRE_Int        patt_size,
                                HYPRE_Int       *marker )
{
   HYPRE_Int     *A_i   = hypre_CSRMatrixI(A);
   HYPRE_Int     *A_j   = hypre_CSRMatrixJ(A);
   HYPRE_Complex *A_a   = hypre_CSRMatrixData(A);
   HYPRE_Complex *dense = hypre_VectorData(A_sub);

   /* Local variables */
   HYPRE_Int      p, q, row, col_pos;

   /* Start from a zero buffer (TODO: possibly redundant, see caller) */
   for (p = 0; p < hypre_VectorSize(A_sub); p++)
   {
      dense[p] = 0.0;
   }

   /* Gather the lower triangular part of A[P, P], column by column:
      marker maps a global column index to its position in the pattern */
   for (p = 0; p < patt_size; p++)
   {
      row = pattern[p];
      for (q = A_i[row]; q < A_i[row + 1]; q++)
      {
         if (A_j[q] <= row)
         {
            col_pos = marker[A_j[q]];
            if (col_pos >= 0)
            {
               dense[col_pos * patt_size + p] = A_a[q];
            }
         }
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixExtractDenseRow
*
* Extract the dense subrow from a matrix (A[i, P])
*
* Parameters:
* - A: The hypre_CSRMatrix whose subrow will be extracted.
* - A_subrow: The extracted subrow of A[i, P].
* - marker: A work array of length equal to the number of row in A.
* Assumed to be set to all -1.
* - row_num: which row index of A we want to extract data from.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixExtractDenseRow( hypre_CSRMatrix *A,
                                hypre_Vector    *A_subrow,
                                HYPRE_Int       *marker,
                                HYPRE_Int        row_num )
{
   HYPRE_Int     *A_i  = hypre_CSRMatrixI(A);
   HYPRE_Int     *A_j  = hypre_CSRMatrixJ(A);
   HYPRE_Complex *A_a  = hypre_CSRMatrixData(A);
   HYPRE_Complex *dest = hypre_VectorData(A_subrow);

   /* Local variables */
   HYPRE_Int      p, dest_pos;

   /* Start from a zero subrow */
   for (p = 0; p < hypre_VectorSize(A_subrow); p++)
   {
      dest[p] = 0.0;
   }

   /* Scatter the entries of row row_num that belong to the pattern;
      marker maps a column index to its slot in the dense subrow (or -1) */
   for (p = A_i[row_num]; p < A_i[row_num + 1]; p++)
   {
      dest_pos = marker[A_j[p]];
      if (dest_pos >= 0)
      {
         dest[dest_pos] = A_a[p];
      }
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_FindKapGrad
*
* Finding the Kaporin Gradient contribution (psi) of a given row.
*
* Parameters:
* - A: CSR matrix diagonal of A.
* - kap_grad: Array holding the kaporin gradient.
* This will we modified.
* - kg_pos: Array of the nonzero column indices of kap_grad.
* To be modified.
* - G_temp: Work array of G for row i.
* - pattern: Array of column indices of the nonzeros of G_temp.
* - patt_size: Number of column indices of the nonzeros of G_temp.
* - max_row_size: To ensure we don't overfill kap_grad.
* - row_num: Which row of G we are working on.
* - marker: Array of length equal to the number of rows in A.
* Assumed to all be set to -1.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_FindKapGrad( hypre_CSRMatrix *A_diag,
hypre_Vector *kap_grad,
HYPRE_Int *kg_pos,
hypre_Vector *G_temp,
HYPRE_Int *pattern,
HYPRE_Int patt_size,
HYPRE_Int max_row_size,
HYPRE_Int row_num,
HYPRE_Int *kg_marker )
{
HYPRE_Int *A_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Complex *A_a = hypre_CSRMatrixData(A_diag);
HYPRE_Complex *G_temp_data = hypre_VectorData(G_temp);
HYPRE_Complex *kap_grad_data = hypre_VectorData(kap_grad);
/* Local Variables */
HYPRE_Int i, ii, j, k, count, col;
/* kg_marker encoding (as used below): -1 = column excluded (already in the
   pattern), 0 = unseen candidate, k > 0 = candidate stored at kg_pos[k-1].
   NOTE(review): max_row_size is not referenced in this routine; the bound
   appears to be enforced by the caller — confirm. */
count = 0;
/* Compute A[row_num, 0:(row_num-1)]*G_temp[i,i] */
for (j = A_i[row_num]; j < A_i[row_num + 1]; j++)
{
col = A_j[j];
if (col < row_num)
{
/* only columns not already in the pattern (marker != -1) are candidates */
if (kg_marker[col] > -1)
{
/* Add A[row_num, col] to the tentative pattern */
kg_marker[col] = count + 1;
kg_pos[count] = col;
kap_grad_data[count] = A_a[j];
count++;
}
}
}
/* Compute A[0:(row_num-1), P]*G_temp[P, i] */
for (i = 0; i < patt_size; i++)
{
ii = pattern[i];
for (j = A_i[ii]; j < A_i[ii + 1]; j++)
{
col = A_j[j];
if (col < row_num)
{
k = kg_marker[col];
if (k == 0)
{
/* New entry in the tentative pattern */
kg_marker[col] = count + 1;
kg_pos[count] = col;
kap_grad_data[count] = G_temp_data[i] * A_a[j];
count++;
}
else if (k > 0)
{
/* Already existing entry in the tentative pattern */
kap_grad_data[k - 1] += G_temp_data[i] * A_a[j];
}
}
}
}
/* Update number of nonzero coefficients held in kap_grad */
hypre_VectorSize(kap_grad) = count;
/* Update to absolute values (only magnitudes matter for candidate ranking) */
for (i = 0; i < count; i++)
{
kap_grad_data[i] = hypre_cabs(kap_grad_data[i]);
}
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_swap2_ci
*--------------------------------------------------------------------------*/
/* Swap positions i and j simultaneously in the complex array v and the
   integer companion array w. */
void
hypre_swap2_ci( HYPRE_Complex *v,
                HYPRE_Int     *w,
                HYPRE_Int      i,
                HYPRE_Int      j )
{
   HYPRE_Complex cval = v[i];
   HYPRE_Int     ival = w[i];

   v[i] = v[j];
   w[i] = w[j];
   v[j] = cval;
   w[j] = ival;
}
/*--------------------------------------------------------------------------
* hypre_qsort2_ci
*
* Quick Sort (largest to smallest) for complex arrays.
* Sort on real portion of v (HYPRE_Complex), move w.
*--------------------------------------------------------------------------*/
/* Quicksort (largest to smallest) on the real part of v over [left, right],
   applying the same permutation to the companion index array w. */
void
hypre_qsort2_ci( HYPRE_Complex *v,
                 HYPRE_Int     *w,
                 HYPRE_Int      left,
                 HYPRE_Int      right )
{
   HYPRE_Int k, pivot_pos;

   if (left >= right)
   {
      return;
   }

   /* Median-position pivot moved to the front */
   hypre_swap2_ci(v, w, left, (left + right) / 2);

   /* Partition: everything larger than the pivot goes to the left side */
   pivot_pos = left;
   for (k = left + 1; k <= right; k++)
   {
      if (hypre_creal(v[k]) > hypre_creal(v[left]))
      {
         hypre_swap2_ci(v, w, ++pivot_pos, k);
      }
   }
   hypre_swap2_ci(v, w, left, pivot_pos);

   /* Recurse on both halves */
   hypre_qsort2_ci(v, w, left, pivot_pos - 1);
   hypre_qsort2_ci(v, w, pivot_pos + 1, right);
}
/*--------------------------------------------------------------------------
* hypre_PartialSelectSortCI
*--------------------------------------------------------------------------*/
/* Partial selection sort: move the nentries largest elements of v (by real
   part) to the front, largest first, permuting the companion array w
   identically.  Elements past position nentries are left unordered. */
HYPRE_Int
hypre_PartialSelectSortCI( HYPRE_Complex *v,
                           HYPRE_Int     *w,
                           HYPRE_Int      size,
                           HYPRE_Int      nentries )
{
   HYPRE_Int k, m, best;

   for (k = 0; k < nentries; k++)
   {
      /* Locate the largest remaining entry */
      best = k;
      for (m = k + 1; m < size; m++)
      {
         if (hypre_creal(v[m]) > hypre_creal(v[best]))
         {
            best = m;
         }
      }

      /* Bring it to position k */
      hypre_swap2_ci(v, w, k, best);
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_AddToPattern
*
* Take the largest elements from the kaporin gradient and add their
* locations to pattern.
*--------------------------------------------------------------------------*/
/* Append the column indices of the largest Kaporin-gradient candidates to
   the pattern (at most max_step_size of them), keep the pattern sorted
   ascending, and update kg_marker: appended columns become -1 (now in the
   pattern), rejected candidates are reset to 0. */
HYPRE_Int
hypre_AddToPattern( hypre_Vector *kap_grad,
                    HYPRE_Int    *kg_pos,
                    HYPRE_Int    *pattern,
                    HYPRE_Int    *patt_size,
                    HYPRE_Int    *kg_marker,
                    HYPRE_Int     max_step_size )
{
   HYPRE_Complex *kg_data = hypre_VectorData(kap_grad);
   HYPRE_Int      kg_size = hypre_VectorSize(kap_grad);
   HYPRE_Int      num_add, k;

   /* How many candidates can be appended in this step */
   num_add = hypre_min(kg_size, max_step_size);

   /* Bring the num_add largest candidates to the front of kg_data/kg_pos */
   hypre_PartialSelectSortCI(kg_data, kg_pos, kg_size, num_add);

   /* Append their column indices to the pattern */
   for (k = 0; k < num_add; k++)
   {
      pattern[*patt_size + k] = kg_pos[k];
   }
   *patt_size += num_add;

   /* Restore ascending order of the pattern */
   hypre_qsort0(pattern, 0, (*patt_size) - 1);

   /* Columns just added are now part of the pattern */
   for (k = 0; k < num_add; k++)
   {
      kg_marker[kg_pos[k]] = -1;
   }

   /* Remaining candidates go back to the "unseen" state */
   for (k = num_add; k < kg_size; k++)
   {
      kg_marker[kg_pos[k]] = 0;
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_DenseSPDSystemSolve
*
* Solve the dense SPD linear system with LAPACK:
*
* mat*lhs = -rhs
*
* Note: the contents of A change to its Cholesky factor.
*--------------------------------------------------------------------------*/
/* Solve the dense SPD system mat*lhs = -rhs with LAPACK (Cholesky).
   On return, mat holds its Cholesky factor and lhs the solution. */
HYPRE_Int
hypre_DenseSPDSystemSolve( hypre_Vector *mat,
                           hypre_Vector *rhs,
                           hypre_Vector *lhs )
{
   HYPRE_Complex *mat_data = hypre_VectorData(mat);
   HYPRE_Complex *rhs_data = hypre_VectorData(rhs);
   HYPRE_Complex *lhs_data = hypre_VectorData(lhs);
   HYPRE_Int      size     = hypre_VectorSize(rhs);

   /* Local variables */
   HYPRE_Int      num_rhs = 1;
   char           uplo    = 'L';
   char           msg[512];
   HYPRE_Int      k, info;

   /* Load the negated right-hand side into the solution vector */
   for (k = 0; k < size; k++)
   {
      lhs_data[k] = -rhs_data[k];
   }

   /* In-place Cholesky factorization: mat <- L */
   hypre_dpotrf(&uplo, &size, mat_data, &size, &info);
   if (info)
   {
      hypre_sprintf(msg, "Error: dpotrf failed with code %d\n", info);
      hypre_error_w_msg(HYPRE_ERROR_GENERIC, msg);
      return hypre_error_flag;
   }

   /* Forward/backward triangular solves with the Cholesky factor */
   hypre_dpotrs(&uplo, &size, &num_rhs, mat_data, &size, lhs_data, &size, &info);
   if (info)
   {
      hypre_sprintf(msg, "Error: dpotrs failed with code %d\n", info);
      hypre_error_w_msg(HYPRE_ERROR_GENERIC, msg);
      return hypre_error_flag;
   }

   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_FSAISetupNative
*--------------------------------------------------------------------------*/
/* Adaptive FSAI setup (native threading): builds the lower-triangular factor
   G row by row.  For each row, the sparsity pattern is grown greedily from
   the Kaporin gradient (up to max_steps steps of at most max_step_size new
   entries), a small dense SPD system is solved for the row values, and the
   row is scaled.  Threads own contiguous row blocks and assemble into
   thread-local arrays, which are stitched into G at the end. */
HYPRE_Int
hypre_FSAISetupNative( void *fsai_vdata,
hypre_ParCSRMatrix *A,
hypre_ParVector *f,
hypre_ParVector *u )
{
/* Data structure variables */
hypre_ParFSAIData *fsai_data = (hypre_ParFSAIData*) fsai_vdata;
HYPRE_Real kap_tolerance = hypre_ParFSAIDataKapTolerance(fsai_data);
HYPRE_Int max_steps = hypre_ParFSAIDataMaxSteps(fsai_data);
HYPRE_Int max_step_size = hypre_ParFSAIDataMaxStepSize(fsai_data);
/* CSRMatrix A_diag variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A_diag);
HYPRE_Complex *A_a = hypre_CSRMatrixData(A_diag);
HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_nnzs_diag_A = hypre_CSRMatrixNumNonzeros(A_diag);
HYPRE_Int avg_nnzrow_diag_A;
/* Matrix G variables */
hypre_ParCSRMatrix *G = hypre_ParFSAIDataGmat(fsai_data);
hypre_CSRMatrix *G_diag;
HYPRE_Int *G_i;
HYPRE_Int *G_j;
HYPRE_Complex *G_a;
HYPRE_Int max_nnzrow_diag_G; /* Max. number of nonzeros per row in G_diag */
HYPRE_Int max_cand_size; /* Max size of kg_pos */
/* Local variables */
char msg[512]; /* Warning message */
HYPRE_Complex *twspace; /* shared work space for omp threads */
/* Initalize some variables */
avg_nnzrow_diag_A = (num_rows_diag_A > 0) ? num_nnzs_diag_A / num_rows_diag_A : 0;
max_nnzrow_diag_G = max_steps * max_step_size + 1;
max_cand_size = avg_nnzrow_diag_A * max_nnzrow_diag_G;
G_diag = hypre_ParCSRMatrixDiag(G);
G_a = hypre_CSRMatrixData(G_diag);
G_i = hypre_CSRMatrixI(G_diag);
G_j = hypre_CSRMatrixJ(G_diag);
/* Allocate shared work space array for OpenMP threads */
twspace = hypre_CTAlloc(HYPRE_Complex, hypre_NumThreads() + 1, HYPRE_MEMORY_HOST);
/**********************************************************************
* Start of Adaptive FSAI algorithm
***********************************************************************/
/* Cycle through each of the local rows */
HYPRE_ANNOTATE_REGION_BEGIN("%s", "MainLoop");
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
hypre_Vector *G_temp; /* Vector holding the values of G[i,:] */
hypre_Vector *A_sub; /* Vector holding the dense submatrix A[P, P] */
hypre_Vector *A_subrow; /* Vector holding A[i, P] */
hypre_Vector *kap_grad; /* Vector holding the Kaporin gradient values */
HYPRE_Int *kg_pos; /* Indices of nonzero entries of kap_grad */
HYPRE_Int *kg_marker; /* Marker array with nonzeros pointing to kg_pos */
HYPRE_Int *marker; /* Marker array with nonzeros pointing to P */
HYPRE_Int *pattern; /* Array holding column indices of G[i,:] */
HYPRE_Int patt_size; /* Number of entries in current pattern */
HYPRE_Int patt_size_old; /* Number of entries in previous pattern */
HYPRE_Int ii; /* Thread identifier */
HYPRE_Int num_threads; /* Number of active threads */
HYPRE_Int ns, ne; /* Initial and last row indices */
HYPRE_Int i, j, k, iloc; /* Loop variables */
HYPRE_Complex old_psi; /* GAG' before k-th interation of aFSAI */
HYPRE_Complex new_psi; /* GAG' after k-th interation of aFSAI */
HYPRE_Complex row_scale; /* Scaling factor for G_temp */
HYPRE_Complex *G_temp_data;
HYPRE_Complex *A_subrow_data;
HYPRE_Int num_rows_Gloc;
HYPRE_Int num_nnzs_Gloc;
HYPRE_Int *Gloc_i;
HYPRE_Int *Gloc_j;
HYPRE_Complex *Gloc_a;
/* Allocate and initialize local vector variables */
G_temp = hypre_SeqVectorCreate(max_nnzrow_diag_G);
A_subrow = hypre_SeqVectorCreate(max_nnzrow_diag_G);
kap_grad = hypre_SeqVectorCreate(max_cand_size);
A_sub = hypre_SeqVectorCreate(max_nnzrow_diag_G * max_nnzrow_diag_G);
pattern = hypre_CTAlloc(HYPRE_Int, max_nnzrow_diag_G, HYPRE_MEMORY_HOST);
kg_pos = hypre_CTAlloc(HYPRE_Int, max_cand_size, HYPRE_MEMORY_HOST);
kg_marker = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A, HYPRE_MEMORY_HOST);
marker = hypre_TAlloc(HYPRE_Int, num_rows_diag_A, HYPRE_MEMORY_HOST);
hypre_SeqVectorInitialize(G_temp);
hypre_SeqVectorInitialize(A_subrow);
hypre_SeqVectorInitialize(kap_grad);
hypre_SeqVectorInitialize(A_sub);
/* all-ones bytes give -1 in two's complement for every HYPRE_Int entry */
hypre_Memset(marker, -1, num_rows_diag_A * sizeof(HYPRE_Int), HYPRE_MEMORY_HOST);
/* Setting data variables for vectors */
G_temp_data = hypre_VectorData(G_temp);
A_subrow_data = hypre_VectorData(A_subrow);
/* Contiguous block partition of the rows over the active threads */
ii = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
hypre_partition1D(num_rows_diag_A, num_threads, ii, &ns, &ne);
num_rows_Gloc = ne - ns;
/* Single thread writes straight into G; otherwise assemble into
   thread-local arrays and stitch them together afterwards */
if (num_threads == 1)
{
Gloc_i = G_i;
Gloc_j = G_j;
Gloc_a = G_a;
}
else
{
num_nnzs_Gloc = num_rows_Gloc * max_nnzrow_diag_G;
Gloc_i = hypre_CTAlloc(HYPRE_Int, num_rows_Gloc + 1, HYPRE_MEMORY_HOST);
Gloc_j = hypre_CTAlloc(HYPRE_Int, num_nnzs_Gloc, HYPRE_MEMORY_HOST);
Gloc_a = hypre_CTAlloc(HYPRE_Complex, num_nnzs_Gloc, HYPRE_MEMORY_HOST);
}
for (i = ns; i < ne; i++)
{
patt_size = 0;
/* Set old_psi up front so we don't have to compute GAG' twice in the inner for-loop */
/* NOTE(review): A_a[A_i[i]] is used as the diagonal entry A(i,i) — this
   assumes the diagonal is stored first in each row; confirm upstream. */
new_psi = old_psi = A_a[A_i[i]];
/* Cycle through each iteration for that row */
for (k = 0; k < max_steps; k++)
{
/* Compute Kaporin Gradient */
hypre_FindKapGrad(A_diag, kap_grad, kg_pos, G_temp, pattern,
patt_size, max_nnzrow_diag_G, i, kg_marker);
/* Find max_step_size largest values of the kaporin gradient,
find their column indices, and add it to pattern */
patt_size_old = patt_size;
hypre_AddToPattern(kap_grad, kg_pos, pattern, &patt_size,
kg_marker, max_step_size);
/* Update sizes */
hypre_VectorSize(A_sub) = patt_size * patt_size;
hypre_VectorSize(A_subrow) = patt_size;
hypre_VectorSize(G_temp) = patt_size;
/* No new candidates: the pattern has converged for this row */
if (patt_size == patt_size_old)
{
new_psi = old_psi;
break;
}
else
{
/* Gather A[P, P] and -A[i, P] */
for (j = 0; j < patt_size; j++)
{
marker[pattern[j]] = j;
}
hypre_CSRMatrixExtractDenseMat(A_diag, A_sub, pattern, patt_size, marker);
hypre_CSRMatrixExtractDenseRow(A_diag, A_subrow, marker, i);
/* Solve A[P, P] G[i, P]' = -A[i, P] */
hypre_DenseSPDSystemSolve(A_sub, A_subrow, G_temp);
/* Determine psi_{k+1} = G_temp[i]*A*G_temp[i]' */
new_psi = A_a[A_i[i]];
for (j = 0; j < patt_size; j++)
{
new_psi += G_temp_data[j] * A_subrow_data[j];
}
/* Check psi reduction */
if (hypre_cabs(new_psi - old_psi) < hypre_creal(kap_tolerance * old_psi))
{
break;
}
else
{
old_psi = new_psi;
}
}
}
/* Reset marker for building dense linear system */
for (j = 0; j < patt_size; j++)
{
marker[pattern[j]] = -1;
}
/* Compute scaling factor */
if (hypre_creal(new_psi) > 0 && hypre_cimag(new_psi) == 0)
{
row_scale = 1.0 / hypre_csqrt(new_psi);
}
else
{
/* Non-positive (or complex) psi: fall back to diagonal scaling and
   drop the off-diagonal entries of this row */
hypre_sprintf(msg, "Warning: complex scaling factor found in row %d\n", i);
hypre_error_w_msg(HYPRE_ERROR_GENERIC, msg);
row_scale = 1.0 / hypre_cabs(A_a[A_i[i]]);
hypre_VectorSize(G_temp) = patt_size = 0;
}
/* Pass values of G_temp into G (diagonal entry first, then the pattern) */
iloc = i - ns;
Gloc_j[Gloc_i[iloc]] = i;
Gloc_a[Gloc_i[iloc]] = row_scale;
for (k = 0; k < patt_size; k++)
{
j = Gloc_i[iloc] + k + 1;
Gloc_j[j] = pattern[k];
Gloc_a[j] = row_scale * G_temp_data[k];
kg_marker[pattern[k]] = 0;
}
Gloc_i[iloc + 1] = Gloc_i[iloc] + k + 1;
}
/* Copy data to shared memory: per-thread nonzero counts for the prefix sum.
   NOTE(review): twspace is HYPRE_Complex but stores integer counts that are
   later assigned into G_i — relies on exact integer representation. */
twspace[ii + 1] = Gloc_i[num_rows_Gloc] - Gloc_i[0];
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#pragma omp single
#endif
{
/* Serial prefix sum: twspace[t] becomes the global nnz offset of thread t */
for (i = 0; i < num_threads; i++)
{
twspace[i + 1] += twspace[i];
}
}
if (num_threads > 1)
{
/* Correct row pointer G_i */
G_i[ns] = twspace[ii];
for (i = ns; i < ne; i++)
{
iloc = i - ns;
G_i[i + 1] = G_i[i] + Gloc_i[iloc + 1] - Gloc_i[iloc];
}
/* Move G_j and G_a */
for (i = ns; i < ne; i++)
{
for (j = G_i[i]; j < G_i[i + 1]; j++)
{
G_j[j] = Gloc_j[j - G_i[ns]];
G_a[j] = Gloc_a[j - G_i[ns]];
}
}
hypre_TFree(Gloc_i, HYPRE_MEMORY_HOST);
hypre_TFree(Gloc_j, HYPRE_MEMORY_HOST);
hypre_TFree(Gloc_a, HYPRE_MEMORY_HOST);
}
/* Free memory */
hypre_SeqVectorDestroy(G_temp);
hypre_SeqVectorDestroy(A_subrow);
hypre_SeqVectorDestroy(kap_grad);
hypre_SeqVectorDestroy(A_sub);
hypre_TFree(kg_pos, HYPRE_MEMORY_HOST);
hypre_TFree(pattern, HYPRE_MEMORY_HOST);
hypre_TFree(marker, HYPRE_MEMORY_HOST);
hypre_TFree(kg_marker, HYPRE_MEMORY_HOST);
} /* end openmp region */
HYPRE_ANNOTATE_REGION_END("%s", "MainLoop");
/* Free memory */
hypre_TFree(twspace, HYPRE_MEMORY_HOST);
/* Update local number of nonzeros of G */
hypre_CSRMatrixNumNonzeros(G_diag) = G_i[num_rows_diag_A];
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_FSAISetupOMPDyn
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_FSAISetupOMPDyn( void *fsai_vdata,
hypre_ParCSRMatrix *A,
hypre_ParVector *f,
hypre_ParVector *u )
{
/* Data structure variables */
hypre_ParFSAIData *fsai_data = (hypre_ParFSAIData*) fsai_vdata;
HYPRE_Real kap_tolerance = hypre_ParFSAIDataKapTolerance(fsai_data);
HYPRE_Int max_steps = hypre_ParFSAIDataMaxSteps(fsai_data);
HYPRE_Int max_step_size = hypre_ParFSAIDataMaxStepSize(fsai_data);
/* CSRMatrix A_diag variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A_diag);
HYPRE_Complex *A_a = hypre_CSRMatrixData(A_diag);
HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_nnzs_diag_A = hypre_CSRMatrixNumNonzeros(A_diag);
HYPRE_Int avg_nnzrow_diag_A;
/* Matrix G variables */
hypre_ParCSRMatrix *G = hypre_ParFSAIDataGmat(fsai_data);
hypre_CSRMatrix *G_diag;
HYPRE_Int *G_i;
HYPRE_Int *G_j;
HYPRE_Complex *G_a;
HYPRE_Int *G_nnzcnt; /* Array holding number of nonzeros of row G[i,:] */
HYPRE_Int max_nnzrow_diag_G; /* Max. number of nonzeros per row in G_diag */
HYPRE_Int max_cand_size; /* Max size of kg_pos */
/* Local variables */
HYPRE_Int i, j, jj;
char msg[512]; /* Warning message */
HYPRE_Complex *twspace; /* shared work space for omp threads */
/* Initalize some variables */
avg_nnzrow_diag_A = num_nnzs_diag_A / num_rows_diag_A;
max_nnzrow_diag_G = max_steps * max_step_size + 1;
max_cand_size = avg_nnzrow_diag_A * max_nnzrow_diag_G;
G_diag = hypre_ParCSRMatrixDiag(G);
G_a = hypre_CSRMatrixData(G_diag);
G_i = hypre_CSRMatrixI(G_diag);
G_j = hypre_CSRMatrixJ(G_diag);
G_nnzcnt = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A, HYPRE_MEMORY_HOST);
/* Allocate shared work space array for OpenMP threads */
twspace = hypre_CTAlloc(HYPRE_Complex, hypre_NumThreads() + 1, HYPRE_MEMORY_HOST);
/**********************************************************************
* Start of Adaptive FSAI algorithm
***********************************************************************/
/* Cycle through each of the local rows */
HYPRE_ANNOTATE_REGION_BEGIN("%s", "MainLoop");
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
hypre_Vector *G_temp; /* Vector holding the values of G[i,:] */
hypre_Vector *A_sub; /* Vector holding the dense submatrix A[P, P] */
hypre_Vector *A_subrow; /* Vector holding A[i, P] */
hypre_Vector *kap_grad; /* Vector holding the Kaporin gradient values */
HYPRE_Int *kg_pos; /* Indices of nonzero entries of kap_grad */
HYPRE_Int *kg_marker; /* Marker array with nonzeros pointing to kg_pos */
HYPRE_Int *marker; /* Marker array with nonzeros pointing to P */
HYPRE_Int *pattern; /* Array holding column indices of G[i,:] */
HYPRE_Int patt_size; /* Number of entries in current pattern */
HYPRE_Int patt_size_old; /* Number of entries in previous pattern */
HYPRE_Int i, j, k; /* Loop variables */
HYPRE_Complex old_psi; /* GAG' before k-th interation of aFSAI */
HYPRE_Complex new_psi; /* GAG' after k-th interation of aFSAI */
HYPRE_Complex row_scale; /* Scaling factor for G_temp */
HYPRE_Complex *G_temp_data;
HYPRE_Complex *A_subrow_data;
/* Allocate and initialize local vector variables */
G_temp = hypre_SeqVectorCreate(max_nnzrow_diag_G);
A_subrow = hypre_SeqVectorCreate(max_nnzrow_diag_G);
kap_grad = hypre_SeqVectorCreate(max_cand_size);
A_sub = hypre_SeqVectorCreate(max_nnzrow_diag_G * max_nnzrow_diag_G);
pattern = hypre_CTAlloc(HYPRE_Int, max_nnzrow_diag_G, HYPRE_MEMORY_HOST);
kg_pos = hypre_CTAlloc(HYPRE_Int, max_cand_size, HYPRE_MEMORY_HOST);
kg_marker = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A, HYPRE_MEMORY_HOST);
marker = hypre_TAlloc(HYPRE_Int, num_rows_diag_A, HYPRE_MEMORY_HOST);
hypre_SeqVectorInitialize(G_temp);
hypre_SeqVectorInitialize(A_subrow);
hypre_SeqVectorInitialize(kap_grad);
hypre_SeqVectorInitialize(A_sub);
hypre_Memset(marker, -1, num_rows_diag_A * sizeof(HYPRE_Int), HYPRE_MEMORY_HOST);
/* Setting data variables for vectors */
G_temp_data = hypre_VectorData(G_temp);
A_subrow_data = hypre_VectorData(A_subrow);
#ifdef HYPRE_USING_OPENMP
#pragma omp for schedule(dynamic)
#endif
for (i = 0; i < num_rows_diag_A; i++)
{
patt_size = 0;
/* Set old_psi up front so we don't have to compute GAG' twice in the inner for-loop */
new_psi = old_psi = A_a[A_i[i]];
/* Cycle through each iteration for that row */
for (k = 0; k < max_steps; k++)
{
/* Compute Kaporin Gradient */
hypre_FindKapGrad(A_diag, kap_grad, kg_pos, G_temp, pattern,
patt_size, max_nnzrow_diag_G, i, kg_marker);
/* Find max_step_size largest values of the kaporin gradient,
find their column indices, and add it to pattern */
patt_size_old = patt_size;
hypre_AddToPattern(kap_grad, kg_pos, pattern, &patt_size,
kg_marker, max_step_size);
/* Update sizes */
hypre_VectorSize(A_sub) = patt_size * patt_size;
hypre_VectorSize(A_subrow) = patt_size;
hypre_VectorSize(G_temp) = patt_size;
if (patt_size == patt_size_old)
{
new_psi = old_psi;
break;
}
else
{
/* Gather A[P, P] and -A[i, P] */
for (j = 0; j < patt_size; j++)
{
marker[pattern[j]] = j;
}
hypre_CSRMatrixExtractDenseMat(A_diag, A_sub, pattern, patt_size, marker);
hypre_CSRMatrixExtractDenseRow(A_diag, A_subrow, marker, i);
/* Solve A[P, P] G[i, P]' = -A[i, P] */
hypre_DenseSPDSystemSolve(A_sub, A_subrow, G_temp);
/* Determine psi_{k+1} = G_temp[i]*A*G_temp[i]' */
new_psi = A_a[A_i[i]];
for (j = 0; j < patt_size; j++)
{
new_psi += G_temp_data[j] * A_subrow_data[j];
}
/* Check psi reduction */
if (hypre_cabs(new_psi - old_psi) < hypre_creal(kap_tolerance * old_psi))
{
break;
}
else
{
old_psi = new_psi;
}
}
}
/* Reset marker for building dense linear system */
for (j = 0; j < patt_size; j++)
{
marker[pattern[j]] = -1;
}
/* Compute scaling factor */
if (hypre_creal(new_psi) > 0 && hypre_cimag(new_psi) == 0)
{
row_scale = 1.0 / hypre_csqrt(new_psi);
}
else
{
hypre_sprintf(msg, "Warning: complex scaling factor found in row %d\n", i);
hypre_error_w_msg(HYPRE_ERROR_GENERIC, msg);
row_scale = 1.0 / hypre_cabs(A_a[A_i[i]]);
hypre_VectorSize(G_temp) = patt_size = 0;
}
/* Pass values of G_temp into G */
j = i * max_nnzrow_diag_G;
G_j[j] = i;
G_a[j] = row_scale;
j++;
for (k = 0; k < patt_size; k++)
{
G_j[j] = pattern[k];
G_a[j++] = row_scale * G_temp_data[k];
kg_marker[pattern[k]] = 0;
}
G_nnzcnt[i] = patt_size + 1;
} /* omp for schedule(dynamic) */
/* Free memory */
hypre_SeqVectorDestroy(G_temp);
hypre_SeqVectorDestroy(A_subrow);
hypre_SeqVectorDestroy(kap_grad);
hypre_SeqVectorDestroy(A_sub);
hypre_TFree(kg_pos, HYPRE_MEMORY_HOST);
hypre_TFree(pattern, HYPRE_MEMORY_HOST);
hypre_TFree(marker, HYPRE_MEMORY_HOST);
hypre_TFree(kg_marker, HYPRE_MEMORY_HOST);
} /* end openmp region */
HYPRE_ANNOTATE_REGION_END("%s", "MainLoop");
/* Reorder array */
G_i[0] = 0;
for (i = 0; i < num_rows_diag_A; i++)
{
G_i[i + 1] = G_i[i] + G_nnzcnt[i];
jj = i * max_nnzrow_diag_G;
for (j = G_i[i]; j < G_i[i + 1]; j++)
{
G_j[j] = G_j[jj];
G_a[j] = G_a[jj++];
}
}
/* Free memory */
hypre_TFree(twspace, HYPRE_MEMORY_HOST);
hypre_TFree(G_nnzcnt, HYPRE_MEMORY_HOST);
/* Update local number of nonzeros of G */
hypre_CSRMatrixNumNonzeros(G_diag) = G_i[num_rows_diag_A];
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_FSAISetup
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_FSAISetup( void *fsai_vdata,
hypre_ParCSRMatrix *A,
hypre_ParVector *f,
hypre_ParVector *u )
{
hypre_ParFSAIData *fsai_data = (hypre_ParFSAIData*) fsai_vdata;
HYPRE_Int max_steps = hypre_ParFSAIDataMaxSteps(fsai_data);
HYPRE_Int max_step_size = hypre_ParFSAIDataMaxStepSize(fsai_data);
HYPRE_Int algo_type = hypre_ParFSAIDataAlgoType(fsai_data);
HYPRE_Int print_level = hypre_ParFSAIDataPrintLevel(fsai_data);
HYPRE_Int eig_max_iters = hypre_ParFSAIDataEigMaxIters(fsai_data);
/* ParCSRMatrix A variables */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_BigInt num_rows_A = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt num_cols_A = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_BigInt *row_starts_A = hypre_ParCSRMatrixRowStarts(A);
HYPRE_BigInt *col_starts_A = hypre_ParCSRMatrixColStarts(A);
/* CSRMatrix A_diag variables */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag);
/* Work vectors */
hypre_ParVector *r_work;
hypre_ParVector *z_work;
/* G variables */
hypre_ParCSRMatrix *G;
HYPRE_Int max_nnzrow_diag_G; /* Max. number of nonzeros per row in G_diag */
HYPRE_Int max_nonzeros_diag_G; /* Max. number of nonzeros in G_diag */
HYPRE_ANNOTATE_FUNC_BEGIN;
/* Create and initialize work vectors used in the solve phase */
r_work = hypre_ParVectorCreate(comm, num_rows_A, row_starts_A);
z_work = hypre_ParVectorCreate(comm, num_rows_A, row_starts_A);
hypre_ParVectorInitialize(r_work);
hypre_ParVectorInitialize(z_work);
hypre_ParFSAIDataRWork(fsai_data) = r_work;
hypre_ParFSAIDataZWork(fsai_data) = z_work;
/* Create and initialize the matrix G */
max_nnzrow_diag_G = max_steps * max_step_size + 1;
max_nonzeros_diag_G = num_rows_diag_A * max_nnzrow_diag_G;
G = hypre_ParCSRMatrixCreate(comm, num_rows_A, num_cols_A,
row_starts_A, col_starts_A,
0, max_nonzeros_diag_G, 0);
hypre_ParCSRMatrixInitialize(G);
hypre_ParFSAIDataGmat(fsai_data) = G;
/* Compute G */
switch (algo_type)
{
case 1:
hypre_FSAISetupNative(fsai_vdata, A, f, u);
break;
case 2:
hypre_FSAISetupOMPDyn(fsai_vdata, A, f, u);
break;
default:
hypre_FSAISetupNative(fsai_vdata, A, f, u);
}
/* Compute G^T */
G = hypre_ParFSAIDataGmat(fsai_data);
hypre_ParCSRMatrixTranspose(G, &hypre_ParFSAIDataGTmat(fsai_data), 1);
/* Update omega if requested */
if (eig_max_iters)
{
hypre_FSAIComputeOmega(fsai_vdata, A);
}
/* Print Setup info */
if (print_level == 1)
{
hypre_FSAIPrintStats(fsai_data, A);
}
#if DEBUG
char filename[] = "FSAI.out.G.ij";
hypre_ParCSRMatrixPrintIJ(G, 0, 0, filename);
#endif
HYPRE_ANNOTATE_FUNC_END;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_FSAIPrintStats
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_FSAIPrintStats( void *fsai_vdata,
hypre_ParCSRMatrix *A )
{
/* Data structure variables */
hypre_ParFSAIData *fsai_data = (hypre_ParFSAIData*) fsai_vdata;
HYPRE_Int algo_type = hypre_ParFSAIDataAlgoType(fsai_data);
HYPRE_Real kap_tolerance = hypre_ParFSAIDataKapTolerance(fsai_data);
HYPRE_Int max_steps = hypre_ParFSAIDataMaxSteps(fsai_data);
HYPRE_Int max_step_size = hypre_ParFSAIDataMaxStepSize(fsai_data);
HYPRE_Int eig_max_iters = hypre_ParFSAIDataEigMaxIters(fsai_data);
HYPRE_Real density;
hypre_ParCSRMatrix *G = hypre_ParFSAIDataGmat(fsai_data);
/* Local variables */
HYPRE_Int nprocs;
HYPRE_Int my_id;
hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &nprocs);
hypre_MPI_Comm_rank(hypre_ParCSRMatrixComm(A), &my_id);
/* Compute density */
hypre_ParCSRMatrixSetDNumNonzeros(G);
hypre_ParCSRMatrixSetDNumNonzeros(A);
density = hypre_ParCSRMatrixDNumNonzeros(G) /
hypre_ParCSRMatrixDNumNonzeros(A);
hypre_ParFSAIDataDensity(fsai_data) = density;
if (!my_id)
{
hypre_printf("*************************\n");
hypre_printf("* HYPRE FSAI Setup Info *\n");
hypre_printf("*************************\n\n");
hypre_printf("+---------------------------+\n");
hypre_printf("| No. MPI tasks: %6d |\n", nprocs);
hypre_printf("| No. threads: %6d |\n", hypre_NumThreads());
hypre_printf("| Algorithm type: %6d |\n", algo_type);
hypre_printf("| Max no. steps: %6d |\n", max_steps);
hypre_printf("| Max step size: %6d |\n", max_step_size);
hypre_printf("| Kap grad tol: %8.1e |\n", kap_tolerance);
hypre_printf("| Prec. density: %8.3f |\n", density);
hypre_printf("| Eig max iters: %6d |\n", eig_max_iters);
hypre_printf("| Omega factor: %8.3f |\n", hypre_ParFSAIDataOmega(fsai_data));
hypre_printf("+---------------------------+\n");
hypre_printf("\n\n");
}
return hypre_error_flag;
}
/*****************************************************************************
* hypre_FSAIComputeOmega
*
* Approximates the relaxation factor omega with 1/eigmax(G^T*G*A), where the
* maximum eigenvalue is computed with a fixed number of iterations via the
* power method.
******************************************************************************/
HYPRE_Int
hypre_FSAIComputeOmega( void *fsai_vdata,
hypre_ParCSRMatrix *A )
{
hypre_ParFSAIData *fsai_data = (hypre_ParFSAIData*) fsai_vdata;
hypre_ParCSRMatrix *G = hypre_ParFSAIDataGmat(fsai_data);
hypre_ParCSRMatrix *GT = hypre_ParFSAIDataGTmat(fsai_data);
hypre_ParVector *r_work = hypre_ParFSAIDataRWork(fsai_data);
hypre_ParVector *z_work = hypre_ParFSAIDataZWork(fsai_data);
HYPRE_Int eig_max_iters = hypre_ParFSAIDataEigMaxIters(fsai_data);
hypre_ParVector *eigvec;
hypre_ParVector *eigvec_old;
HYPRE_Int i;
HYPRE_Real norm, invnorm, lambda, omega;
eigvec_old = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(eigvec_old);
eigvec = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(eigvec);
hypre_ParVectorSetRandomValues(eigvec, 256);
/* Power method iteration */
for (i = 0; i < eig_max_iters; i++)
{
norm = hypre_ParVectorInnerProd(eigvec, eigvec);
invnorm = 1.0 / sqrt(norm);
hypre_ParVectorScale(invnorm, eigvec);
if (i == (eig_max_iters - 1))
{
hypre_ParVectorCopy(eigvec, eigvec_old);
}
/* eigvec = GT * G * A * eigvec */
hypre_ParCSRMatrixMatvec(1.0, A, eigvec, 0.0, r_work);
hypre_ParCSRMatrixMatvec(1.0, G, r_work, 0.0, z_work);
hypre_ParCSRMatrixMatvec(1.0, GT, z_work, 0.0, eigvec);
}
norm = hypre_ParVectorInnerProd(eigvec, eigvec_old);
lambda = sqrt(norm);
/* Free memory */
hypre_ParVectorDestroy(eigvec_old);
hypre_ParVectorDestroy(eigvec);
/* Update omega */
omega = 1.0 / lambda;
hypre_FSAISetOmega(fsai_vdata, omega);
return hypre_error_flag;
}
|
pmtv-OpenMP.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#define omp_get_num_threads() 1
#define omp_set_num_threads(int)
#define omp_in_parallel() 0
#define omp_set_dynamic(int)
#endif
int main(int argc, char **argv)
{
int i, j, debug=0; //si ponemos debug a 1 es para ver la matriz y el vector pintados
//Argumento de entrada
if(argc < 2){
fprintf(stderr, "Falta tamaño de filas/columnas [opctional debug]\n");
exit(-1);
}
unsigned int N = atoi(argv[1]); // Máximo N =2^32-1=4294967295 (sizeof(unsigned int) = 4 B)
if(argc == 3){
debug = atoi(argv[2]);
}
// Inicializamos la matriz triangular (superior)
int *vector, *result, **matriz;
vector = (int *) malloc(N*sizeof(int)); // malloc necesita el tamaño en bytes
result = (int *) malloc(N*sizeof(int)); //si no hay espacio suficiente malloc devuelve NULL
matriz = (int **) malloc(N*sizeof(int*));
for (i=0; i<N; i++)
matriz[i] = (int*) malloc(N*sizeof(int));
for (i=0; i<N; i++){
for (j=i; j<N; j++){
matriz[i][j] = 6;
vector[i] = 4;
result[i]=0;
}
}
if(debug==1){
// Pintamos la matriz
printf("Matriz:\n");
for (i=0; i<N; i++){
for (j=0; j<N; j++){
if (j >= i){
printf("%d ", matriz[i][j]);
}else{
printf("0 ");
}
}
printf("\n");
}
// Pintamos el vector
printf("Vector:\n");
for (i=0; i<N; i++){
printf("%d ", vector[i]);
}
printf("\n");
}
double t1, t2, t_total;
t1 = omp_get_wtime();
//A por los resultados!!
//uso runtime para poder variarlo luego con la variable OMP_SCHEDULE
#pragma omp parallel for private(j) schedule(runtime)
for (i=0; i<N; i++){
for (j=i; j<N; j++){
result[i] += matriz[i][j] * vector[j];
}
}
t2 = omp_get_wtime();
t_total = t2 - t1;
if(debug==1){
// Pintamos los resultados
printf("Resultado:\n");
for (i=0; i<N; i++){
printf("%d ", result[i]);
}
printf("\n");
}
//Se imprime el primer y el último valor del vector resultado :-)
printf("Tiempo = %11.9f\t Primera = %d\t Ultima=%d\n",t_total,result[0],result[N-1]);
// Liberamos la memoria
for (i=0; i<N; i++)
free(matriz[i]);
free(matriz);
free(vector);
free(result);
return 0;
}
|
GB_binop__rminus_int32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__rminus_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__rminus_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_int32)
// A*D function (colscale): GB (_AxD__rminus_int32)
// D*A function (rowscale): GB (_DxB__rminus_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_int32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_int32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_int32)
// C=scalar+B GB (_bind1st__rminus_int32)
// C=scalar+B' GB (_bind1st_tran__rminus_int32)
// C=A+scalar GB (_bind2nd__rminus_int32)
// C=A'+scalar GB (_bind2nd_tran__rminus_int32)
// C type: int32_t
// A type: int32_t
// A pattern? 0
// B type: int32_t
// B pattern? 0
// BinaryOp: cij = (bij - aij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (y - x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_INT32 || GxB_NO_RMINUS_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rminus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__rminus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rminus_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rminus_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rminus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rminus_int32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rminus_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int32_t alpha_scalar ;
int32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
beta_scalar = (*((int32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rminus_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rminus_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rminus_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rminus_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rminus_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = (bij - x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rminus_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = (y - aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - x) ; \
}
GrB_Info GB (_bind1st_tran__rminus_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y - aij) ; \
}
GrB_Info GB (_bind2nd_tran__rminus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_winograd_dot_pack16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void convolution_winograd_dot_pack16_avx512(Mat& bottom_blob_tm, int outch, const Mat& kernel_tm, Mat& top_blob_tm, const Option& opt)
{
// Mat bottom_blob_tm(tiles, 16/36/64, inch, 64u, 4, opt.workspace_allocator);
const int tiles = bottom_blob_tm.w;
const int batch = bottom_blob_tm.h;
const int inch = bottom_blob_tm.c;
// permute
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, batch, 64u, 16, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, batch, 64u, 16, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, batch, 64u, 16, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, batch, 64u, 16, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, batch, 64u, 16, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < batch; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 11 < tiles; i += 12)
{
float* tmpptr = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x12
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(r0 + 16 * 3);
__m512 _r4 = _mm512_load_ps(r0 + 16 * 4);
__m512 _r5 = _mm512_load_ps(r0 + 16 * 5);
__m512 _r6 = _mm512_load_ps(r0 + 16 * 6);
__m512 _r7 = _mm512_load_ps(r0 + 16 * 7);
__m512 _r8 = _mm512_load_ps(r0 + 16 * 8);
__m512 _r9 = _mm512_load_ps(r0 + 16 * 9);
__m512 _ra = _mm512_load_ps(r0 + 16 * 10);
__m512 _rb = _mm512_load_ps(r0 + 16 * 11);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
__m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
__m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
__m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);
__m512 _tmp8 = _mm512_unpacklo_ps(_r8, _r9);
__m512 _tmp9 = _mm512_unpackhi_ps(_r8, _r9);
__m512 _tmpa = _mm512_unpacklo_ps(_ra, _rb);
__m512 _tmpb = _mm512_unpackhi_ps(_ra, _rb);
__m512 _tmpc = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpd = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpe = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpf = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpg = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmph = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpi = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpj = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpk = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpl = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpm = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpn = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(2, 0, 2, 0));
_tmp3 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(2, 0, 2, 0));
_tmp4 = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
_tmp5 = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(2, 0, 2, 0));
_tmp6 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(3, 1, 3, 1));
_tmp7 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
_tmp8 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(3, 1, 3, 1));
_tmp9 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(3, 1, 3, 1));
_tmpa = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));
_tmpb = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_r4 = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(2, 0, 2, 0));
_r5 = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(2, 0, 2, 0));
_r6 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r7 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_r8 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_r9 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_ra = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(3, 1, 3, 1));
_rb = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
_mm512_store_ps(tmpptr + 16 * 2, _r2);
_mm512_store_ps(tmpptr + 16 * 3, _r3);
_mm512_store_ps(tmpptr + 16 * 4, _r4);
_mm512_store_ps(tmpptr + 16 * 5, _r5);
_mm512_store_ps(tmpptr + 16 * 6, _r6);
_mm512_store_ps(tmpptr + 16 * 7, _r7);
_mm512_store_ps(tmpptr + 16 * 8, _r8);
_mm512_store_ps(tmpptr + 16 * 9, _r9);
_mm512_store_ps(tmpptr + 16 * 10, _ra);
_mm512_store_ps(tmpptr + 16 * 11, _rb);
tmpptr += 192;
r0 += bottom_blob_tm.cstep * 16;
}
}
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x8
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(r0 + 16 * 3);
__m512 _r4 = _mm512_load_ps(r0 + 16 * 4);
__m512 _r5 = _mm512_load_ps(r0 + 16 * 5);
__m512 _r6 = _mm512_load_ps(r0 + 16 * 6);
__m512 _r7 = _mm512_load_ps(r0 + 16 * 7);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
__m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
__m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
__m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);
__m512 _tmp8 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp9 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpa = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpb = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpc = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpd = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpe = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpf = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(2, 0, 2, 0));
_tmp3 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
_tmp4 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(3, 1, 3, 1));
_tmp5 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
_tmp6 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(3, 1, 3, 1));
_tmp7 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_r4 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r5 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_r6 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_r7 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
_mm512_store_ps(tmpptr + 16 * 2, _r2);
_mm512_store_ps(tmpptr + 16 * 3, _r3);
_mm512_store_ps(tmpptr + 16 * 4, _r4);
_mm512_store_ps(tmpptr + 16 * 5, _r5);
_mm512_store_ps(tmpptr + 16 * 6, _r6);
_mm512_store_ps(tmpptr + 16 * 7, _r7);
tmpptr += 128;
r0 += bottom_blob_tm.cstep * 16;
}
}
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x4
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(r0 + 16 * 3);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp5 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmp6 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp7 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_tmp3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r3 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
_mm512_store_ps(tmpptr + 16 * 2, _r2);
_mm512_store_ps(tmpptr + 16 * 3, _r3);
tmpptr += 64;
r0 += bottom_blob_tm.cstep * 16;
}
}
for (; i + 1 < tiles; i += 2)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x2
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
__m512 _tmp3 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
tmpptr += 32;
r0 += bottom_blob_tm.cstep * 16;
}
}
for (; i < tiles; i++)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
__m512 _val = _mm512_load_ps(r0);
_mm512_store_ps(tmpptr, _val);
tmpptr += 16;
r0 += bottom_blob_tm.cstep * 16;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, batch, outch, 64u, 16, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < batch; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
__m512 _sum2 = _mm512_setzero_ps();
__m512 _sum3 = _mm512_setzero_ps();
__m512 _sum4 = _mm512_setzero_ps();
__m512 _sum5 = _mm512_setzero_ps();
__m512 _sum6 = _mm512_setzero_ps();
__m512 _sum7 = _mm512_setzero_ps();
__m512 _sum8 = _mm512_setzero_ps();
__m512 _sum9 = _mm512_setzero_ps();
__m512 _suma = _mm512_setzero_ps();
__m512 _sumb = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(r0[2]);
__m512 _val3 = _mm512_set1_ps(r0[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
__m512 _val4 = _mm512_set1_ps(r0[4]);
__m512 _val5 = _mm512_set1_ps(r0[5]);
_sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
__m512 _val6 = _mm512_set1_ps(r0[6]);
__m512 _val7 = _mm512_set1_ps(r0[7]);
_sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);
__m512 _val8 = _mm512_set1_ps(r0[8]);
__m512 _val9 = _mm512_set1_ps(r0[9]);
_sum8 = _mm512_fmadd_ps(_val8, _w0, _sum8);
_sum9 = _mm512_fmadd_ps(_val9, _w0, _sum9);
__m512 _vala = _mm512_set1_ps(r0[10]);
__m512 _valb = _mm512_set1_ps(r0[11]);
_suma = _mm512_fmadd_ps(_vala, _w0, _suma);
_sumb = _mm512_fmadd_ps(_valb, _w0, _sumb);
r0 += 12;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
_mm512_store_ps(output0_tm + 16 * 2, _sum2);
_mm512_store_ps(output0_tm + 16 * 3, _sum3);
_mm512_store_ps(output0_tm + 16 * 4, _sum4);
_mm512_store_ps(output0_tm + 16 * 5, _sum5);
_mm512_store_ps(output0_tm + 16 * 6, _sum6);
_mm512_store_ps(output0_tm + 16 * 7, _sum7);
_mm512_store_ps(output0_tm + 16 * 8, _sum8);
_mm512_store_ps(output0_tm + 16 * 9, _sum9);
_mm512_store_ps(output0_tm + 16 * 10, _suma);
_mm512_store_ps(output0_tm + 16 * 11, _sumb);
output0_tm += 16 * 12;
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
__m512 _sum2 = _mm512_setzero_ps();
__m512 _sum3 = _mm512_setzero_ps();
__m512 _sum4 = _mm512_setzero_ps();
__m512 _sum5 = _mm512_setzero_ps();
__m512 _sum6 = _mm512_setzero_ps();
__m512 _sum7 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(r0[2]);
__m512 _val3 = _mm512_set1_ps(r0[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
__m512 _val4 = _mm512_set1_ps(r0[4]);
__m512 _val5 = _mm512_set1_ps(r0[5]);
_sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
__m512 _val6 = _mm512_set1_ps(r0[6]);
__m512 _val7 = _mm512_set1_ps(r0[7]);
_sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);
r0 += 8;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
_mm512_store_ps(output0_tm + 16 * 2, _sum2);
_mm512_store_ps(output0_tm + 16 * 3, _sum3);
_mm512_store_ps(output0_tm + 16 * 4, _sum4);
_mm512_store_ps(output0_tm + 16 * 5, _sum5);
_mm512_store_ps(output0_tm + 16 * 6, _sum6);
_mm512_store_ps(output0_tm + 16 * 7, _sum7);
output0_tm += 16 * 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
__m512 _sum2 = _mm512_setzero_ps();
__m512 _sum3 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(r0[2]);
__m512 _val3 = _mm512_set1_ps(r0[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
r0 += 4;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
_mm512_store_ps(output0_tm + 16 * 2, _sum2);
_mm512_store_ps(output0_tm + 16 * 3, _sum3);
output0_tm += 16 * 4;
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
r0 += 2;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
output0_tm += 16 * 2;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
r0 += 1;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
output0_tm += 16;
}
}
}
}
|
GB_unaryop__ainv_int16_int64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int16_int64
// op(A') function: GB_tran__ainv_int16_int64
// C type: int16_t
// A type: int64_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = -aij
// Type and operator plumbing for this specialized kernel; consumed by the
// loops below and by the shared template GB_unaryop_transpose.c.
// A (input) matrix entry type
#define GB_ATYPE \
int64_t
// C (output) matrix entry type
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// access entry p of the output array Cx
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_INT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the AINV (additive inverse) operator elementwise:
// Cx [p] = -((int16_t) Ax [p]) for all p in [0, anz).
// Entries are processed densely (the sparsity pattern is handled by the
// caller), so the loop parallelizes with a plain static schedule.
GrB_Info GB_unop__ainv_int16_int64
(
int16_t *restrict Cx,
const int64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this type/operator combination was compiled out (see GB_DISABLE above);
// the caller falls back to the generic, non-specialized kernel
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// expands to: int16_t x = (int16_t) Ax [p] ; Cx [p] = -x ;
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast to int16_t, and negate each entry.
// The actual transpose loop lives in the shared template
// GB_unaryop_transpose.c, specialized here via the GB_* macros above.
// This is phase 2 of 2; Rowcounts and the A_slice partition are presumably
// produced by phase 1 — see GB_unaryop_transpose.c for the contract.
GrB_Info GB_tran__ainv_int16_int64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
// compiled out; caller uses the generic kernel instead
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
simulation-par.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "datadef.h"
#include "init.h"
#include "tiles.h"
#include <mpi.h>
#define max(x,y) ((x)>(y)?(x):(y))
#define min(x,y) ((x)<(y)?(x):(y))
extern int *ileft, *iright;
extern int nprocs, proc;
/////////////////////////////////////////////
// Note: all the loops here iterate over the tile
// rather than the full array
// Conversion is simple
// start -> max(start, tile->start_?)
// end -> min(end, tile->end_?)
// As this appears all through the program
// I will only comment about it here to avoid
// repeating everything a bunch of times
/////////////////////////////////////////////
/* Computation of tentative velocity field (f, g) */
/* Compute the tentative velocity field (f, g) from the current (u, v):
 * one explicit Euler step of the momentum equations, with a gamma-weighted
 * donor-cell discretisation of the convective terms (du2dx, duvdy, duvdx,
 * dv2dy) and a central-difference Laplacian for the viscous terms.
 * All loops are clipped to this rank's tile (see file header); f and g are
 * halo-synced at the end because they are read with a 5-point stencil
 * elsewhere (compute_rhs). */
void compute_tentative_velocity(float **u, float **v, float **f, float **g,
char **flag, int imax, int jmax, float del_t, float delx, float dely,
float gamma, float Re, struct TileData* tile_data, double * sync_time_taken)
{
// Create the threads outside the two big loops to avoid the overhead of creating and joining the threads twice
// Use firstprivate to provide a private copy of the parameters
#pragma omp parallel firstprivate(imax, jmax, del_t, delx, dely, gamma, Re, tile_data, u, v, f, g, flag) default(none)
{
int i, j;
float du2dx, duvdy, duvdx, dv2dy, laplu, laplv;
// First sweep: tentative x-velocity f over interior u-faces
#pragma omp for collapse(2)
for (i=max(1, tile_data->start_x); i<=min(tile_data->end_x-1,imax-1); i++) { // i=1 i <=imax -1
for (j=max(1, tile_data->start_y); j<=min(tile_data->end_y-1, jmax); j++) { // j=1 j <=jmax
/* only if both adjacent cells are fluid cells */
if ((flag[i][j] & C_F) && (flag[i+1][j] & C_F)) {
du2dx = ((u[i][j]+u[i+1][j])*(u[i][j]+u[i+1][j])+
gamma*fabs(u[i][j]+u[i+1][j])*(u[i][j]-u[i+1][j])-
(u[i-1][j]+u[i][j])*(u[i-1][j]+u[i][j])-
gamma*fabs(u[i-1][j]+u[i][j])*(u[i-1][j]-u[i][j]))
/(4.0*delx);
duvdy = ((v[i][j]+v[i+1][j])*(u[i][j]+u[i][j+1])+
gamma*fabs(v[i][j]+v[i+1][j])*(u[i][j]-u[i][j+1])-
(v[i][j-1]+v[i+1][j-1])*(u[i][j-1]+u[i][j])-
gamma*fabs(v[i][j-1]+v[i+1][j-1])*(u[i][j-1]-u[i][j]))
/(4.0*dely);
laplu = (u[i+1][j]-2.0*u[i][j]+u[i-1][j])/delx/delx+
(u[i][j+1]-2.0*u[i][j]+u[i][j-1])/dely/dely;
f[i][j] = u[i][j]+del_t*(laplu/Re-du2dx-duvdy);
} else {
/* non-fluid face: carry the old velocity through unchanged */
f[i][j] = u[i][j];
}
}
}
// Second sweep: tentative y-velocity g over interior v-faces
// The alternative would be to run as parallel tasks as well but typically the tile is large enough to occupy all the resources available
#pragma omp for collapse(2)
for (i=max(1, tile_data->start_x); i<=min(tile_data->end_x-1, imax); i++) {
for (j=max(1, tile_data->start_y); j<=min(tile_data->end_y-1, jmax-1); j++) {
/* only if both adjacent cells are fluid cells */
if ((flag[i][j] & C_F) && (flag[i][j+1] & C_F)) {
duvdx = ((u[i][j]+u[i][j+1])*(v[i][j]+v[i+1][j])+
gamma*fabs(u[i][j]+u[i][j+1])*(v[i][j]-v[i+1][j])-
(u[i-1][j]+u[i-1][j+1])*(v[i-1][j]+v[i][j])-
gamma*fabs(u[i-1][j]+u[i-1][j+1])*(v[i-1][j]-v[i][j]))
/(4.0*delx);
dv2dy = ((v[i][j]+v[i][j+1])*(v[i][j]+v[i][j+1])+
gamma*fabs(v[i][j]+v[i][j+1])*(v[i][j]-v[i][j+1])-
(v[i][j-1]+v[i][j])*(v[i][j-1]+v[i][j])-
gamma*fabs(v[i][j-1]+v[i][j])*(v[i][j-1]-v[i][j]))
/(4.0*dely);
laplv = (v[i+1][j]-2.0*v[i][j]+v[i-1][j])/delx/delx+
(v[i][j+1]-2.0*v[i][j]+v[i][j-1])/dely/dely;
g[i][j] = v[i][j]+del_t*(laplv/Re-duvdx-dv2dy);
} else {
g[i][j] = v[i][j];
}
}
}
/* f & g at external boundaries */
/* NOTE(review): every rank writes the global boundary rows f[0], f[imax]
 * and columns g[*][0], g[*][jmax] unconditionally; the commented-out
 * guards below suggest this was once restricted to boundary-owning
 * tiles — confirm all ranks allocate the full global rows/columns. */
// int i,j;
// Parallelise but typically small compared to the previous loop so they don't really make a difference
#pragma omp for
for (j=max(1, tile_data->start_y); j<=min(tile_data->end_y-1, jmax); j++) {
// if (tile_data->start_x == 0) {
f[0][j] = u[0][j];
// }
// if (tile_data->end_x >= imax) {
f[imax][j] = u[imax][j];
// }
}
#pragma omp for
for (i=max(1, tile_data->start_x); i<=min(tile_data->end_x-1, imax); i++) {
// if (tile_data->start_y == 0) {
g[i][0] = v[i][0];
// }
// if (tile_data->end_y >= jmax) {
g[i][jmax] = v[i][jmax];
// }
}
}
// Synchronise f and g as they are used elsewhere in a 5 stencil pattern and so need the edge data
halo_sync(proc, f, tile_data, sync_time_taken);
halo_sync(proc, g, tile_data, sync_time_taken);
}
/* Calculate the right hand side of the pressure equation */
/* Build the right-hand side of the pressure Poisson equation from the
 * tentative velocities:  rhs = (df/dx + dg/dy) / del_t,
 * evaluated only on fluid cells of this rank's tile. */
void compute_rhs(float **f, float **g, float **rhs, char **flag, int imax,
int jmax, float del_t, float delx, float dely, struct TileData* tile_data)
{
    /* Clip the global index ranges to this rank's tile once, up front
       (the bounds are loop-invariant, as collapse(2) requires). */
    int x_lo = max(1, tile_data->start_x);
    int x_hi = min(imax, tile_data->end_x - 1);
    int y_lo = max(1, tile_data->start_y);
    int y_hi = min(jmax, tile_data->end_y - 1);
    int ix, jy;
    #pragma omp parallel for collapse(2) private(ix, jy) firstprivate(x_lo, x_hi, y_lo, y_hi, del_t, delx, dely, f, g, rhs, flag) default(none)
    for (ix = x_lo; ix <= x_hi; ix++) {
        for (jy = y_lo; jy <= y_hi; jy++) {
            /* only fluid, non-surface cells carry a pressure RHS */
            if (flag[ix][jy] & C_F) {
                rhs[ix][jy] = (
                    (f[ix][jy]-f[ix-1][jy])/delx +
                    (g[ix][jy]-g[ix][jy-1])/dely
                    ) / del_t;
            }
        }
    }
    /* rhs is only ever read at [i][j] within the same tile, so no halo
       exchange is needed here (unlike f, g, p, u and v). */
}
/* Red/Black SOR to solve the poisson equation */
/* Red/Black SOR solver for the pressure Poisson equation on this rank's
 * tile. Runs at most itermax sweeps; after each half-sweep the p halo is
 * exchanged, and after each full sweep the residual is summed across ranks
 * with MPI_Allreduce. On exit *res holds the final normalised residual.
 * Returns the 0-based index of the last iteration performed (not a count).
 * NOTE(review): if itermax <= 0 the loop body never runs and `iter` is
 * returned uninitialised — confirm callers always pass itermax >= 1. */
int poisson(float **p, float **rhs, char **flag, int imax, int jmax,
float delx, float dely, float eps, int itermax, float omega,
float *res, int ifull, struct TileData* tile_data, double * sync_time_taken,
double* possion_p_loop_time_taken, double* possion_res_loop_time_taken)
{
int i, j, iter;
float add, beta_2, beta_mod = 0.0;
float p0 = 0.0;
int rb; /* Red-black value. */
float rdx2 = 1.0/(delx*delx);
float rdy2 = 1.0/(dely*dely);
beta_2 = -omega/(2.0*(rdx2+rdy2));
/* Local (per-tile) sum of squares of p, used to normalise the residual */
// #pragma omp parallel for private(i, j) firstprivate(tile_data, imax, jmax, flag, p) reduction(+:p0) default(none) collapse(2)
for (i = max(1, tile_data->start_x); i <= min(imax, tile_data->end_x-1); i++) {
for (j= max(1, tile_data->start_y); j<=min(jmax, tile_data->end_y-1); j++) {
if (flag[i][j] & C_F) { p0 += p[i][j]*p[i][j]; }
}
}
// Perform an all reduce sum with the local p0 sums
// to get the real p0 to every process
double start_out = MPI_Wtime();
float p0sum;
MPI_Allreduce(&p0, &p0sum, 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD);
p0 = p0sum;
*sync_time_taken += MPI_Wtime() - start_out;
p0 = sqrt(p0/ifull);
if (p0 < 0.0001) { p0 = 1.0; }
/* Tile-clipped loop bounds, hoisted out of the hot iteration loop */
int i_start = max(1, tile_data->start_x);
int i_end = min(imax, tile_data->end_x - 1);
int j_start = max(1, tile_data->start_y);
int j_end = min(jmax, tile_data->end_y - 1);
float res_sum_local = 0.0;
/* Red/Black SOR-iteration */
// Create the parallel region here as the loop may run up to itermax times
// The overhead of creating/joining threads 3 x itermax times would be extremely high
// Local iteration variables are kept private. Iter has to have a local copy to prevent threads all incrementing the shared variable. iter is updated from each thread's local copy of iter_local
// Iteration, matrices and timing variables are shared between threads
// The rest are copied to a thread private copy
#pragma omp parallel private(i, j, add, rb) shared(iter, res_sum_local, sync_time_taken, possion_p_loop_time_taken, possion_res_loop_time_taken) firstprivate(i_start, i_end, j_start, j_end,omega, beta_2, rdx2, rdy2, beta_mod, itermax, res, tile_data, proc, imax, jmax, eps, p0, ifull, nprocs, p, rhs, flag)
{
for (int iter_local = 0; iter_local < itermax; iter_local++) {
double start = 0.0;
for (rb = 0; rb <= 1; rb++) {
start = MPI_Wtime();
// Start the for loop with the threads already created
// No need for private i,j as they are already thread private
// Note: doesn't use collapse as the offset which eliminates a branch from the hot loop body
// depends on i
#pragma omp for // collapse(2) // collapse takes 2x longer
for (i = i_start; i <=i_end; i++) {
/* choose the first j of this row with (i+j) % 2 == rb, then step by 2:
   visits exactly the cells of the current red/black colour */
int offset = ((i + j_start) % 2 != rb);
for (j = j_start + offset; j <= j_end; j+=2) {
// if ((i+j) % 2 != rb) { continue; }
if (flag[i][j] == (C_F | B_NSEW)) {
/* five point star for interior fluid cells */
p[i][j] = (1.-omega)*p[i][j] -
beta_2*(
(p[i+1][j]+p[i-1][j])*rdx2
+ (p[i][j+1]+p[i][j-1])*rdy2
- rhs[i][j]
);
} else if (flag[i][j] & C_F) {
/* modified star near boundary */
beta_mod = -omega/((eps_E+eps_W)*rdx2+(eps_N+eps_S)*rdy2);
p[i][j] = (1.-omega)*p[i][j] -
beta_mod*(
(eps_E*p[i+1][j]+eps_W*p[i-1][j])*rdx2
+ (eps_N*p[i][j+1]+eps_S*p[i][j-1])*rdy2
- rhs[i][j]
);
}
// printf("%d: %d\n", omp_get_thread_num(), i);
} /* end of j */
} /* end of i */
// only allow a single thread to run this block, blocks for the other threads
// (the implicit barrier at the end of `single` also guarantees the p halo
// is up to date before any thread starts the next half-sweep)
#pragma omp single
{
// Update time taken
double time_taken = MPI_Wtime() - start;
*possion_p_loop_time_taken += time_taken;
// printf("%d: loop %f\n", omp_get_thread_num(), MPI_Wtime() - start);
// Only a single thread is allowed to sync the matrix
halo_sync(proc, p, tile_data, sync_time_taken);
// Ensure only a single thread sets this to 0.0
res_sum_local = 0.0;
}
} /* end of rb */
start = MPI_Wtime();
// Start a parallel for reduction using the res_sum_local variable
#pragma omp for reduction(+:res_sum_local) // collapse(2) collapse takes 2x longer
for (i = i_start; i <= i_end; i++) {
for (j = j_start; j <= j_end; j++) {
if (flag[i][j] & C_F) {
/* only fluid cells */
add = (eps_E*(p[i+1][j]-p[i][j]) -
eps_W*(p[i][j]-p[i-1][j])) * rdx2 +
(eps_N*(p[i][j+1]-p[i][j]) -
eps_S*(p[i][j]-p[i][j-1])) * rdy2 - rhs[i][j];
res_sum_local += add*add;
}
}
}
double time_taken = MPI_Wtime() - start;
// printf("%f res sum local\n", MPI_Wtime() - start);
start = MPI_Wtime();
// sum up the residual across MPI processes
// Only a single thread can execute the MPI send/recv
#pragma omp single
{
// Update time taken
*possion_res_loop_time_taken += time_taken;
/* Partial computation of residual */
*res = res_sum_local;
// Perform a sum reduction and send result to all processes
MPI_Allreduce(&res_sum_local, res, 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD);
*res = sqrt((*res)/ifull)/p0;
// Update the iterator
iter = iter_local;
}
// printf("%f res bcast\n", MPI_Wtime() - start);
/* convergence? All threads read *res after the implicit barrier of the
   single above, so they either all break or all continue together */
if (*res<eps) break;
} /* end of iter */
}
return iter;
}
/* Update the velocity values based on the tentative
* velocity values and the new pressure matrix
*/
/* Correct the tentative velocities (f, g) with the new pressure field:
 *   u = f - del_t * dp/dx,   v = g - del_t * dp/dy
 * applied only where both cells sharing the face are fluid. */
void update_velocity(float **u, float **v, float **f, float **g, float **p,
char **flag, int imax, int jmax, float del_t, float delx, float dely, struct TileData* tile_data, double * sync_time_taken)
{
    /* One parallel region covering both sweeps: pay the fork/join cost once. */
    #pragma omp parallel firstprivate(u, v, f, g, p, flag, imax, jmax, del_t, delx, dely, tile_data) default(none)
    {
        int ix, jy;
        /* u lives on vertical faces: interior faces are i in [1, imax-1] */
        #pragma omp for
        for (ix = max(1, tile_data->start_x); ix <= min(imax-1, tile_data->end_x-1); ix++) {
            for (jy = max(1, tile_data->start_y); jy <= min(jmax, tile_data->end_y-1); jy++) {
                if ((flag[ix][jy] & C_F) && (flag[ix+1][jy] & C_F)) {
                    u[ix][jy] = f[ix][jy]-(p[ix+1][jy]-p[ix][jy])*del_t/delx;
                }
            }
        }
        /* v lives on horizontal faces: interior faces are j in [1, jmax-1] */
        #pragma omp for
        for (ix = max(1, tile_data->start_x); ix <= min(imax, tile_data->end_x - 1); ix++) {
            for (jy = max(1, tile_data->start_y); jy <= min(jmax-1, tile_data->end_y - 1); jy++) {
                if ((flag[ix][jy] & C_F) && (flag[ix][jy+1] & C_F)) {
                    v[ix][jy] = g[ix][jy]-(p[ix][jy+1]-p[ix][jy])*del_t/dely;
                }
            }
        }
    }
    /* u and v are read elsewhere with a 5-point stencil, so refresh the halos */
    halo_sync(proc, u, tile_data, sync_time_taken);
    halo_sync(proc, v, tile_data, sync_time_taken);
}
/* Set the timestep size so that we satisfy the Courant-Friedrichs-Lewy
* conditions (ie no particle moves more than one cell width in one
* timestep). Otherwise the simulation becomes unstable.
*/
/* Adapt *del_t to satisfy the CFL stability conditions (no quantity moves
 * more than one cell width per step) and the viscous (Reynolds) limit,
 * then scale by the safety factor tau. Time-step control is skipped
 * entirely when tau < 1e-10. */
void set_timestep_interval(float *del_t, int imax, int jmax, float delx,
float dely, float **u, float **v, float Re, float tau, struct TileData* tile_data, double * sync_time_taken)
{
int i, j;
float umax, vmax, umax_local, vmax_local, deltu, deltv, deltRe;
/* del_t satisfying CFL conditions */
if (tau >= 1.0e-10) { /* else no time stepsize control */
umax_local = 1.0e-10;
vmax_local = 1.0e-10;
// Find this tile's max |u| and |v| with OpenMP max reductions.
// NOTE(review): the u loop starts at start_x unclamped while j is clamped
// to 1, and the v loop is the mirror image — this presumably mirrors the
// serial code's ranges (u: i from 0, v: j from 0); confirm against it.
#pragma omp parallel private(i, j) firstprivate(imax, jmax, tile_data, u, v) shared(umax_local, vmax_local) default(none)
{
#pragma omp for collapse(2) reduction(max:umax_local)
for (i=tile_data->start_x; i<=min(imax+1, tile_data->end_x - 1); i++) {
for (j=max(1, tile_data->start_y); j<=min(jmax+1, tile_data->end_y - 1); j++) {
umax_local = max(fabs(u[i][j]), umax_local);
}
}
#pragma omp for collapse(2) reduction(max:vmax_local)
for (i=max(1, tile_data->start_x); i<=min(imax+1, tile_data->end_x - 1); i++) {
for (j=tile_data->start_y; j<=min(jmax+1, tile_data->end_y - 1); j++) {
vmax_local = max(fabs(v[i][j]), vmax_local);
}
}
}
// calculate the global umax and vmax by performing a max reduction on both vars
// using MPI_Allreduce (both maxima travel in one 2-element message)
double start = MPI_Wtime();
float max_buffer[2];
max_buffer[0] = umax_local;
max_buffer[1] = vmax_local;
float max_buffer2[2];
MPI_Allreduce(&max_buffer, &max_buffer2, 2, MPI_FLOAT, MPI_MAX, MPI_COMM_WORLD);
umax = max_buffer2[0];
vmax = max_buffer2[1];
*sync_time_taken += MPI_Wtime() - start;
/* convective limits in each direction, and the viscous limit */
deltu = delx/umax;
deltv = dely/vmax;
deltRe = 1/(1/(delx*delx)+1/(dely*dely))*Re/2.0;
if (deltu<deltv) {
*del_t = min(deltu, deltRe);
} else {
*del_t = min(deltv, deltRe);
}
*del_t = tau * (*del_t); /* multiply by safety factor */
}
}
|
bspline_create.c | /////////////////////////////////////////////////////////////////////////////
// einspline: a library for creating and evaluating B-splines //
// Copyright (C) 2007 Kenneth P. Esler, Jr. //
// //
// This program is free software; you can redistribute it and/or modify //
// it under the terms of the GNU General Public License as published by //
// the Free Software Foundation; either version 2 of the License, or //
// (at your option) any later version. //
// //
// This program is distributed in the hope that it will be useful, //
// but WITHOUT ANY WARRANTY; without even the implied warranty of //
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the //
// GNU General Public License for more details. //
// //
// You should have received a copy of the GNU General Public License //
// along with this program; if not, write to the Free Software //
// Foundation, Inc., 51 Franklin Street, Fifth Floor, //
// Boston, MA 02110-1301 USA //
/////////////////////////////////////////////////////////////////////////////
#include "bspline_create.h"
#ifndef _XOPEN_SOURCE
#define _XOPEN_SOURCE 600
#endif
#ifndef __USE_XOPEN2K
#define __USE_XOPEN2K
#endif
#include <stdlib.h>
#include <stdio.h>
#include <inttypes.h>
int posix_memalign(void **memptr, size_t alignment, size_t size);
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
//// Helper functions for spline creation ////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
void init_sse_data();
void
find_coefs_1d_d (Ugrid grid, BCtype_d bc,
double *data, intptr_t dstride,
double *coefs, intptr_t cstride);
// Solve the (M+2)x(M+2) tridiagonal system for 1-D B-spline interpolation
// with derivative boundary conditions (float version).
// bands is row-major, 4 floats per row: columns 0..2 hold the band entries,
// column 3 the right-hand side. Rows 0 and M+1 come from the boundary
// conditions and are handled specially; the result lands in coefs with
// stride cstride. bands is destroyed in the process.
// The arithmetic below is kept in exactly the original operation order so
// the float results are bit-identical to the index-arithmetic formulation.
void
solve_deriv_interp_1d_s (float bands[], float coefs[],
                         int M, int cstride)
{
  float *b0 = bands;            /* boundary row 0 */
  float *b1 = bands + 4;        /* first interior row */

  /* Normalize row 0 by its diagonal, then eliminate it from row 1. */
  b0[1] /= b0[0];
  b0[2] /= b0[0];
  b0[3] /= b0[0];
  b0[0] = 1.0;
  b1[1] -= b1[0]*b0[1];
  b1[2] -= b1[0]*b0[2];
  b1[3] -= b1[0]*b0[3];
  b0[0] = 0.0;
  b1[2] /= b1[1];
  b1[3] /= b1[1];
  b1[1] = 1.0;

  /* Forward elimination over rows 2 .. M. */
  for (int row=2; row < (M+1); row++) {
    float *cur  = bands + 4*row;
    float *prev = cur - 4;
    cur[1] -= cur[0]*prev[2];
    cur[3] -= cur[0]*prev[3];
    cur[2] /= cur[1];
    cur[3] /= cur[1];
    cur[0] = 0.0;
    cur[1] = 1.0;
  }

  /* Boundary row M+1 couples to rows M-1 and M. */
  float *last = bands + 4*(M+1);
  float *rowM = bands + 4*M;
  float *rowM1 = bands + 4*(M-1);
  last[1] -= last[0]*rowM1[2];
  last[3] -= last[0]*rowM1[3];
  last[2] -= last[1]*rowM[2];
  last[3] -= last[1]*rowM[3];
  last[3] /= last[2];
  last[2] = 1.0;

  /* Back substitution, top coefficient first. */
  coefs[(M+1)*cstride] = last[3];
  for (int row=M; row>0; row--)
    coefs[row*cstride] = bands[4*row+3] - bands[4*row+2]*coefs[cstride*(row+1)];
  coefs[0] = bands[3] - bands[1]*coefs[1*cstride] - bands[2]*coefs[2*cstride];
}
// On input, bands should be filled with:
// row 0 : abcdInitial from boundary conditions
// rows 1:M: basis functions in first 3 cols, data in last
// row M+1 : abcdFinal from boundary conditions
// cstride gives the stride between values in coefs.
// On exit, coefs will contain the interpolating B-spline coefficients
// Solve the cyclic (periodic) tridiagonal system for 1-D B-spline
// interpolation coefficients (float version). bands is row-major, 4 floats
// per row: columns 0..2 hold the band entries, column 3 the data value.
// The periodic wrap-around introduces a dense final column; lastCol[]
// carries that fill-in during the forward elimination. bands is destroyed.
// On exit, coefs[0..M+2] (stride cstride) hold the coefficients, with the
// first/last entries duplicated (coefs[0]=coefs[M], coefs[M+1]=coefs[1],
// coefs[M+2]=coefs[2]) to enforce periodicity for stencil evaluation.
void
solve_periodic_interp_1d_s (float bands[], float coefs[],
int M, size_t cstride) //int M, int cstride)
{
// VLA scratch for the wrap-around column; requires M >= 3 or so —
// presumably guaranteed by the grid construction, TODO confirm.
float lastCol[M];
// Now solve:
// First and last rows are different
bands[4*(0)+2] /= bands[4*(0)+1];
bands[4*(0)+0] /= bands[4*(0)+1];
bands[4*(0)+3] /= bands[4*(0)+1];
bands[4*(0)+1] = 1.0;
bands[4*(M-1)+1] -= bands[4*(M-1)+2]*bands[4*(0)+0];
bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(0)+3];
bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(0)+2];
lastCol[0] = bands[4*(0)+0];
// Forward elimination; row M-1 is updated incrementally because the
// cyclic term couples it to every previous row.
for (int row=1; row < (M-1); row++) {
bands[4*(row)+1] -= bands[4*(row)+0] * bands[4*(row-1)+2];
bands[4*(row)+3] -= bands[4*(row)+0] * bands[4*(row-1)+3];
lastCol[row] = -bands[4*(row)+0] * lastCol[row-1];
bands[4*(row)+0] = 0.0;
bands[4*(row)+2] /= bands[4*(row)+1];
bands[4*(row)+3] /= bands[4*(row)+1];
lastCol[row] /= bands[4*(row)+1];
bands[4*(row)+1] = 1.0;
if (row < (M-2)) {
bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(row)+3];
bands[4*(M-1)+1] -= bands[4*(M-1)+2]*lastCol[row];
bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(row)+2];
}
}
// Now do last row
// The [2] element and [0] element are now on top of each other
bands[4*(M-1)+0] += bands[4*(M-1)+2];
bands[4*(M-1)+1] -= bands[4*(M-1)+0] * (bands[4*(M-2)+2]+lastCol[M-2]);
bands[4*(M-1)+3] -= bands[4*(M-1)+0] * bands[4*(M-2)+3];
bands[4*(M-1)+3] /= bands[4*(M-1)+1];
// Back substitution, including the lastCol (cyclic) contribution.
coefs[M*cstride] = bands[4*(M-1)+3];
for (int row=M-2; row>=0; row--)
coefs[(row+1)*cstride] =
bands[4*(row)+3] - bands[4*(row)+2]*coefs[(row+2)*cstride] - lastCol[row]*coefs[M*cstride];
// Duplicate wrap-around entries so periodic evaluation needs no modulo.
coefs[0*cstride] = coefs[M*cstride];
coefs[(M+1)*cstride] = coefs[1*cstride];
coefs[(M+2)*cstride] = coefs[2*cstride];
}
// On input, bands contains M rows of {a,b,c,d}: the three cubic
// B-spline basis values in the first three columns and the data value
// in the fourth.  There are no boundary rows -- the antiperiodic
// wrap-around (with a sign flip) closes the system.  cstride gives the
// stride between values in coefs.  On exit, coefs contains the M+3
// interpolating coefficients; the replicated entries are negated.
// Solve the interpolation system for antiperiodic boundary conditions
// (single precision): identical elimination to the periodic solver,
// except the wrap-around couplings carry a sign flip and the
// replicated end coefficients are negated.
void
solve_antiperiodic_interp_1d_s (float bands[], float coefs[],
                                int M, int cstride)
{
  // Flip the sign of the wrap-around couplings before eliminating.
  bands[4*0+0]     *= -1.0;
  bands[4*(M-1)+2] *= -1.0;
  // C99 VLA: fill-in of the last matrix column from the wrap-around.
  float lastCol[M];
  // Now solve:
  // First and last rows are different
  bands[4*(0)+2] /= bands[4*(0)+1];
  bands[4*(0)+0] /= bands[4*(0)+1];
  bands[4*(0)+3] /= bands[4*(0)+1];
  bands[4*(0)+1] = 1.0;
  bands[4*(M-1)+1] -= bands[4*(M-1)+2]*bands[4*(0)+0];
  bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(0)+3];
  bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(0)+2];
  lastCol[0] = bands[4*(0)+0];
  // Forward elimination through the interior rows.
  for (int row=1; row < (M-1); row++) {
    bands[4*(row)+1] -= bands[4*(row)+0] * bands[4*(row-1)+2];
    bands[4*(row)+3] -= bands[4*(row)+0] * bands[4*(row-1)+3];
    lastCol[row]   = -bands[4*(row)+0] * lastCol[row-1];
    bands[4*(row)+0] = 0.0;
    bands[4*(row)+2] /= bands[4*(row)+1];
    bands[4*(row)+3] /= bands[4*(row)+1];
    lastCol[row]  /= bands[4*(row)+1];
    bands[4*(row)+1]  = 1.0;
    if (row < (M-2)) {
      bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(row)+3];
      bands[4*(M-1)+1] -= bands[4*(M-1)+2]*lastCol[row];
      bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(row)+2];
    }
  }
  // Now do last row
  // The [2] element and [0] element are now on top of each other
  bands[4*(M-1)+0] += bands[4*(M-1)+2];
  bands[4*(M-1)+1] -= bands[4*(M-1)+0] * (bands[4*(M-2)+2]+lastCol[M-2]);
  bands[4*(M-1)+3] -= bands[4*(M-1)+0] * bands[4*(M-2)+3];
  bands[4*(M-1)+3] /= bands[4*(M-1)+1];
  coefs[M*cstride] = bands[4*(M-1)+3];
  // Back substitution, folding in the lastCol fill-in.
  for (int row=M-2; row>=0; row--)
    coefs[(row+1)*cstride] =
      bands[4*(row)+3] - bands[4*(row)+2]*coefs[(row+2)*cstride] - lastCol[row]*coefs[M*cstride];
  // Antiperiodic closure: replicated coefficients change sign.
  coefs[0*cstride]     = -coefs[M*cstride];
  coefs[(M+1)*cstride] = -coefs[1*cstride];
  coefs[(M+2)*cstride] = -coefs[2*cstride];
}
#ifdef HIGH_PRECISION
// High-precision variant of find_coefs_1d_s: copies the float data to
// a temporary double buffer, solves with the double-precision routine
// find_coefs_1d_d, and rounds the resulting coefficients back to float.
// Only compiled when HIGH_PRECISION is defined.
void
find_coefs_1d_s (Ugrid grid, BCtype_s bc,
                 float *data, intptr_t dstride,
                 float *coefs, intptr_t cstride)
{
  BCtype_d d_bc;
  double *d_data, *d_coefs;
  // Widen the boundary conditions to double.
  d_bc.lCode = bc.lCode; d_bc.rCode = bc.rCode;
  d_bc.lVal  = bc.lVal;  d_bc.rVal  = bc.rVal;
  int M = grid.num, N;
  // N = number of coefficients: M+3 for (anti)periodic, M+2 otherwise.
  if (bc.lCode == PERIODIC || bc.lCode == ANTIPERIODIC)     N = M+3;
  else                                                      N = M+2;
  // NOTE(review): malloc results are not checked for NULL; a failed
  // allocation dereferences a null pointer below.
  d_data  = malloc (N*sizeof(double));
  d_coefs = malloc (N*sizeof(double));
  // Only the first M slots of d_data are read by the solver; the rest
  // stay uninitialized and unused.
  for (int i=0; i<M; i++)
    d_data[i] = data[i*dstride];
  find_coefs_1d_d (grid, d_bc, d_data, 1, d_coefs, 1);
  // Narrow all N coefficients back into the strided float output.
  for (int i=0; i<N; i++)
    coefs[i*cstride] = d_coefs[i];
  free (d_data);
  free (d_coefs);
}
#else
// Compute the 1-D cubic B-spline coefficients (single precision) for
// M data points read with stride dstride, writing them with stride
// cstride into coefs.  For (anti)periodic BCs the system has M rows
// and produces M+3 coefficients; otherwise two boundary rows are added
// (first-derivative or second-derivative constraints) and M+2
// coefficients are produced.  The bands scratch array is either a C99
// VLA or heap-allocated, depending on HAVE_C_VARARRAYS.
void
find_coefs_1d_s (Ugrid grid, BCtype_s bc,
                 float *data, intptr_t dstride,
                 float *coefs, intptr_t cstride)
{
  size_t M = grid.num;
  // Cubic B-spline basis values at a knot: {1/6, 2/3, 1/6}.
  float basis[4] = {1.0/6.0, 2.0/3.0, 1.0/6.0, 0.0};
  if (bc.lCode == PERIODIC || bc.lCode == ANTIPERIODIC) {
#ifdef HAVE_C_VARARRAYS
    float bands[4*M];
#else
    float *bands = malloc(4*M*sizeof(float));
#endif
    // M rows: basis in the first 3 columns, data value in the 4th.
    for (size_t i=0; i<M; i++) {
      bands[4*i+0] = basis[0];
      bands[4*i+1] = basis[1];
      bands[4*i+2] = basis[2];
      bands[4*i+3] = data[i*dstride];
    }
    if (bc.lCode == PERIODIC)
      solve_periodic_interp_1d_s (bands, coefs, M, cstride);
    else
      solve_antiperiodic_interp_1d_s (bands, coefs, M, cstride);
#ifndef HAVE_C_VARARRAYS
    free (bands);
#endif
  }
  else {
    // Setup boundary conditions
    float abcd_left[4], abcd_right[4];
    // Left boundary
    // FLAT/NATURAL force the prescribed derivative value to zero.
    if (bc.lCode == FLAT || bc.lCode == NATURAL)
      bc.lVal = 0.0;
    if (bc.lCode == FLAT || bc.lCode == DERIV1) {
      // First-derivative row: (-c0 + c2) / (2*delta) = lVal.
      abcd_left[0] = -0.5 * grid.delta_inv;
      abcd_left[1] =  0.0 * grid.delta_inv;
      abcd_left[2] =  0.5 * grid.delta_inv;
      abcd_left[3] =  bc.lVal;
    }
    if (bc.lCode == NATURAL || bc.lCode == DERIV2) {
      // Second-derivative row: (c0 - 2 c1 + c2) / delta^2 = lVal.
      abcd_left[0] = 1.0 * grid.delta_inv * grid.delta_inv;
      abcd_left[1] =-2.0 * grid.delta_inv * grid.delta_inv;
      abcd_left[2] = 1.0 * grid.delta_inv * grid.delta_inv;
      abcd_left[3] = bc.lVal;
    }
    // Right boundary
    if (bc.rCode == FLAT || bc.rCode == NATURAL)
      bc.rVal = 0.0;
    if (bc.rCode == FLAT || bc.rCode == DERIV1) {
      abcd_right[0] = -0.5 * grid.delta_inv;
      abcd_right[1] =  0.0 * grid.delta_inv;
      abcd_right[2] =  0.5 * grid.delta_inv;
      abcd_right[3] =  bc.rVal;
    }
    if (bc.rCode == NATURAL || bc.rCode == DERIV2) {
      abcd_right[0] = 1.0 *grid.delta_inv * grid.delta_inv;
      abcd_right[1] =-2.0 *grid.delta_inv * grid.delta_inv;
      abcd_right[2] = 1.0 *grid.delta_inv * grid.delta_inv;
      abcd_right[3] = bc.rVal;
    }
#ifdef HAVE_C_VARARRAYS
    float bands[4*(M+2)];
#else
    float *bands = malloc ((M+2)*4*sizeof(float));
#endif
    // Row 0 and row M+1 hold the boundary-condition rows.
    for (int i=0; i<4; i++) {
      bands[4*( 0 )+i] = abcd_left[i];
      bands[4*(M+1)+i] = abcd_right[i];
    }
    // Interior rows 1..M: basis values plus the data in the 4th column.
    for (int i=0; i<M; i++) {
      for (int j=0; j<3; j++)
	bands[4*(i+1)+j] = basis[j];
      bands[4*(i+1)+3] = data[i*dstride];
    }
    // Now, solve for coefficients
    solve_deriv_interp_1d_s (bands, coefs, M, cstride);
#ifndef HAVE_C_VARARRAYS
    free (bands);
#endif
  }
}
#endif
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
//// Single-Precision, Real Creation Routines ////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// The routines below create and (re)compute uniform single-precision
// real B-splines.  Each create_* allocates the spline object, fixes
// the grid spacing according to the boundary-condition type, and runs
// the 1-D coefficient solver along each dimension in turn.
// Create a uniform 1-D single-precision real B-spline over x_grid with
// boundary conditions xBC, interpolating the M = x_grid.num samples in
// data.  Allocates M+3 coefficients for (anti)periodic BCs, M+2
// otherwise.  As in create_UBspline_1d_d, data may be NULL, in which
// case only storage is allocated and no coefficients are computed.
// NOTE(review): allocation results are not checked for NULL, matching
// the rest of this file.
UBspline_1d_s*
create_UBspline_1d_s (Ugrid x_grid, BCtype_s xBC, float *data)
{
  // Create new spline
  UBspline_1d_s* restrict spline = malloc (sizeof(UBspline_1d_s));
  spline->spcode = U1D;
  spline->tcode  = SINGLE_REAL;
  spline->xBC = xBC;
  // Setup internal variables
  int M = x_grid.num;
  int N;
  if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) {
    // Periodic grids have num intervals; others have num-1.
    x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num);
    N = M+3;
  }
  else {
    x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1);
    N = M+2;
  }
  x_grid.delta_inv = 1.0/x_grid.delta;
  // Store the grid only after delta/delta_inv are filled in (the
  // original code also stored it earlier, a dead store removed here).
  spline->x_grid = x_grid;
#ifndef HAVE_SSE2
  spline->coefs = malloc (sizeof(float)*N);
#else
  posix_memalign ((void**)&spline->coefs, 16, (sizeof(float)*N));
#endif
  if (data != NULL) // allow deferred data, consistent with create_UBspline_1d_d
    find_coefs_1d_s (spline->x_grid, xBC, data, 1, spline->coefs, 1);
  init_sse_data();
  return spline;
}
// Re-solve the coefficients of an existing 1-D single-precision spline
// for new data values; the grid and boundary conditions are reused and
// the coefficient storage is overwritten in place.
void
recompute_UBspline_1d_s (UBspline_1d_s* spline, float *data)
{
  find_coefs_1d_s (spline->x_grid, spline->xBC, data, 1, spline->coefs, 1);
}
// Create a uniform 2-D single-precision real B-spline.  data is
// row-major [Mx][My].  The tensor-product coefficients are found by
// solving 1-D systems first along x (into the coefficient array), then
// along y (in place over all Nx coefficient rows).
UBspline_2d_s*
create_UBspline_2d_s (Ugrid x_grid, Ugrid y_grid,
                      BCtype_s xBC, BCtype_s yBC, float *data)
{
  // Create new spline
  UBspline_2d_s* restrict spline = malloc (sizeof(UBspline_2d_s));
  spline->spcode = U2D;
  spline->tcode  = SINGLE_REAL;
  spline->xBC = xBC;
  spline->yBC = yBC;
  // Setup internal variables
  int Mx = x_grid.num;
  int My = y_grid.num;
  int Nx, Ny;
  // N* = M*+3 for (anti)periodic BCs, M*+2 otherwise; N*-3 is then the
  // interval count used to fix the grid spacing.
  if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC)     Nx = Mx+3;
  else                                                        Nx = Mx+2;
  x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3);
  x_grid.delta_inv = 1.0/x_grid.delta;
  spline->x_grid   = x_grid;
  if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC)     Ny = My+3;
  else                                                        Ny = My+2;
  y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3);
  y_grid.delta_inv = 1.0/y_grid.delta;
  spline->y_grid   = y_grid;
  spline->x_stride = Ny;
#ifndef HAVE_SSE2
  spline->coefs = malloc (sizeof(float)*Nx*Ny);
#else
  posix_memalign ((void**)&spline->coefs, 16, sizeof(float)*Nx*Ny);
#endif
  // First, solve in the X-direction
  // Stride between x-neighbors is My in data, Ny in coefs.
  for (int iy=0; iy<My; iy++) {
    intptr_t doffset = iy;
    intptr_t coffset = iy;
    find_coefs_1d_s (spline->x_grid, spline->xBC, data+doffset, My,
                     spline->coefs+coffset, Ny);
  }
  // Now, solve in the Y-direction
  // Each coefficient row is contiguous, so this solves in place.
  for (int ix=0; ix<Nx; ix++) {
    intptr_t doffset = ix*Ny;
    intptr_t coffset = ix*Ny;
    find_coefs_1d_s (spline->y_grid, spline->yBC, spline->coefs+doffset, 1,
                     spline->coefs+coffset, 1);
  }
  init_sse_data();
  return spline;
}
// Re-solve the coefficients of an existing 2-D single-precision spline
// for new data, reusing the stored grids and boundary conditions.
// Mirrors the solve sequence of create_UBspline_2d_s: x-direction
// first, then y-direction in place.
void
recompute_UBspline_2d_s (UBspline_2d_s* spline, float *data)
{
  int Mx = spline->x_grid.num;
  int My = spline->y_grid.num;
  int Nx, Ny;
  if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC)     Nx = Mx+3;
  else                                                                        Nx = Mx+2;
  if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC)     Ny = My+3;
  else                                                                        Ny = My+2;
  // First, solve in the X-direction
  for (int iy=0; iy<My; iy++) {
    intptr_t doffset = iy;
    intptr_t coffset = iy;
    find_coefs_1d_s (spline->x_grid, spline->xBC, data+doffset, My,
                     spline->coefs+coffset, Ny);
  }
  // Now, solve in the Y-direction
  for (int ix=0; ix<Nx; ix++) {
    intptr_t doffset = ix*Ny;
    intptr_t coffset = ix*Ny;
    find_coefs_1d_s (spline->y_grid, spline->yBC, spline->coefs+doffset, 1,
                     spline->coefs+coffset, 1);
  }
}
// Create a uniform 3-D single-precision real B-spline.  data is
// row-major [Mx][My][Mz].  Tensor-product coefficients are built by
// three sweeps of 1-D solves: x (data -> coefs), then y and z in place
// over the coefficient array.
UBspline_3d_s*
create_UBspline_3d_s (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid,
                      BCtype_s xBC, BCtype_s yBC, BCtype_s zBC,
                      float *data)
{
  // Create new spline
  UBspline_3d_s* spline = malloc (sizeof(UBspline_3d_s));
  spline->spcode = U3D;
  spline->tcode  = SINGLE_REAL;
  spline->xBC = xBC;
  spline->yBC = yBC;
  spline->zBC = zBC;
  // Setup internal variables
  int Mx = x_grid.num;  int My = y_grid.num; int Mz = z_grid.num;
  int Nx, Ny, Nz;
  // N* = M*+3 for (anti)periodic BCs, M*+2 otherwise.
  if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC)     Nx = Mx+3;
  else                                                        Nx = Mx+2;
  x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3);
  x_grid.delta_inv = 1.0/x_grid.delta;
  spline->x_grid   = x_grid;
  if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC)     Ny = My+3;
  else                                                        Ny = My+2;
  y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3);
  y_grid.delta_inv = 1.0/y_grid.delta;
  spline->y_grid   = y_grid;
  if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC)     Nz = Mz+3;
  else                                                        Nz = Mz+2;
  z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3);
  z_grid.delta_inv = 1.0/z_grid.delta;
  spline->z_grid   = z_grid;
  spline->x_stride = Ny*Nz;
  spline->y_stride = Nz;
  // size_t arithmetic avoids int overflow for large grids.
  spline->coefs_size=(size_t)Nx*(size_t)Ny*(size_t)Nz;
#ifndef HAVE_SSE2
  spline->coefs = malloc (sizeof(float)*spline->coefs_size);
#else
  posix_memalign ((void**)&spline->coefs, 16, (sizeof(float)*spline->coefs_size));
#endif
  // First, solve in the X-direction
  for (int iy=0; iy<My; iy++)
    for (int iz=0; iz<Mz; iz++) {
      intptr_t doffset = iy*Mz+iz;   // data uses My*Mz layout
      intptr_t coffset = iy*Nz+iz;   // coefs use Ny*Nz layout
      find_coefs_1d_s (spline->x_grid, xBC, data+doffset, My*Mz,
                       spline->coefs+coffset, Ny*Nz);
    }
  // Now, solve in the Y-direction
  for (int ix=0; ix<Nx; ix++)
    for (int iz=0; iz<Nz; iz++) {
      intptr_t doffset = ix*Ny*Nz + iz;
      intptr_t coffset = ix*Ny*Nz + iz;
      find_coefs_1d_s (spline->y_grid, yBC, spline->coefs+doffset, Nz,
                       spline->coefs+coffset, Nz);
    }
  // Now, solve in the Z-direction
  for (int ix=0; ix<Nx; ix++)
    for (int iy=0; iy<Ny; iy++) {
      intptr_t doffset = (ix*Ny+iy)*Nz;
      intptr_t coffset = (ix*Ny+iy)*Nz;
      find_coefs_1d_s (spline->z_grid, zBC, spline->coefs+doffset, 1,
                       spline->coefs+coffset, 1);
    }
  init_sse_data();
  return spline;
}
// Re-solve the coefficients of an existing 3-D single-precision spline
// for new data, reusing the stored grids and boundary conditions.
// Mirrors the three solve sweeps of create_UBspline_3d_s.
void
recompute_UBspline_3d_s (UBspline_3d_s* spline, float *data)
{
  int Mx = spline->x_grid.num;
  int My = spline->y_grid.num;
  int Mz = spline->z_grid.num;
  int Nx, Ny, Nz;
  if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC)     Nx = Mx+3;
  else                                                                        Nx = Mx+2;
  if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC)     Ny = My+3;
  else                                                                        Ny = My+2;
  if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC)     Nz = Mz+3;
  else                                                                        Nz = Mz+2;
  // First, solve in the X-direction
  for (int iy=0; iy<My; iy++)
    for (int iz=0; iz<Mz; iz++) {
      intptr_t doffset = iy*Mz+iz;
      intptr_t coffset = iy*Nz+iz;
      find_coefs_1d_s (spline->x_grid, spline->xBC, data+doffset, My*Mz,
                       spline->coefs+coffset, Ny*Nz);
    }
  // Now, solve in the Y-direction
  for (int ix=0; ix<Nx; ix++)
    for (int iz=0; iz<Nz; iz++) {
      intptr_t doffset = ix*Ny*Nz + iz;
      intptr_t coffset = ix*Ny*Nz + iz;
      find_coefs_1d_s (spline->y_grid, spline->yBC, spline->coefs+doffset, Nz,
                       spline->coefs+coffset, Nz);
    }
  // Now, solve in the Z-direction
  for (int ix=0; ix<Nx; ix++)
    for (int iy=0; iy<Ny; iy++) {
      intptr_t doffset = (ix*Ny+iy)*Nz;
      intptr_t coffset = (ix*Ny+iy)*Nz;
      find_coefs_1d_s (spline->z_grid, spline->zBC, spline->coefs+doffset, 1,
                       spline->coefs+coffset, 1);
    }
}
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
//// Single-Precision, Complex Creation Routines ////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// The routines below create and (re)compute uniform single-precision
// complex B-splines.  Real and imaginary parts are interleaved in
// memory and solved independently by running the single-precision real
// coefficient solver twice with a stride of 2.
// Create a uniform 1-D single-precision complex B-spline.  The complex
// data are treated as interleaved float pairs; real and imaginary
// coefficient sets are solved independently with stride 2.
UBspline_1d_c*
create_UBspline_1d_c (Ugrid x_grid, BCtype_c xBC, complex_float *data)
{
  // Create new spline
  UBspline_1d_c* restrict spline = malloc (sizeof(UBspline_1d_c));
  spline->spcode = U1D;
  spline->tcode  = SINGLE_COMPLEX;
  spline->xBC = xBC;
  // Setup internal variables
  int M = x_grid.num;
  int N;
  if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) {
    x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num);
    N = M+3;
  }
  else {
    x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1);
    N = M+2;
  }
  x_grid.delta_inv = 1.0/x_grid.delta;
  spline->x_grid   = x_grid;
#ifndef HAVE_SSE2
  spline->coefs = malloc (2*sizeof(float)*N);
#else
  posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(float)*N);
#endif
  // Split the complex boundary conditions into real/imag scalar BCs.
  BCtype_s xBC_r, xBC_i;
  xBC_r.lCode = xBC.lCode;  xBC_r.rCode = xBC.rCode;
  xBC_r.lVal  = xBC.lVal_r; xBC_r.rVal  = xBC.rVal_r;
  xBC_i.lCode = xBC.lCode;  xBC_i.rCode = xBC.rCode;
  xBC_i.lVal  = xBC.lVal_i; xBC_i.rVal  = xBC.rVal_i;
  // Real part
  find_coefs_1d_s (spline->x_grid, xBC_r,
                   (float*)data, 2, (float*)spline->coefs, 2);
  // Imaginary part
  find_coefs_1d_s (spline->x_grid, xBC_i,
                   ((float*)data)+1, 2, ((float*)spline->coefs+1), 2);
  init_sse_data();
  return spline;
}
// Re-solve the coefficients of an existing 1-D complex spline for new
// data, reusing the stored grid and boundary conditions.  Real and
// imaginary parts are solved independently with stride 2.
void
recompute_UBspline_1d_c (UBspline_1d_c* spline, complex_float *data)
{
  BCtype_s xBC_r, xBC_i;
  xBC_r.lCode = spline->xBC.lCode;  xBC_r.rCode = spline->xBC.rCode;
  xBC_r.lVal  = spline->xBC.lVal_r; xBC_r.rVal  = spline->xBC.rVal_r;
  xBC_i.lCode = spline->xBC.lCode;  xBC_i.rCode = spline->xBC.rCode;
  xBC_i.lVal  = spline->xBC.lVal_i; xBC_i.rVal  = spline->xBC.rVal_i;
  // Real part
  find_coefs_1d_s (spline->x_grid, xBC_r,
                   (float*)data, 2, (float*)spline->coefs, 2);
  // Imaginary part
  find_coefs_1d_s (spline->x_grid, xBC_i,
                   ((float*)data)+1, 2, ((float*)spline->coefs+1), 2);
}
// Create a uniform 2-D single-precision complex B-spline.  Complex
// values are interleaved float pairs, so every stride/offset of the
// real-valued 2-D algorithm is doubled, and the 1-D solver runs twice
// per line (real then imaginary).
UBspline_2d_c*
create_UBspline_2d_c (Ugrid x_grid, Ugrid y_grid,
                      BCtype_c xBC, BCtype_c yBC, complex_float *data)
{
  // Create new spline
  UBspline_2d_c* restrict spline = malloc (sizeof(UBspline_2d_c));
  spline->spcode = U2D;
  spline->tcode  = SINGLE_COMPLEX;
  spline->xBC = xBC;
  spline->yBC = yBC;
  // Setup internal variables
  int Mx = x_grid.num;
  int My = y_grid.num;
  int Nx, Ny;
  if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC)     Nx = Mx+3;
  else                                                        Nx = Mx+2;
  x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3);
  x_grid.delta_inv = 1.0/x_grid.delta;
  spline->x_grid   = x_grid;
  if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC)     Ny = My+3;
  else                                                        Ny = My+2;
  y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3);
  y_grid.delta_inv = 1.0/y_grid.delta;
  spline->y_grid   = y_grid;
  spline->x_stride = Ny;
#ifndef HAVE_SSE2
  spline->coefs = malloc (2*sizeof(float)*Nx*Ny);
#else
  posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(float)*Nx*Ny);
#endif
  // Split complex BCs into independent real/imag scalar BCs.
  BCtype_s xBC_r, xBC_i, yBC_r, yBC_i;
  xBC_r.lCode = xBC.lCode;  xBC_r.rCode = xBC.rCode;
  xBC_r.lVal  = xBC.lVal_r; xBC_r.rVal  = xBC.rVal_r;
  xBC_i.lCode = xBC.lCode;  xBC_i.rCode = xBC.rCode;
  xBC_i.lVal  = xBC.lVal_i; xBC_i.rVal  = xBC.rVal_i;
  yBC_r.lCode = yBC.lCode;  yBC_r.rCode = yBC.rCode;
  yBC_r.lVal  = yBC.lVal_r; yBC_r.rVal  = yBC.rVal_r;
  yBC_i.lCode = yBC.lCode;  yBC_i.rCode = yBC.rCode;
  yBC_i.lVal  = yBC.lVal_i; yBC_i.rVal  = yBC.rVal_i;
  // First, solve in the X-direction
  for (int iy=0; iy<My; iy++) {
    intptr_t doffset = 2*iy;
    intptr_t coffset = 2*iy;
    // Real part
    find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, 2*My,
                     (float*)spline->coefs+coffset, 2*Ny);
    // Imag part
    find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, 2*My,
                     ((float*)spline->coefs)+coffset+1, 2*Ny);
  }
  // Now, solve in the Y-direction
  for (int ix=0; ix<Nx; ix++) {
    intptr_t doffset = 2*ix*Ny;
    intptr_t coffset = 2*ix*Ny;
    // Real part
    find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)spline->coefs)+doffset, 2,
                     ((float*)spline->coefs)+coffset, 2);
    // Imag part
    find_coefs_1d_s (spline->y_grid, yBC_i, ((float*)spline->coefs)+doffset+1, 2,
                     ((float*)spline->coefs)+coffset+1, 2);
  }
  init_sse_data();
  return spline;
}
// Re-solve the coefficients of an existing 2-D complex spline for new
// data, reusing the stored grids and boundary conditions.  Mirrors the
// solve sequence of create_UBspline_2d_c.
void
recompute_UBspline_2d_c (UBspline_2d_c* spline, complex_float *data)
{
  // Setup internal variables
  int Mx = spline->x_grid.num;
  int My = spline->y_grid.num;
  int Nx, Ny;
  if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC)
    Nx = Mx+3;
  else
    Nx = Mx+2;
  if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC)
    Ny = My+3;
  else
    Ny = My+2;
  BCtype_s xBC_r, xBC_i, yBC_r, yBC_i;
  xBC_r.lCode = spline->xBC.lCode;  xBC_r.rCode = spline->xBC.rCode;
  xBC_r.lVal  = spline->xBC.lVal_r; xBC_r.rVal  = spline->xBC.rVal_r;
  xBC_i.lCode = spline->xBC.lCode;  xBC_i.rCode = spline->xBC.rCode;
  xBC_i.lVal  = spline->xBC.lVal_i; xBC_i.rVal  = spline->xBC.rVal_i;
  yBC_r.lCode = spline->yBC.lCode;  yBC_r.rCode = spline->yBC.rCode;
  yBC_r.lVal  = spline->yBC.lVal_r; yBC_r.rVal  = spline->yBC.rVal_r;
  yBC_i.lCode = spline->yBC.lCode;  yBC_i.rCode = spline->yBC.rCode;
  yBC_i.lVal  = spline->yBC.lVal_i; yBC_i.rVal  = spline->yBC.rVal_i;
  // First, solve in the X-direction
  for (int iy=0; iy<My; iy++) {
    intptr_t doffset = 2*iy;
    intptr_t coffset = 2*iy;
    // Real part
    find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, 2*My,
                     (float*)spline->coefs+coffset, 2*Ny);
    // Imag part
    find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, 2*My,
                     ((float*)spline->coefs)+coffset+1, 2*Ny);
  }
  // Now, solve in the Y-direction
  for (int ix=0; ix<Nx; ix++) {
    intptr_t doffset = 2*ix*Ny;
    intptr_t coffset = 2*ix*Ny;
    // Real part
    find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)spline->coefs)+doffset, 2,
                     ((float*)spline->coefs)+coffset, 2);
    // Imag part
    find_coefs_1d_s (spline->y_grid, yBC_i, ((float*)spline->coefs)+doffset+1, 2,
                     ((float*)spline->coefs)+coffset+1, 2);
  }
}
// Create a uniform 3-D single-precision complex B-spline.  Complex
// values are interleaved float pairs; every stride/offset of the real
// 3-D algorithm is doubled and each 1-D line is solved twice (real
// then imaginary).
UBspline_3d_c*
create_UBspline_3d_c (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid,
                      BCtype_c xBC, BCtype_c yBC, BCtype_c zBC,
                      complex_float *data)
{
  // Create new spline
  UBspline_3d_c* restrict spline = malloc (sizeof(UBspline_3d_c));
  spline->spcode = U3D;
  spline->tcode  = SINGLE_COMPLEX;
  spline->xBC = xBC;
  spline->yBC = yBC;
  spline->zBC = zBC;
  // Setup internal variables
  int Mx = x_grid.num;  int My = y_grid.num; int Mz = z_grid.num;
  int Nx, Ny, Nz;
  if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC)     Nx = Mx+3;
  else                                                        Nx = Mx+2;
  x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3);
  x_grid.delta_inv = 1.0/x_grid.delta;
  spline->x_grid   = x_grid;
  if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC)     Ny = My+3;
  else                                                        Ny = My+2;
  y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3);
  y_grid.delta_inv = 1.0/y_grid.delta;
  spline->y_grid   = y_grid;
  if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC)     Nz = Mz+3;
  else                                                        Nz = Mz+2;
  z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3);
  z_grid.delta_inv = 1.0/z_grid.delta;
  spline->z_grid   = z_grid;
  spline->x_stride = Ny*Nz;
  spline->y_stride = Nz;
#ifndef HAVE_SSE2
  spline->coefs = malloc (2*sizeof(float)*Nx*Ny*Nz);
#else
  posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(float)*Nx*Ny*Nz);
#endif
  // Split complex BCs into independent real/imag scalar BCs.
  BCtype_s xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i;
  xBC_r.lCode = xBC.lCode;  xBC_r.rCode = xBC.rCode;
  xBC_r.lVal  = xBC.lVal_r; xBC_r.rVal  = xBC.rVal_r;
  xBC_i.lCode = xBC.lCode;  xBC_i.rCode = xBC.rCode;
  xBC_i.lVal  = xBC.lVal_i; xBC_i.rVal  = xBC.rVal_i;
  yBC_r.lCode = yBC.lCode;  yBC_r.rCode = yBC.rCode;
  yBC_r.lVal  = yBC.lVal_r; yBC_r.rVal  = yBC.rVal_r;
  yBC_i.lCode = yBC.lCode;  yBC_i.rCode = yBC.rCode;
  yBC_i.lVal  = yBC.lVal_i; yBC_i.rVal  = yBC.rVal_i;
  zBC_r.lCode = zBC.lCode;  zBC_r.rCode = zBC.rCode;
  zBC_r.lVal  = zBC.lVal_r; zBC_r.rVal  = zBC.rVal_r;
  zBC_i.lCode = zBC.lCode;  zBC_i.rCode = zBC.rCode;
  zBC_i.lVal  = zBC.lVal_i; zBC_i.rVal  = zBC.rVal_i;
  // First, solve in the X-direction
  for (int iy=0; iy<My; iy++)
    for (int iz=0; iz<Mz; iz++) {
      intptr_t doffset = 2*(iy*Mz+iz);
      intptr_t coffset = 2*(iy*Nz+iz);
      // Real part
      find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, 2*My*Mz,
                       ((float*)spline->coefs)+coffset, 2*Ny*Nz);
      // Imag part
      find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, 2*My*Mz,
                       ((float*)spline->coefs)+coffset+1, 2*Ny*Nz);
    }
  // Now, solve in the Y-direction
  for (int ix=0; ix<Nx; ix++)
    for (int iz=0; iz<Nz; iz++) {
      intptr_t doffset = 2*(ix*Ny*Nz + iz);
      intptr_t coffset = 2*(ix*Ny*Nz + iz);
      // Real part
      find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)spline->coefs)+doffset, 2*Nz,
                       ((float*)spline->coefs)+coffset, 2*Nz);
      // Imag part
      find_coefs_1d_s (spline->y_grid, yBC_i, ((float*)spline->coefs)+doffset+1, 2*Nz,
                       ((float*)spline->coefs)+coffset+1, 2*Nz);
    }
  // Now, solve in the Z-direction
  for (int ix=0; ix<Nx; ix++)
    for (int iy=0; iy<Ny; iy++) {
      intptr_t doffset = 2*((ix*Ny+iy)*Nz);
      intptr_t coffset = 2*((ix*Ny+iy)*Nz);
      // Real part
      find_coefs_1d_s (spline->z_grid, zBC_r, ((float*)spline->coefs)+doffset, 2,
                       ((float*)spline->coefs)+coffset, 2);
      // Imag part
      find_coefs_1d_s (spline->z_grid, zBC_i, ((float*)spline->coefs)+doffset+1, 2,
                       ((float*)spline->coefs)+coffset+1, 2);
    }
  init_sse_data();
  return spline;
}
// Re-solve the coefficients of an existing 3-D complex spline for new
// data, reusing the stored grids and boundary conditions.  Mirrors the
// three solve sweeps of create_UBspline_3d_c.
void
recompute_UBspline_3d_c (UBspline_3d_c* spline, complex_float *data)
{
  // Setup internal variables
  int Mx = spline->x_grid.num;
  int My = spline->y_grid.num;
  int Mz = spline->z_grid.num;
  int Nx, Ny, Nz;
  if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC)     Nx = Mx+3;
  else                                                                        Nx = Mx+2;
  if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC)     Ny = My+3;
  else                                                                        Ny = My+2;
  if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC)     Nz = Mz+3;
  else                                                                        Nz = Mz+2;
  BCtype_s xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i;
  xBC_r.lCode = spline->xBC.lCode;  xBC_r.rCode = spline->xBC.rCode;
  xBC_r.lVal  = spline->xBC.lVal_r; xBC_r.rVal  = spline->xBC.rVal_r;
  xBC_i.lCode = spline->xBC.lCode;  xBC_i.rCode = spline->xBC.rCode;
  xBC_i.lVal  = spline->xBC.lVal_i; xBC_i.rVal  = spline->xBC.rVal_i;
  yBC_r.lCode = spline->yBC.lCode;  yBC_r.rCode = spline->yBC.rCode;
  yBC_r.lVal  = spline->yBC.lVal_r; yBC_r.rVal  = spline->yBC.rVal_r;
  yBC_i.lCode = spline->yBC.lCode;  yBC_i.rCode = spline->yBC.rCode;
  yBC_i.lVal  = spline->yBC.lVal_i; yBC_i.rVal  = spline->yBC.rVal_i;
  zBC_r.lCode = spline->zBC.lCode;  zBC_r.rCode = spline->zBC.rCode;
  zBC_r.lVal  = spline->zBC.lVal_r; zBC_r.rVal  = spline->zBC.rVal_r;
  zBC_i.lCode = spline->zBC.lCode;  zBC_i.rCode = spline->zBC.rCode;
  zBC_i.lVal  = spline->zBC.lVal_i; zBC_i.rVal  = spline->zBC.rVal_i;
  // First, solve in the X-direction
  for (int iy=0; iy<My; iy++)
    for (int iz=0; iz<Mz; iz++) {
      intptr_t doffset = 2*(iy*Mz+iz);
      intptr_t coffset = 2*(iy*Nz+iz);
      // Real part
      find_coefs_1d_s (spline->x_grid, xBC_r, ((float*)data)+doffset, 2*My*Mz,
                       ((float*)spline->coefs)+coffset, 2*Ny*Nz);
      // Imag part
      find_coefs_1d_s (spline->x_grid, xBC_i, ((float*)data)+doffset+1, 2*My*Mz,
                       ((float*)spline->coefs)+coffset+1, 2*Ny*Nz);
    }
  // Now, solve in the Y-direction
  for (int ix=0; ix<Nx; ix++)
    for (int iz=0; iz<Nz; iz++) {
      intptr_t doffset = 2*(ix*Ny*Nz + iz);
      intptr_t coffset = 2*(ix*Ny*Nz + iz);
      // Real part
      find_coefs_1d_s (spline->y_grid, yBC_r, ((float*)spline->coefs)+doffset, 2*Nz,
                       ((float*)spline->coefs)+coffset, 2*Nz);
      // Imag part
      find_coefs_1d_s (spline->y_grid, yBC_i, ((float*)spline->coefs)+doffset+1, 2*Nz,
                       ((float*)spline->coefs)+coffset+1, 2*Nz);
    }
  // Now, solve in the Z-direction
  for (int ix=0; ix<Nx; ix++)
    for (int iy=0; iy<Ny; iy++) {
      intptr_t doffset = 2*((ix*Ny+iy)*Nz);
      intptr_t coffset = 2*((ix*Ny+iy)*Nz);
      // Real part
      find_coefs_1d_s (spline->z_grid, zBC_r, ((float*)spline->coefs)+doffset, 2,
                       ((float*)spline->coefs)+coffset, 2);
      // Imag part
      find_coefs_1d_s (spline->z_grid, zBC_i, ((float*)spline->coefs)+doffset+1, 2,
                       ((float*)spline->coefs)+coffset+1, 2);
    }
}
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
//// Double-Precision, Real Creation Routines ////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// On input, bands should be filled with:
// row 0 : abcdInitial from boundary conditions
// rows 1:M: basis functions in first 3 cols, data in last
// row M+1 : abcdFinal from boundary conditions
// cstride gives the stride between values in coefs.
// On exit, coefs with contain interpolating B-spline coefs
// Solve the (M+2)x(M+2) banded interpolation system with derivative
// boundary conditions (double precision).  bands holds M+2 rows of
// {a,b,c,d}: row 0 and row M+1 are the boundary-condition rows, rows
// 1..M the B-spline interpolation rows with the data in column d.
// Performs in-place Gaussian elimination followed by back substitution
// and writes M+2 coefficients, strided by cstride, into coefs.
void
solve_deriv_interp_1d_d (double bands[], double coefs[],
			 int M, int cstride)
{
  double *top  = bands;       // boundary row 0
  double *next = bands + 4;   // first interpolation row
  // Normalize the boundary row on its leading element.
  top[1] /= top[0];
  top[2] /= top[0];
  top[3] /= top[0];
  top[0] = 1.0;
  // Eliminate the boundary row from row 1.
  next[1] -= next[0]*top[1];
  next[2] -= next[0]*top[2];
  next[3] -= next[0]*top[3];
  top[0] = 0.0;
  // Normalize row 1 on its diagonal.
  next[2] /= next[1];
  next[3] /= next[1];
  next[1] = 1.0;
  // Forward elimination through rows 2..M.
  for (int r = 2; r < M+1; r++) {
    double *cur  = bands + 4*r;
    double *prev = bands + 4*(r-1);
    cur[1] -= cur[0]*prev[2];
    cur[3] -= cur[0]*prev[3];
    cur[2] /= cur[1];
    cur[3] /= cur[1];
    cur[0] = 0.0;
    cur[1] = 1.0;
  }
  // Final boundary row: eliminate against rows M-1 and M.
  double *bot = bands + 4*(M+1);
  bot[1] -= bot[0]*bands[4*(M-1)+2];
  bot[3] -= bot[0]*bands[4*(M-1)+3];
  bot[2] -= bot[1]*bands[4*M+2];
  bot[3] -= bot[1]*bands[4*M+3];
  bot[3] /= bot[2];
  bot[2] = 1.0;
  coefs[(M+1)*cstride] = bot[3];
  // Back substitution through the interior rows.
  for (int r = M; r > 0; r--)
    coefs[r*cstride] = bands[4*r+3] - bands[4*r+2]*coefs[(r+1)*cstride];
  // Finish with the first boundary row, which has two superdiagonals.
  coefs[0] = bands[3] - bands[1]*coefs[1*cstride] - bands[2]*coefs[2*cstride];
}
// On input, bands contains M rows of {a,b,c,d}: the three cubic
// B-spline basis values in the first three columns and the data value
// in the fourth.  There are no boundary rows -- the periodic
// wrap-around closes the system.  cstride gives the stride between
// values in coefs.  On exit, coefs contains the M+3 interpolating
// B-spline coefficients (the extra three replicate values across the period).
// Solve the cyclic tridiagonal interpolation system arising from
// periodic boundary conditions (double precision).  Same algorithm as
// solve_periodic_interp_1d_s: in-place Gaussian elimination, with the
// fill-in of the last matrix column tracked in lastCol.
void
solve_periodic_interp_1d_d (double bands[], double coefs[],
			    int M, intptr_t cstride)
{
  // C99 VLA: fill-in of the last matrix column caused by periodicity.
  double lastCol[M];
  // Now solve:
  // First and last rows are different
  bands[4*(0)+2] /= bands[4*(0)+1];
  bands[4*(0)+0] /= bands[4*(0)+1];
  bands[4*(0)+3] /= bands[4*(0)+1];
  bands[4*(0)+1] = 1.0;
  // Eliminate row 0's contribution from the last row (wrap-around).
  bands[4*(M-1)+1] -= bands[4*(M-1)+2]*bands[4*(0)+0];
  bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(0)+3];
  bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(0)+2];
  lastCol[0] = bands[4*(0)+0];
  // Forward elimination through the interior rows.
  for (int row=1; row < (M-1); row++) {
    bands[4*(row)+1] -= bands[4*(row)+0] * bands[4*(row-1)+2];
    bands[4*(row)+3] -= bands[4*(row)+0] * bands[4*(row-1)+3];
    lastCol[row]   = -bands[4*(row)+0] * lastCol[row-1];
    bands[4*(row)+0] = 0.0;
    bands[4*(row)+2] /= bands[4*(row)+1];
    bands[4*(row)+3] /= bands[4*(row)+1];
    lastCol[row]  /= bands[4*(row)+1];
    bands[4*(row)+1]  = 1.0;
    if (row < (M-2)) {
      bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(row)+3];
      bands[4*(M-1)+1] -= bands[4*(M-1)+2]*lastCol[row];
      bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(row)+2];
    }
  }
  // Now do last row
  // The [2] element and [0] element are now on top of each other
  bands[4*(M-1)+0] += bands[4*(M-1)+2];
  bands[4*(M-1)+1] -= bands[4*(M-1)+0] * (bands[4*(M-2)+2]+lastCol[M-2]);
  bands[4*(M-1)+3] -= bands[4*(M-1)+0] * bands[4*(M-2)+3];
  bands[4*(M-1)+3] /= bands[4*(M-1)+1];
  coefs[M*cstride] = bands[4*(M-1)+3];
  // Back substitution, folding in the lastCol fill-in.
  for (int row=M-2; row>=0; row--)
    coefs[(row+1)*cstride] =
      bands[4*(row)+3] - bands[4*(row)+2]*coefs[(row+2)*cstride] - lastCol[row]*coefs[M*cstride];
  // Replicate across the period: c[0]=c[M], c[M+1]=c[1], c[M+2]=c[2].
  coefs[0*cstride] = coefs[M*cstride];
  coefs[(M+1)*cstride] = coefs[1*cstride];
  coefs[(M+2)*cstride] = coefs[2*cstride];
}
// On input, bands contains M rows of {a,b,c,d}: the three cubic
// B-spline basis values in the first three columns and the data value
// in the fourth.  There are no boundary rows -- the antiperiodic
// wrap-around (with a sign flip) closes the system.  cstride gives the
// stride between values in coefs.  On exit, coefs contains the M+3
// interpolating coefficients; the replicated entries are negated.
// Solve the interpolation system for antiperiodic boundary conditions
// (double precision): same elimination as the periodic solver, with
// sign-flipped wrap-around couplings and negated replicated
// coefficients.
void
solve_antiperiodic_interp_1d_d (double bands[], double coefs[],
				int M, int cstride)
{
  // C99 VLA: fill-in of the last matrix column from the wrap-around.
  double lastCol[M];
  // Flip the sign of the wrap-around couplings before eliminating.
  bands[4*0+0]     *= -1.0;
  bands[4*(M-1)+2] *= -1.0;
  // Now solve:
  // First and last rows are different
  bands[4*(0)+2] /= bands[4*(0)+1];
  bands[4*(0)+0] /= bands[4*(0)+1];
  bands[4*(0)+3] /= bands[4*(0)+1];
  bands[4*(0)+1] = 1.0;
  bands[4*(M-1)+1] -= bands[4*(M-1)+2]*bands[4*(0)+0];
  bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(0)+3];
  bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(0)+2];
  lastCol[0] = bands[4*(0)+0];
  // Forward elimination through the interior rows.
  for (int row=1; row < (M-1); row++) {
    bands[4*(row)+1] -= bands[4*(row)+0] * bands[4*(row-1)+2];
    bands[4*(row)+3] -= bands[4*(row)+0] * bands[4*(row-1)+3];
    lastCol[row]   = -bands[4*(row)+0] * lastCol[row-1];
    bands[4*(row)+0] = 0.0;
    bands[4*(row)+2] /= bands[4*(row)+1];
    bands[4*(row)+3] /= bands[4*(row)+1];
    lastCol[row]  /= bands[4*(row)+1];
    bands[4*(row)+1]  = 1.0;
    if (row < (M-2)) {
      bands[4*(M-1)+3] -= bands[4*(M-1)+2]*bands[4*(row)+3];
      bands[4*(M-1)+1] -= bands[4*(M-1)+2]*lastCol[row];
      bands[4*(M-1)+2] = -bands[4*(M-1)+2]*bands[4*(row)+2];
    }
  }
  // Now do last row
  // The [2] element and [0] element are now on top of each other
  bands[4*(M-1)+0] += bands[4*(M-1)+2];
  bands[4*(M-1)+1] -= bands[4*(M-1)+0] * (bands[4*(M-2)+2]+lastCol[M-2]);
  bands[4*(M-1)+3] -= bands[4*(M-1)+0] * bands[4*(M-2)+3];
  bands[4*(M-1)+3] /= bands[4*(M-1)+1];
  coefs[M*cstride] = bands[4*(M-1)+3];
  // Back substitution, folding in the lastCol fill-in.
  for (int row=M-2; row>=0; row--)
    coefs[(row+1)*cstride] =
      bands[4*(row)+3] - bands[4*(row)+2]*coefs[(row+2)*cstride] - lastCol[row]*coefs[M*cstride];
  // Antiperiodic closure: replicated coefficients change sign.
  coefs[0*cstride]     = -coefs[M*cstride];
  coefs[(M+1)*cstride] = -coefs[1*cstride];
  coefs[(M+2)*cstride] = -coefs[2*cstride];
}
// Compute the 1-D cubic B-spline coefficients (double precision) for
// M data points read with stride dstride, written with stride cstride
// into coefs.  (Anti)periodic BCs yield M+3 coefficients from an
// M-row system; other BCs add two boundary rows and yield M+2.
void
find_coefs_1d_d (Ugrid grid, BCtype_d bc,
                 double *data, intptr_t dstride,
                 double *coefs, intptr_t cstride)
{
  int M = grid.num;
  // Cubic B-spline basis values at a knot: {1/6, 2/3, 1/6}.
  double basis[4] = {1.0/6.0, 2.0/3.0, 1.0/6.0, 0.0};
  if (bc.lCode == PERIODIC || bc.lCode == ANTIPERIODIC) {
#ifdef HAVE_C_VARARRAYS
    double bands[M*4];
#else
    double *bands = malloc (4*M*sizeof(double));
#endif
    // M rows: basis in the first 3 columns, data value in the 4th.
    for (int i=0; i<M; i++) {
      bands[4*i+0] = basis[0];
      bands[4*i+1] = basis[1];
      bands[4*i+2] = basis[2];
      bands[4*i+3] = data[i*dstride];
    }
    // Dispatch tests ANTIPERIODIC first here (the float version tests
    // PERIODIC first); the two orderings are equivalent.
    if (bc.lCode == ANTIPERIODIC)
      solve_antiperiodic_interp_1d_d (bands, coefs, M, cstride);
    else
      solve_periodic_interp_1d_d (bands, coefs, M, cstride);
#ifndef HAVE_C_VARARRAYS
    free (bands);
#endif
  }
  else {
    // Setup boundary conditions
    double abcd_left[4], abcd_right[4];
    // Left boundary
    // FLAT/NATURAL force the prescribed derivative value to zero.
    if (bc.lCode == FLAT || bc.lCode == NATURAL)
      bc.lVal = 0.0;
    if (bc.lCode == FLAT || bc.lCode == DERIV1) {
      // First-derivative row: (-c0 + c2) / (2*delta) = lVal.
      abcd_left[0] = -0.5 * grid.delta_inv;
      abcd_left[1] =  0.0 * grid.delta_inv;
      abcd_left[2] =  0.5 * grid.delta_inv;
      abcd_left[3] =  bc.lVal;
    }
    if (bc.lCode == NATURAL || bc.lCode == DERIV2) {
      // Second-derivative row: (c0 - 2 c1 + c2) / delta^2 = lVal.
      abcd_left[0] = 1.0 * grid.delta_inv * grid.delta_inv;
      abcd_left[1] =-2.0 * grid.delta_inv * grid.delta_inv;
      abcd_left[2] = 1.0 * grid.delta_inv * grid.delta_inv;
      abcd_left[3] = bc.lVal;
    }
    // Right boundary
    if (bc.rCode == FLAT || bc.rCode == NATURAL)
      bc.rVal = 0.0;
    if (bc.rCode == FLAT || bc.rCode == DERIV1) {
      abcd_right[0] = -0.5 * grid.delta_inv;
      abcd_right[1] =  0.0 * grid.delta_inv;
      abcd_right[2] =  0.5 * grid.delta_inv;
      abcd_right[3] =  bc.rVal;
    }
    if (bc.rCode == NATURAL || bc.rCode == DERIV2) {
      abcd_right[0] = 1.0 *grid.delta_inv * grid.delta_inv;
      abcd_right[1] =-2.0 *grid.delta_inv * grid.delta_inv;
      abcd_right[2] = 1.0 *grid.delta_inv * grid.delta_inv;
      abcd_right[3] = bc.rVal;
    }
#ifdef HAVE_C_VARARRAYS
    double bands[(M+2)*4];
#else
    double *bands = malloc ((M+2)*4*sizeof(double));
#endif
    // Row 0 and row M+1 hold the boundary-condition rows.
    for (int i=0; i<4; i++) {
      bands[4*( 0 )+i] = abcd_left[i];
      bands[4*(M+1)+i] = abcd_right[i];
    }
    // Interior rows 1..M: basis values plus the data in the 4th column.
    for (int i=0; i<M; i++) {
      for (int j=0; j<3; j++)
	bands[4*(i+1)+j] = basis[j];
      bands[4*(i+1)+3] = data[i*dstride];
    }
    // Now, solve for coefficients
    solve_deriv_interp_1d_d (bands, coefs, M, cstride);
#ifndef HAVE_C_VARARRAYS
    free (bands);
#endif
  }
}
// Allocate a 1D double-precision uniform B-spline and (optionally) fit it.
// data may be NULL, in which case only storage is allocated; fill the
// coefficients later with recompute_UBspline_1d_d().
UBspline_1d_d*
create_UBspline_1d_d (Ugrid x_grid, BCtype_d xBC, double *data)
{
// Create new spline
UBspline_1d_d* restrict spline = malloc (sizeof(UBspline_1d_d));
spline->spcode = U1D;
spline->tcode = DOUBLE_REAL;
spline->xBC = xBC;
// Setup internal variables
int M = x_grid.num;
int N;
// Periodic-type boundaries need M+3 coefficients; fixed boundaries M+2.
if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) {
x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num);
N = M+3;
}
else {
x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1);
N = M+2;
}
x_grid.delta_inv = 1.0/x_grid.delta;
spline->x_grid = x_grid;
// 16-byte alignment is required by the SSE2 evaluation kernels.
#ifndef HAVE_SSE2
spline->coefs = malloc (sizeof(double)*N);
#else
posix_memalign ((void**)&spline->coefs, 16, sizeof(double)*N);
#endif
if(data != NULL) // only data is provided
find_coefs_1d_d (spline->x_grid, xBC, data, 1, spline->coefs, 1);
init_sse_data();
return spline;
}
// Refit an existing 1D spline to new data, reusing the grid, boundary
// conditions, and coefficient storage established at creation time.
void
recompute_UBspline_1d_d (UBspline_1d_d* spline, double *data)
{
find_coefs_1d_d (spline->x_grid, spline->xBC, data, 1, spline->coefs, 1);
}
// Allocate a 2D double-precision uniform B-spline and (optionally) fit it
// to data (Mx x My, y fastest).  As with the 1D and 3D creators, data may
// be NULL to allocate storage only; fill later via recompute_UBspline_2d_d().
UBspline_2d_d*
create_UBspline_2d_d (Ugrid x_grid, Ugrid y_grid,
                      BCtype_d xBC, BCtype_d yBC, double *data)
{
  // Create new spline
  UBspline_2d_d* restrict spline = malloc (sizeof(UBspline_2d_d));
  spline->spcode = U2D;
  spline->tcode  = DOUBLE_REAL;
  spline->xBC = xBC;
  spline->yBC = yBC;
  // Setup internal variables
  int Mx = x_grid.num;
  int My = y_grid.num;
  int Nx, Ny;
  // Periodic-type boundaries need M+3 coefficients per axis, else M+2.
  if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3;
  else                                                    Nx = Mx+2;
  x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3);
  x_grid.delta_inv = 1.0/x_grid.delta;
  spline->x_grid = x_grid;
  if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3;
  else                                                    Ny = My+2;
  y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3);
  y_grid.delta_inv = 1.0/y_grid.delta;
  spline->y_grid = y_grid;
  spline->x_stride = Ny;
#ifndef HAVE_SSE2
  spline->coefs = malloc (sizeof(double)*Nx*Ny);
#else
  posix_memalign ((void**)&spline->coefs, 16, (sizeof(double)*Nx*Ny));
#endif
  // Fix: only solve when data is provided, matching the 1D and 3D creators
  // which accept data == NULL as "allocate now, fit later".  Previously
  // NULL data was dereferenced here.
  if (data != NULL) {
    // First, solve in the X-direction
    for (int iy=0; iy<My; iy++) {
      intptr_t doffset = iy;
      intptr_t coffset = iy;
      find_coefs_1d_d (spline->x_grid, xBC, data+doffset, My,
                       spline->coefs+coffset, Ny);
    }
    // Now, solve in the Y-direction (in place on the coefficient array)
    for (int ix=0; ix<Nx; ix++) {
      intptr_t doffset = ix*Ny;
      intptr_t coffset = ix*Ny;
      find_coefs_1d_d (spline->y_grid, yBC, spline->coefs+doffset, 1,
                       spline->coefs+coffset, 1);
    }
  }
  init_sse_data();
  return spline;
}
// Refit an existing 2D spline to new data (Mx x My, y fastest), reusing
// the grids, boundary conditions, and coefficient storage.
void
recompute_UBspline_2d_d (UBspline_2d_d* spline, double *data)
{
  /* Coefficient-array dimensions: periodic-type boundaries carry one more
     coefficient per axis than fixed-derivative boundaries. */
  const int Mx = spline->x_grid.num;
  const int My = spline->y_grid.num;
  const int Nx = (spline->xBC.lCode == PERIODIC ||
                  spline->xBC.lCode == ANTIPERIODIC) ? Mx+3 : Mx+2;
  const int Ny = (spline->yBC.lCode == PERIODIC ||
                  spline->yBC.lCode == ANTIPERIODIC) ? My+3 : My+2;
  /* Pass 1: fit along x for each of the My data columns. */
  for (int col = 0; col < My; col++)
    find_coefs_1d_d (spline->x_grid, spline->xBC, data+col, My,
                     spline->coefs+col, Ny);
  /* Pass 2: fit along y, in place, for each of the Nx coefficient rows. */
  for (int row = 0; row < Nx; row++)
    find_coefs_1d_d (spline->y_grid, spline->yBC, spline->coefs+row*Ny, 1,
                     spline->coefs+row*Ny, 1);
}
// Allocate a 3D double-precision uniform B-spline and (optionally) fit it
// to data (Mx x My x Mz, z fastest).  data may be NULL to allocate only;
// fill later with recompute_UBspline_3d_d().
UBspline_3d_d*
create_UBspline_3d_d (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid,
BCtype_d xBC, BCtype_d yBC, BCtype_d zBC,
double *data)
{
// Create new spline
UBspline_3d_d* restrict spline = malloc (sizeof(UBspline_3d_d));
spline->spcode = U3D;
spline->tcode = DOUBLE_REAL;
spline->xBC = xBC;
spline->yBC = yBC;
spline->zBC = zBC;
// Setup internal variables
int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num;
int Nx, Ny, Nz;
// Periodic-type boundaries need M+3 coefficients per axis, else M+2.
if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3;
else Nx = Mx+2;
x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3);
x_grid.delta_inv = 1.0/x_grid.delta;
spline->x_grid = x_grid;
if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3;
else Ny = My+2;
y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3);
y_grid.delta_inv = 1.0/y_grid.delta;
spline->y_grid = y_grid;
if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC) Nz = Mz+3;
else Nz = Mz+2;
z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3);
z_grid.delta_inv = 1.0/z_grid.delta;
spline->z_grid = z_grid;
spline->x_stride = Ny*Nz;
spline->y_stride = Nz;
// size_t arithmetic avoids int overflow for large coefficient arrays.
spline->coefs_size=(size_t)Nx*(size_t)Ny*(size_t)Nz;
#ifndef HAVE_SSE2
spline->coefs = malloc (sizeof(double)*spline->coefs_size);
#else
posix_memalign ((void**)&spline->coefs, 16, (sizeof(double)*spline->coefs_size));
#endif
if(data != NULL) // only data is provided
{
// Tensor-product fit: sweep the 1D solver along each axis in turn.
// The y and z passes operate in place on the coefficient array.
// First, solve in the X-direction
#pragma omp parallel for
for (int iy=0; iy<My; iy++)
for (int iz=0; iz<Mz; iz++) {
intptr_t doffset = iy*Mz+iz;
intptr_t coffset = iy*Nz+iz;
find_coefs_1d_d (spline->x_grid, xBC, data+doffset, My*Mz,
spline->coefs+coffset, Ny*Nz);
}
// Now, solve in the Y-direction
#pragma omp parallel for
for (int ix=0; ix<Nx; ix++)
for (int iz=0; iz<Nz; iz++) {
intptr_t doffset = ix*Ny*Nz + iz;
intptr_t coffset = ix*Ny*Nz + iz;
find_coefs_1d_d (spline->y_grid, yBC, spline->coefs+doffset, Nz,
spline->coefs+coffset, Nz);
}
// Now, solve in the Z-direction
#pragma omp parallel for
for (int ix=0; ix<Nx; ix++)
for (int iy=0; iy<Ny; iy++) {
intptr_t doffset = (ix*Ny+iy)*Nz;
intptr_t coffset = (ix*Ny+iy)*Nz;
find_coefs_1d_d (spline->z_grid, zBC, spline->coefs+doffset, 1,
spline->coefs+coffset, 1);
}
}
init_sse_data();
return spline;
}
// Refit an existing 3D spline to new data (Mx x My x Mz, z fastest),
// reusing the grids, boundary conditions, and coefficient storage.
void
recompute_UBspline_3d_d (UBspline_3d_d* spline, double *data)
{
int Mx = spline->x_grid.num;
int My = spline->y_grid.num;
int Mz = spline->z_grid.num;
int Nx, Ny, Nz;
// Recreate the coefficient-array dimensions used at creation time.
if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC)
Nx = Mx+3;
else
Nx = Mx+2;
if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC)
Ny = My+3;
else
Ny = My+2;
if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC)
Nz = Mz+3;
else
Nz = Mz+2;
// First, solve in the X-direction
#pragma omp parallel for
for (int iy=0; iy<My; iy++)
for (int iz=0; iz<Mz; iz++) {
intptr_t doffset = iy*Mz+iz;
intptr_t coffset = iy*Nz+iz;
find_coefs_1d_d (spline->x_grid, spline->xBC, data+doffset, My*Mz,
spline->coefs+coffset, Ny*Nz);
}
// Now, solve in the Y-direction
#pragma omp parallel for
for (int ix=0; ix<Nx; ix++)
for (int iz=0; iz<Nz; iz++) {
intptr_t doffset = ix*Ny*Nz + iz;
intptr_t coffset = ix*Ny*Nz + iz;
find_coefs_1d_d (spline->y_grid, spline->yBC, spline->coefs+doffset, Nz,
spline->coefs+coffset, Nz);
}
// Now, solve in the Z-direction
#pragma omp parallel for
for (int ix=0; ix<Nx; ix++)
for (int iy=0; iy<Ny; iy++) {
intptr_t doffset = (ix*Ny+iy)*Nz;
intptr_t coffset = (ix*Ny+iy)*Nz;
find_coefs_1d_d (spline->z_grid, spline->zBC, spline->coefs+doffset, 1,
spline->coefs+coffset, 1);
}
}
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
//// Double-Precision, Complex Creation Routines ////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
// NOTE: the "bands" description below documents the solve_*_interp_1d
// solver interface defined earlier in this file, not the complex creation
// routines that follow.
// On input, bands should be filled with:
// row 0 : abcdInitial from boundary conditions
// rows 1:M: basis functions in first 3 cols, data in last
// row M+1 : abcdFinal from boundary conditions
// cstride gives the stride between values in coefs.
// On exit, coefs will contain the interpolating B-spline coefs.
// Allocate and fit a 1D complex-double uniform B-spline.  Complex data is
// treated as interleaved re/im doubles: each part is fit independently
// with stride 2 using the real-valued 1D solver.
UBspline_1d_z*
create_UBspline_1d_z (Ugrid x_grid, BCtype_z xBC, complex_double *data)
{
// Create new spline
UBspline_1d_z* restrict spline = malloc (sizeof(UBspline_1d_z));
spline->spcode = U1D;
spline->tcode = DOUBLE_COMPLEX;
spline->xBC = xBC;
// Setup internal variables
int M = x_grid.num;
int N;
// Periodic-type boundaries need M+3 coefficients; fixed boundaries M+2.
if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) {
x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num);
N = M+3;
}
else {
x_grid.delta = (x_grid.end-x_grid.start)/(double)(x_grid.num-1);
N = M+2;
}
x_grid.delta_inv = 1.0/x_grid.delta;
spline->x_grid = x_grid;
// Factor of 2: interleaved real and imaginary coefficients.
#ifndef HAVE_SSE2
spline->coefs = malloc (2*sizeof(double)*N);
#else
posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(double)*N);
#endif
// Split the complex boundary condition into real/imaginary scalar BCs.
BCtype_d xBC_r, xBC_i;
xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode;
xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r;
xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode;
xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i;
// Real part
find_coefs_1d_d (spline->x_grid, xBC_r, (double*)data, 2,
(double*)spline->coefs, 2);
// Imaginary part
find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+1, 2,
((double*)spline->coefs)+1, 2);
init_sse_data();
return spline;
}
// Refit an existing 1D complex spline to new data, reusing grid, boundary
// conditions, and coefficient storage.  The complex values are interleaved
// re/im doubles; real and imaginary parts are fit separately with stride 2.
// (Removed the dead computation of M and N, whose results were never used.)
void
recompute_UBspline_1d_z (UBspline_1d_z* spline, complex_double *data)
{
  // Split the complex boundary condition into real/imaginary scalar BCs.
  BCtype_d xBC_r, xBC_i;
  xBC_r.lCode = spline->xBC.lCode;  xBC_r.rCode = spline->xBC.rCode;
  xBC_r.lVal  = spline->xBC.lVal_r; xBC_r.rVal  = spline->xBC.rVal_r;
  xBC_i.lCode = spline->xBC.lCode;  xBC_i.rCode = spline->xBC.rCode;
  xBC_i.lVal  = spline->xBC.lVal_i; xBC_i.rVal  = spline->xBC.rVal_i;
  // Real part
  find_coefs_1d_d (spline->x_grid, xBC_r, (double*)data, 2,
                   (double*)spline->coefs, 2);
  // Imaginary part
  find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+1, 2,
                   ((double*)spline->coefs)+1, 2);
}
// Allocate and fit a 2D complex-double uniform B-spline (Mx x My, y
// fastest).  Real and imaginary parts are interleaved and fit separately
// with doubled strides using the real-valued 1D solver.
UBspline_2d_z*
create_UBspline_2d_z (Ugrid x_grid, Ugrid y_grid,
BCtype_z xBC, BCtype_z yBC, complex_double *data)
{
// Create new spline
UBspline_2d_z* restrict spline = malloc (sizeof(UBspline_2d_z));
spline->spcode = U2D;
spline->tcode = DOUBLE_COMPLEX;
spline->xBC = xBC;
spline->yBC = yBC;
// Setup internal variables
int Mx = x_grid.num;
int My = y_grid.num;
int Nx, Ny;
// Periodic-type boundaries need M+3 coefficients per axis, else M+2.
if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC)
Nx = Mx+3;
else
Nx = Mx+2;
x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3);
x_grid.delta_inv = 1.0/x_grid.delta;
spline->x_grid = x_grid;
if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC)
Ny = My+3;
else
Ny = My+2;
y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3);
y_grid.delta_inv = 1.0/y_grid.delta;
spline->y_grid = y_grid;
spline->x_stride = Ny;
// Factor of 2: interleaved real and imaginary coefficients.
#ifndef HAVE_SSE2
spline->coefs = malloc (2*sizeof(double)*Nx*Ny);
#else
posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(double)*Nx*Ny);
#endif
// Split each complex boundary condition into real/imaginary scalar BCs.
BCtype_d xBC_r, xBC_i, yBC_r, yBC_i;
xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode;
xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r;
xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode;
xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i;
yBC_r.lCode = yBC.lCode; yBC_r.rCode = yBC.rCode;
yBC_r.lVal = yBC.lVal_r; yBC_r.rVal = yBC.rVal_r;
yBC_i.lCode = yBC.lCode; yBC_i.rCode = yBC.rCode;
yBC_i.lVal = yBC.lVal_i; yBC_i.rVal = yBC.rVal_i;
// First, solve in the X-direction
for (int iy=0; iy<My; iy++) {
intptr_t doffset = 2*iy;
intptr_t coffset = 2*iy;
// Real part
find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data+doffset), 2*My,
(double*)spline->coefs+coffset, 2*Ny);
// Imag part
find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My,
((double*)spline->coefs)+coffset+1, 2*Ny);
}
// Now, solve in the Y-direction (in place on the coefficient array)
for (int ix=0; ix<Nx; ix++) {
intptr_t doffset = 2*ix*Ny;
intptr_t coffset = 2*ix*Ny;
// Real part
find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline->coefs)+doffset, 2,
(double*)spline->coefs+coffset, 2);
// Imag part
find_coefs_1d_d (spline->y_grid, yBC_i, (double*)spline->coefs+doffset+1, 2,
((double*)spline->coefs)+coffset+1, 2);
}
init_sse_data();
return spline;
}
// Refit an existing 2D complex spline to new data (Mx x My, y fastest),
// reusing grids, boundary conditions, and coefficient storage.
void
recompute_UBspline_2d_z (UBspline_2d_z* spline, complex_double *data)
{
int Mx = spline->x_grid.num;
int My = spline->y_grid.num;
int Nx, Ny;
// Recreate the coefficient-array dimensions used at creation time.
if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC)
Nx = Mx+3;
else
Nx = Mx+2;
if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC)
Ny = My+3;
else
Ny = My+2;
// Split each complex boundary condition into real/imaginary scalar BCs.
BCtype_d xBC_r, xBC_i, yBC_r, yBC_i;
xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode;
xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r;
xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode;
xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i;
yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode;
yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r;
yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode;
yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i;
// First, solve in the X-direction
for (int iy=0; iy<My; iy++) {
intptr_t doffset = 2*iy;
intptr_t coffset = 2*iy;
// Real part
find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data+doffset), 2*My,
(double*)spline->coefs+coffset, 2*Ny);
// Imag part
find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My,
((double*)spline->coefs)+coffset+1, 2*Ny);
}
// Now, solve in the Y-direction (in place on the coefficient array)
for (int ix=0; ix<Nx; ix++) {
intptr_t doffset = 2*ix*Ny;
intptr_t coffset = 2*ix*Ny;
// Real part
find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline->coefs)+doffset, 2,
(double*)spline->coefs+coffset, 2);
// Imag part
find_coefs_1d_d (spline->y_grid, yBC_i, (double*)spline->coefs+doffset+1, 2,
((double*)spline->coefs)+coffset+1, 2);
}
}
// Allocate and fit a 3D complex-double uniform B-spline (Mx x My x Mz, z
// fastest).  Real and imaginary parts are interleaved and fit separately
// with doubled strides using the real-valued 1D solver.
UBspline_3d_z*
create_UBspline_3d_z (Ugrid x_grid, Ugrid y_grid, Ugrid z_grid,
BCtype_z xBC, BCtype_z yBC, BCtype_z zBC,
complex_double *data)
{
// Create new spline
UBspline_3d_z* restrict spline = malloc (sizeof(UBspline_3d_z));
spline->spcode = U3D;
spline->tcode = DOUBLE_COMPLEX;
spline->xBC = xBC;
spline->yBC = yBC;
spline->zBC = zBC;
// Setup internal variables
int Mx = x_grid.num; int My = y_grid.num; int Mz = z_grid.num;
int Nx, Ny, Nz;
// Periodic-type boundaries need M+3 coefficients per axis, else M+2.
if (xBC.lCode == PERIODIC || xBC.lCode == ANTIPERIODIC) Nx = Mx+3;
else Nx = Mx+2;
x_grid.delta = (x_grid.end - x_grid.start)/(double)(Nx-3);
x_grid.delta_inv = 1.0/x_grid.delta;
spline->x_grid = x_grid;
if (yBC.lCode == PERIODIC || yBC.lCode == ANTIPERIODIC) Ny = My+3;
else Ny = My+2;
y_grid.delta = (y_grid.end - y_grid.start)/(double)(Ny-3);
y_grid.delta_inv = 1.0/y_grid.delta;
spline->y_grid = y_grid;
if (zBC.lCode == PERIODIC || zBC.lCode == ANTIPERIODIC) Nz = Mz+3;
else Nz = Mz+2;
z_grid.delta = (z_grid.end - z_grid.start)/(double)(Nz-3);
z_grid.delta_inv = 1.0/z_grid.delta;
spline->z_grid = z_grid;
spline->x_stride = Ny*Nz;
spline->y_stride = Nz;
// Factor of 2: interleaved real and imaginary coefficients.
#ifndef HAVE_SSE2
spline->coefs = malloc (2*sizeof(double)*Nx*Ny*Nz);
#else
posix_memalign ((void**)&spline->coefs, 16, 2*sizeof(double)*Nx*Ny*Nz);
#endif
// Split each complex boundary condition into real/imaginary scalar BCs.
BCtype_d xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i;
xBC_r.lCode = xBC.lCode; xBC_r.rCode = xBC.rCode;
xBC_r.lVal = xBC.lVal_r; xBC_r.rVal = xBC.rVal_r;
xBC_i.lCode = xBC.lCode; xBC_i.rCode = xBC.rCode;
xBC_i.lVal = xBC.lVal_i; xBC_i.rVal = xBC.rVal_i;
yBC_r.lCode = yBC.lCode; yBC_r.rCode = yBC.rCode;
yBC_r.lVal = yBC.lVal_r; yBC_r.rVal = yBC.rVal_r;
yBC_i.lCode = yBC.lCode; yBC_i.rCode = yBC.rCode;
yBC_i.lVal = yBC.lVal_i; yBC_i.rVal = yBC.rVal_i;
zBC_r.lCode = zBC.lCode; zBC_r.rCode = zBC.rCode;
zBC_r.lVal = zBC.lVal_r; zBC_r.rVal = zBC.rVal_r;
zBC_i.lCode = zBC.lCode; zBC_i.rCode = zBC.rCode;
zBC_i.lVal = zBC.lVal_i; zBC_i.rVal = zBC.rVal_i;
// First, solve in the X-direction
for (int iy=0; iy<My; iy++)
for (int iz=0; iz<Mz; iz++) {
intptr_t doffset = 2*(iy*Mz+iz);
intptr_t coffset = 2*(iy*Nz+iz);
// Real part
find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data)+doffset, 2*My*Mz,
((double*)spline->coefs)+coffset, 2*Ny*Nz);
// Imag part
find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My*Mz,
((double*)spline->coefs)+coffset+1, 2*Ny*Nz);
}
// Now, solve in the Y-direction (in place on the coefficient array)
for (int ix=0; ix<Nx; ix++)
for (int iz=0; iz<Nz; iz++) {
intptr_t doffset = 2*(ix*Ny*Nz + iz);
intptr_t coffset = 2*(ix*Ny*Nz + iz);
// Real part
find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline->coefs)+doffset, 2*Nz,
((double*)spline->coefs)+coffset, 2*Nz);
// Imag part
find_coefs_1d_d (spline->y_grid, yBC_i, ((double*)spline->coefs)+doffset+1, 2*Nz,
((double*)spline->coefs)+coffset+1, 2*Nz);
}
// Now, solve in the Z-direction (in place on the coefficient array)
for (int ix=0; ix<Nx; ix++)
for (int iy=0; iy<Ny; iy++) {
intptr_t doffset = 2*((ix*Ny+iy)*Nz);
intptr_t coffset = 2*((ix*Ny+iy)*Nz);
// Real part
find_coefs_1d_d (spline->z_grid, zBC_r, ((double*)spline->coefs)+doffset, 2,
((double*)spline->coefs)+coffset, 2);
// Imag part
find_coefs_1d_d (spline->z_grid, zBC_i, ((double*)spline->coefs)+doffset+1, 2,
((double*)spline->coefs)+coffset+1, 2);
}
init_sse_data();
return spline;
}
// Refit an existing 3D complex spline to new data (Mx x My x Mz, z
// fastest), reusing grids, boundary conditions, and coefficient storage.
void
recompute_UBspline_3d_z (UBspline_3d_z* spline, complex_double *data)
{
// Setup internal variables
int Mx = spline->x_grid.num;
int My = spline->y_grid.num;
int Mz = spline->z_grid.num;
int Nx, Ny, Nz;
// Recreate the coefficient-array dimensions used at creation time.
if (spline->xBC.lCode == PERIODIC || spline->xBC.lCode == ANTIPERIODIC)
Nx = Mx+3;
else
Nx = Mx+2;
if (spline->yBC.lCode == PERIODIC || spline->yBC.lCode == ANTIPERIODIC)
Ny = My+3;
else
Ny = My+2;
if (spline->zBC.lCode == PERIODIC || spline->zBC.lCode == ANTIPERIODIC)
Nz = Mz+3;
else
Nz = Mz+2;
// Split each complex boundary condition into real/imaginary scalar BCs.
BCtype_d xBC_r, xBC_i, yBC_r, yBC_i, zBC_r, zBC_i;
xBC_r.lCode = spline->xBC.lCode; xBC_r.rCode = spline->xBC.rCode;
xBC_r.lVal = spline->xBC.lVal_r; xBC_r.rVal = spline->xBC.rVal_r;
xBC_i.lCode = spline->xBC.lCode; xBC_i.rCode = spline->xBC.rCode;
xBC_i.lVal = spline->xBC.lVal_i; xBC_i.rVal = spline->xBC.rVal_i;
yBC_r.lCode = spline->yBC.lCode; yBC_r.rCode = spline->yBC.rCode;
yBC_r.lVal = spline->yBC.lVal_r; yBC_r.rVal = spline->yBC.rVal_r;
yBC_i.lCode = spline->yBC.lCode; yBC_i.rCode = spline->yBC.rCode;
yBC_i.lVal = spline->yBC.lVal_i; yBC_i.rVal = spline->yBC.rVal_i;
zBC_r.lCode = spline->zBC.lCode; zBC_r.rCode = spline->zBC.rCode;
zBC_r.lVal = spline->zBC.lVal_r; zBC_r.rVal = spline->zBC.rVal_r;
zBC_i.lCode = spline->zBC.lCode; zBC_i.rCode = spline->zBC.rCode;
zBC_i.lVal = spline->zBC.lVal_i; zBC_i.rVal = spline->zBC.rVal_i;
// First, solve in the X-direction
for (int iy=0; iy<My; iy++)
for (int iz=0; iz<Mz; iz++) {
intptr_t doffset = 2*(iy*Mz+iz);
intptr_t coffset = 2*(iy*Nz+iz);
// Real part
find_coefs_1d_d (spline->x_grid, xBC_r, ((double*)data)+doffset, 2*My*Mz,
((double*)spline->coefs)+coffset, 2*Ny*Nz);
// Imag part
find_coefs_1d_d (spline->x_grid, xBC_i, ((double*)data)+doffset+1, 2*My*Mz,
((double*)spline->coefs)+coffset+1, 2*Ny*Nz);
}
// Now, solve in the Y-direction (in place on the coefficient array)
for (int ix=0; ix<Nx; ix++)
for (int iz=0; iz<Nz; iz++) {
intptr_t doffset = 2*(ix*Ny*Nz + iz);
intptr_t coffset = 2*(ix*Ny*Nz + iz);
// Real part
find_coefs_1d_d (spline->y_grid, yBC_r, ((double*)spline->coefs)+doffset, 2*Nz,
((double*)spline->coefs)+coffset, 2*Nz);
// Imag part
find_coefs_1d_d (spline->y_grid, yBC_i, ((double*)spline->coefs)+doffset+1, 2*Nz,
((double*)spline->coefs)+coffset+1, 2*Nz);
}
// Now, solve in the Z-direction (in place on the coefficient array)
for (int ix=0; ix<Nx; ix++)
for (int iy=0; iy<Ny; iy++) {
intptr_t doffset = 2*((ix*Ny+iy)*Nz);
intptr_t coffset = 2*((ix*Ny+iy)*Nz);
// Real part
find_coefs_1d_d (spline->z_grid, zBC_r, ((double*)spline->coefs)+doffset, 2,
((double*)spline->coefs)+coffset, 2);
// Imag part
find_coefs_1d_d (spline->z_grid, zBC_i, ((double*)spline->coefs)+doffset+1, 2,
((double*)spline->coefs)+coffset+1, 2);
}
}
// Release a uniform B-spline: the coefficient array must be freed before
// the descriptor that owns the pointer to it.
void
destroy_UBspline (Bspline *spline)
{
  void *coef_storage = spline->coefs;
  free (coef_storage);
  free (spline);
}
void
destroy_NUBspline (Bspline *spline);
void
destroy_multi_UBspline (Bspline *spline);
// Generic destructor: dispatch on the spline-type code ranges to the
// uniform, nonuniform, or multi-spline destructor.  Unknown codes are
// reported to stderr (typo fixed: "invalide" -> "invalid").
void
destroy_Bspline (void *spline)
{
  Bspline *sp = (Bspline *)spline;
  if (sp->sp_code <= U3D)
    destroy_UBspline (sp);
  else if (sp->sp_code <= NU3D)
    destroy_NUBspline (sp);
  else if (sp->sp_code <= MULTI_U3D)
    destroy_multi_UBspline (sp);
  else
    fprintf (stderr, "Error in destroy_Bspline: invalid spline code %d.\n",
             sp->sp_code);
}
|
3d7pt.c | /*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two `struct timeval` values.
 * Y is normalized (and therefore modified) so that the microsecond
 * difference falls in [0, 1000000).
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow seconds into y so x->tv_usec >= y->tv_usec. */
    if (x->tv_usec < y->tv_usec) {
        int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * carry;
        y->tv_sec  += carry;
    }
    /* Carry excess microseconds from y into its seconds field. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec  -= carry;
    }
    /* tv_usec is now certainly non-negative. */
    result->tv_sec  = x->tv_sec  - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 8;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k])
+ beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] +
A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + A[t%2][i][j][k + 1]);
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays (Causing performance degradation
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
atlasmm.c | /* Hi, everybody!
* =====================================================================================
*
* Filename: MMmultiple.c
*
* Description: Do Matrix Multiplication C = A x B with A and B blocks generated by
* mkmatrices.c.
*
* Version: 1.0
* Created: 09/21/2016 22:53:31
* Revision: none
* Compiler: gcc
*
* Author: Xiukun Hu
* Organization: University of Wyoming, Department of Mathematics
*
* =====================================================================================
*/
#ifdef _OPENMP
#include <omp.h>
#else
/* Serial fallbacks for the OpenMP query functions.
 * Fix: the original definitions ended in semicolons, which makes any use
 * of these macros inside an expression (e.g. as a function argument or
 * initializer combined with other code) expand to a stray statement. */
#define omp_get_num_threads() 1
#define omp_get_thread_num() 0
#define omp_get_max_threads() 1
#endif
#include <stdio.h>
#include <stdlib.h>
#include "cblas.h"
#include "matrices.h"
#ifndef WIDTH
#define WIDTH 30
#endif
/* Zero every entry of an nrows x ncols matrix stored as row pointers. */
void ClearMatrix( double** matrix, int nrows, int ncols ) {
    int r, c;
    for ( r = 0 ; r < nrows ; r++ ) {
        double *row = matrix[r];
        for ( c = 0 ; c < ncols ; c++ )
            row[c] = 0.0;
    }
}
/* Out-of-core blocked matrix multiply C = A x B.  One thread streams the
 * next pair of blocks from disk (double-buffered via ablock/bblock and the
 * `tog` toggle) while all threads multiply the current pair with BLAS
 * dgemm; a finished C block is written out when its k-sweep completes.
 * Block files are produced by mkmatrices.c and read via matrices.h. */
int main(){
/* Local declarations */
enum CBLAS_ORDER order = CblasColMajor;
enum CBLAS_TRANSPOSE transA = CblasNoTrans;
enum CBLAS_TRANSPOSE transB = CblasNoTrans;
const int NTH = omp_get_max_threads();
double tsc[NTH];
double tsc1;
double t1; /* Time keeper */
double t2; /* Time keeper */
double tt1;
double tt;
double tio1; /* Private I/O time keeper */
double tio = 0; /* Private I/O time keeper */
double tc1; /* Compute time */
double tc = 0; /* Compute time */
double tw1; /* Wait time start */
double tw = 0; /* Accumulated wait (barrier) time */
double temp; /* Private pointer for saving results */
double mrun(); /* Get timing information */
double **ablock[2]; /* Pointer to one block of A */
double **bblock[2]; /* Pointer to one block of B */
double **cblock[2]; /* Pointer to one block of C */
int acols = 0; /* Block columns in A */
int arows = 0; /* Block rows in A */
int bcols = 0; /* Block columns in B */
int brows = 0; /* Block rows in B */
int ccols = 0; /* Block columns in C */
int crows = 0; /* Block rows in C */
int blk_cols = 0; /* Columns in a block */
int blk_rows = 0; /* Rows in a block */
int mopt_a = 1; /* How to allocate space in A blocks */
int mopt_b = 1; /* How to allocate space in B blocks */
int mopt_c = 1; /* How to allocate space in C blocks */
int colleft; /* Block columns residue by WIDTH */
int i = 0; /* Loop index */
int j = 0; /* Loop index */
int k = 0; /* Loop index */
int I,J,K; /* Loop index */
int iplus; /* Loop index */
int jplus; /* Loop index */
int kplus; /* Loop index */
int tog = 0; /* Toggle for a&bblock */
int ctog = 0; /* Toggle for cblock */
int TID; /* Thread ID */
int ar; /* ablock row index */
int ac; /* ablock col index */
int rc;
int nI;
int nThreads;
char c = ' '; /* Input character (unused) */
tt1 = mrun();
/* Get matrix information from disk */
matrix_info_read( &blk_rows, &blk_cols,
&arows, &acols,
&brows, &bcols,
&crows, &ccols );
/* Preprocess message */
colleft = blk_cols % WIDTH; /* Colunms left for each block over WIDTH */
nI = blk_rows * (blk_cols / WIDTH); /* Number of iterations for each block */
rc = blk_cols - colleft; /* The starting index of the residue column */
/* Allocate 6 block matrices (two each for A, B and C) */
ablock[0] = block_allocate( blk_rows, blk_cols, mopt_a );
bblock[0] = block_allocate( blk_rows, blk_cols, mopt_b );
cblock[0] = block_allocate( blk_rows, blk_cols, mopt_c );
ablock[1] = block_allocate( blk_rows, blk_cols, mopt_a );
bblock[1] = block_allocate( blk_rows, blk_cols, mopt_b );
cblock[1] = block_allocate( blk_rows, blk_cols, mopt_c );
ClearMatrix( cblock[0], blk_rows, blk_cols );
ClearMatrix( cblock[1], blk_rows, blk_cols );
/* Enter parallel region */
#pragma omp parallel default(none) \
shared(blk_cols, blk_rows, \
ablock, bblock, cblock, \
mopt_a, mopt_b, mopt_c, \
acols, crows, ccols, \
colleft, nI, nThreads, \
rc, t1, t2, tsc, tsc1) \
firstprivate( tog, ctog, i, j, k, tio, tc, tw ) \
private( TID, I, J, K, iplus, jplus, kplus, temp, ar, ac, tio1, tc1, tw1 )
{
#pragma omp single
{
nThreads = omp_get_num_threads();
t1 = mrun();
}
tc1 = t1;
TID = omp_get_thread_num();
/* Single thread reading the A00 B00 for calculating */
#pragma omp single
{
tio1 = mrun();
tc += tio1 - tc1;
block_readdisk( blk_rows, blk_cols, "A", 0, 0, ablock[0], mopt_a, 0 );
block_readdisk( blk_rows, blk_cols, "B", 0, 0, bblock[0], mopt_a, 0 );
tc1 = mrun();
tio += tc1 - tio1;
printf("Thread %d reading A00 and B00 in %les\n", TID, tio);
} // single thread reading A00 B00
/* Reading and calculating at the same time */
/* (i,j,k) is the current block triple; the "plus" indices are the next
 * triple in k-fastest order, prefetched into the inactive buffer. */
while ( i < crows ){
/* Get next loop's index i+, j+ and k+ */
kplus = (k+1) % acols;
jplus = (kplus==0)? ((j+1)%ccols) : j;
iplus = (jplus==0 && kplus==0)? i+1 : i;
/* Single thread reading A_i+k+ & B_k+j+ */
#pragma omp single nowait
{
if ( iplus < crows ) {
tio1 = mrun();
tc += tio1 - tc1;
block_readdisk( blk_rows, blk_cols, "A", iplus, kplus, ablock[1-tog], mopt_a, 0 );
block_readdisk( blk_rows, blk_cols, "B", kplus, jplus, bblock[1-tog], mopt_b, 0 );
tc1 = mrun();
tio += tc1 - tio1;
}
}
/* NOTE(review): tsc1 is shared and written here under `single nowait`
 * while read after the loop -- presumably benign; verify intended
 * synchronization. */
#pragma omp single nowait
if ( i == 0 && j == 0 && k == 0 )
tsc1 = mrun();
/* Multithreads calculating A_ik x B_kj */
cblas_dgemm(order,transA,transB, blk_rows, blk_cols, blk_cols ,1.0,
ablock[tog][0], blk_rows , bblock[tog][0], blk_cols ,1.0,cblock[ctog][0], blk_rows);
tw1 = mrun();
tc += tw1 - tc1;
if ( i == 0 && j == 0 && k == 0 )
tsc[TID] = mrun();
/* Barrier for reading A_i+k+ B_k+j+ and calculating A_ik x B_kj */
#pragma omp barrier
tc1 = mrun();
tw += tc1 - tw1;
/* Every thread check but single thread write to disk */
if ( kplus==0 ) {
#pragma omp single nowait
{
tio1 = mrun();
tc += tio1 - tc1;
block_write2disk( blk_rows, blk_cols, "D", i, j, cblock[ctog][0] );
ClearMatrix( cblock[ctog], blk_rows, blk_cols );
tc1 = mrun();
tio += tc1 - tio1;
} // Write cblock: OMP single nowait
ctog = 1-ctog; // Every thread change ctog if k+ = 0.
}
/* Every thread change to another ablock and bblock and update index */
tog = 1 - tog;
i = iplus;
j = jplus;
k = kplus;
} /* While loop for blocks */
printf("Thread %d, compute for %les, io for %les, wait for %le\n", TID, tc, tio, tw);
#pragma omp master
{
t2 = mrun() - t1;
}
}// End of parallel region
printf("Time in parallel region: %les\n", t2);
/* Reduce per-thread timestamps to the latest finisher of the first dgemm. */
for ( i = 1 ; i < nThreads ; i++ )
tsc[0] = (tsc[0] < tsc[i])? tsc[i] : tsc[0];
tt = mrun() - tt1;
/* Print time */
printf("Total time: %les\n", tt);
printf("Time for multiplying A00 x B00 in parallel: %le\n", tsc[0]-tsc1);
/* End */
return 0;
}
|
libperf.c | /**
* Copyright (C) Mellanox Technologies Ltd. 2001-2014. ALL RIGHTS RESERVED.
* Copyright (C) UT-Battelle, LLC. 2015. ALL RIGHTS RESERVED.
* Copyright (C) The University of Tennessee and The University
* of Tennessee Research Foundation. 2015-2016. ALL RIGHTS RESERVED.
* Copyright (C) ARM Ltd. 2017. ALL RIGHTS RESERVED.
* See file LICENSE for terms.
*/
#include "libperf_int.h"
#include <ucs/debug/log.h>
#include <ucs/arch/bitops.h>
#include <string.h>
#include <malloc.h>
#include <unistd.h>
/* Resolve whether the transport supports atomic operation `_op` for
 * `_size`-byte operands given the 32-bit/64-bit capability masks
 * (_op32/_op64); on failure, report the operation name from `_msg` and
 * return the error status from the enclosing function. */
#define ATOMIC_OP_CONFIG(_size, _op32, _op64, _op, _msg, _params, _status) \
_status = __get_atomic_flag((_size), (_op32), (_op64), (_op)); \
if (_status != UCS_OK) { \
ucs_error("%s/%s does not support atomic %s for message size %zu bytes", \
(_params)->uct.tl_name, (_params)->uct.dev_name, \
(_msg)[_op], (_size)); \
return _status; \
}
/* Verify every flag in `_required` is present in `_attr`; otherwise report
 * the first missing atomic capability (verbose mode only) and return
 * UCS_ERR_UNSUPPORTED from the enclosing function. */
#define ATOMIC_OP_CHECK(_size, _attr, _required, _params, _msg) \
if (!ucs_test_all_flags(_attr, _required)) { \
if ((_params)->flags & UCX_PERF_TEST_FLAG_VERBOSE) { \
ucs_error("%s/%s does not support required "#_size"-bit atomic: %s", \
(_params)->uct.tl_name, (_params)->uct.dev_name, \
(_msg)[ucs_ffs64(~(_attr) & (_required))]); \
} \
return UCS_ERR_UNSUPPORTED; \
}
/* Peer bootstrap information exchanged between perf-test endpoints:
 * address lengths for the API in use (UCT or UCP), the packed remote-key
 * size, and the address of the peer's receive buffer. */
typedef struct {
union {
struct {
size_t dev_addr_len;
size_t iface_addr_len;
size_t ep_addr_len;
} uct;
struct {
size_t addr_len;
} ucp;
};
size_t rkey_size;
unsigned long recv_buffer;
} ucx_perf_ep_info_t;
/* Human-readable names of UCT interface capability bits, indexed by bit
 * position (ucs_ilog2 of the flag). Used to report which capability is
 * missing when a test cannot run on the selected transport. */
static const char *perf_iface_ops[] = {
    [ucs_ilog2(UCT_IFACE_FLAG_AM_SHORT)]         = "am short",
    [ucs_ilog2(UCT_IFACE_FLAG_AM_BCOPY)]         = "am bcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_AM_ZCOPY)]         = "am zcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_PUT_SHORT)]        = "put short",
    [ucs_ilog2(UCT_IFACE_FLAG_PUT_BCOPY)]        = "put bcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_PUT_ZCOPY)]        = "put zcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_GET_SHORT)]        = "get short",
    [ucs_ilog2(UCT_IFACE_FLAG_GET_BCOPY)]        = "get bcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_GET_ZCOPY)]        = "get zcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_ERRHANDLE_PEER_FAILURE)] = "peer failure handler",
    [ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_IFACE)] = "connect to iface",
    [ucs_ilog2(UCT_IFACE_FLAG_CONNECT_TO_EP)]    = "connect to ep",
    [ucs_ilog2(UCT_IFACE_FLAG_AM_DUP)]           = "full reliability",
    [ucs_ilog2(UCT_IFACE_FLAG_CB_SYNC)]          = "sync callback",
    [ucs_ilog2(UCT_IFACE_FLAG_CB_ASYNC)]         = "async callback",
    [ucs_ilog2(UCT_IFACE_FLAG_EVENT_SEND_COMP)]  = "send completion event",
    [ucs_ilog2(UCT_IFACE_FLAG_EVENT_RECV)]       = "tag or active message event",
    [ucs_ilog2(UCT_IFACE_FLAG_EVENT_RECV_SIG)]   = "signaled message event",
    [ucs_ilog2(UCT_IFACE_FLAG_PENDING)]          = "pending",
    [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_SHORT)]  = "tag eager short",
    [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_BCOPY)]  = "tag eager bcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_TAG_EAGER_ZCOPY)]  = "tag eager zcopy",
    [ucs_ilog2(UCT_IFACE_FLAG_TAG_RNDV_ZCOPY)]   = "tag rndv zcopy"
};
/* Display names for fire-and-forget atomic operations, indexed by
 * uct_atomic_op_t value; used in capability error messages. */
static const char *perf_atomic_op[] = {
    [UCT_ATOMIC_OP_ADD]   = "add",
    [UCT_ATOMIC_OP_AND]   = "and",
    [UCT_ATOMIC_OP_OR]    = "or" ,
    [UCT_ATOMIC_OP_XOR]   = "xor"
};
/* Display names for fetching atomic operations (operations that return the
 * previous value), indexed by uct_atomic_op_t value. */
static const char *perf_atomic_fop[] = {
    [UCT_ATOMIC_OP_ADD]   = "fetch-add",
    [UCT_ATOMIC_OP_AND]   = "fetch-and",
    [UCT_ATOMIC_OP_OR]    = "fetch-or",
    [UCT_ATOMIC_OP_XOR]   = "fetch-xor",
    [UCT_ATOMIC_OP_SWAP]  = "swap",
    [UCT_ATOMIC_OP_CSWAP] = "cswap"
};
/*
 * This Quickselect routine is based on the algorithm described in
 * "Numerical recipes in C", Second Edition,
 * Cambridge University Press, 1992, Section 8.5, ISBN 0-521-43108-5
 * This code by Nicolas Devillard - 1998. Public domain.
 */
/* Return the median of the first n elements of arr (element at index
 * (n-1)/2 of the sorted order). NOTE: partially reorders arr in place,
 * as quickselect does — callers must not rely on the array order after
 * this call. Average complexity is O(n). */
static ucs_time_t __find_median_quick_select(ucs_time_t arr[], int n)
{
    int low, high ;
    int median;
    int middle, ll, hh;
/* Classic macro swap; `register` is a historical hint, harmless today. */
#define ELEM_SWAP(a,b) { register ucs_time_t t=(a);(a)=(b);(b)=t; }
    low = 0 ; high = n-1 ; median = (low + high) / 2;
    for (;;) {
        if (high <= low) /* One element only */
            return arr[median] ;
        if (high == low + 1) {  /* Two elements only */
            if (arr[low] > arr[high])
                ELEM_SWAP(arr[low], arr[high]) ;
            return arr[median] ;
        }
    /* Find median of low, middle and high items; swap into position low */
    middle = (low + high) / 2;
    if (arr[middle] > arr[high])    ELEM_SWAP(arr[middle], arr[high]) ;
    if (arr[low] > arr[high])       ELEM_SWAP(arr[low], arr[high]) ;
    if (arr[middle] > arr[low])     ELEM_SWAP(arr[middle], arr[low]) ;
    /* Swap low item (now in position middle) into position (low+1) */
    ELEM_SWAP(arr[middle], arr[low+1]) ;
    /* Nibble from each end towards middle, swapping items when stuck */
    ll = low + 1;
    hh = high;
    for (;;) {
        /* arr[low] is the pivot; scan both ends until they cross */
        do ll++; while (arr[low] > arr[ll]) ;
        do hh--; while (arr[hh]  > arr[low]) ;
        if (hh < ll)
        break;
        ELEM_SWAP(arr[ll], arr[hh]) ;
    }
    /* Swap middle item (in position low) back into correct position */
    ELEM_SWAP(arr[low], arr[hh]) ;
    /* Re-set active partition: recurse (iteratively) only into the side
     * that still contains the median index */
    if (hh <= median)
        low = ll;
        if (hh >= median)
        high = hh - 1;
    }
}
/* Allocate the UCT test buffers: one send and one receive region of
 * buffer_size * thread_count bytes each (registered on the interface's
 * memory domain), plus the IOV descriptor array used by the ZCOPY layout.
 * On success fills perf->send_buffer, perf->recv_buffer and perf->uct.iov
 * and returns UCS_OK; on failure releases everything allocated so far and
 * returns the error status. */
static ucs_status_t uct_perf_test_alloc_mem(ucx_perf_context_t *perf,
                                            ucx_perf_params_t *params)
{
    ucs_status_t status;
    unsigned flags;
    size_t buffer_size;

    /* With a strided IOV layout each entry occupies iov_stride bytes */
    if ((UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) && params->iov_stride) {
        buffer_size = params->msg_size_cnt * params->iov_stride;
    } else {
        buffer_size = ucx_perf_get_message_size(params);
    }

    /* TODO use params->alignment */

    flags = (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ?
            UCT_MD_MEM_FLAG_NONBLOCK : 0;
    flags |= UCT_MD_MEM_ACCESS_ALL;

    /* Allocate send buffer memory */
    status = uct_iface_mem_alloc(perf->uct.iface,
                                 buffer_size * params->thread_count,
                                 flags, "perftest", &perf->uct.send_mem);
    if (status != UCS_OK) {
        ucs_error("Failed allocate send buffer: %s", ucs_status_string(status));
        goto err;
    }
    ucs_assert(perf->uct.send_mem.md == perf->uct.md);
    perf->send_buffer = perf->uct.send_mem.address;

    /* Allocate receive buffer memory */
    status = uct_iface_mem_alloc(perf->uct.iface,
                                 buffer_size * params->thread_count,
                                 flags, "perftest", &perf->uct.recv_mem);
    if (status != UCS_OK) {
        ucs_error("Failed allocate receive buffer: %s", ucs_status_string(status));
        goto err_free_send;
    }
    ucs_assert(perf->uct.recv_mem.md == perf->uct.md);
    perf->recv_buffer = perf->uct.recv_mem.address;

    /* Allocate IOV datatype memory */
    perf->params.msg_size_cnt = params->msg_size_cnt;
    perf->uct.iov = malloc(sizeof(*perf->uct.iov) *
                           perf->params.msg_size_cnt *
                           params->thread_count);
    if (NULL == perf->uct.iov) {
        status = UCS_ERR_NO_MEMORY;
        ucs_error("Failed allocate send IOV(%lu) buffer: %s",
                  perf->params.msg_size_cnt, ucs_status_string(status));
        /* BUGFIX: previously jumped to err_free_send, leaking recv_mem */
        goto err_free_recv;
    }

    perf->offset = 0;

    ucs_debug("allocated memory. Send buffer %p, Recv buffer %p",
              perf->send_buffer, perf->recv_buffer);
    return UCS_OK;

err_free_recv:
    uct_iface_mem_free(&perf->uct.recv_mem);
err_free_send:
    uct_iface_mem_free(&perf->uct.send_mem);
err:
    return status;
}
/* Release everything uct_perf_test_alloc_mem() acquired: both registered
 * buffers and the IOV descriptor array (free(NULL) is a no-op). */
static void uct_perf_test_free_mem(ucx_perf_context_t *perf)
{
    uct_iface_mem_free(&perf->uct.send_mem);
    uct_iface_mem_free(&perf->uct.recv_mem);
    free(perf->uct.iov);
}
/* Restart the test's time base: the overall start time, the last-report
 * timestamp and the previous-counters snapshot all begin "now". */
void ucx_perf_test_start_clock(ucx_perf_context_t *perf)
{
    ucs_time_t now = ucs_get_time();

    perf->start_time = now;
    perf->prev_time  = now;
    perf->prev.time  = now;
}
/* Reinitialize the perf context for a fresh run: copy the requested
 * parameters, restart the clock, derive the wall-time/iteration budgets
 * and zero all progress counters and the latency timing queue. */
static void ucx_perf_test_reset(ucx_perf_context_t *perf,
                                ucx_perf_params_t *params)
{
    unsigned idx;

    /* Take a private copy of the parameters and restart the time base. */
    perf->params     = *params;
    perf->start_time = ucs_get_time();
    perf->prev_time  = perf->start_time;

    /* Zero max_time / max_iter mean "unlimited". */
    perf->end_time = (perf->params.max_time == 0.0) ? UINT64_MAX :
                      ucs_time_from_sec(perf->params.max_time) + perf->start_time;
    perf->max_iter = (perf->params.max_iter == 0) ? UINT64_MAX :
                      perf->params.max_iter;
    perf->report_interval = ucs_time_from_sec(perf->params.report_interval);

    /* Reset the running counters and the last-reported snapshot. */
    perf->current.time  = 0;
    perf->current.msgs  = 0;
    perf->current.bytes = 0;
    perf->current.iters = 0;
    perf->prev.time     = perf->start_time;
    perf->prev.msgs     = 0;
    perf->prev.bytes    = 0;
    perf->prev.iters    = 0;

    perf->timing_queue_head = 0;
    perf->offset            = 0;
    for (idx = 0; idx < TIMING_QUEUE_SIZE; ++idx) {
        perf->timing_queue[idx] = 0;
    }
}
/* Fill *result with latency, bandwidth and message-rate figures derived
 * from the counters accumulated in *perf. "moment" averages cover the
 * window since the previous report; "total" averages cover the whole run. */
void ucx_perf_calc_result(ucx_perf_context_t *perf, ucx_perf_result_t *result)
{
    /* A ping-pong iteration carries two messages (one each way), so
     * per-message latency is halved and the rates are doubled. */
    double factor    = (perf->params.test_type == UCX_PERF_TEST_TYPE_PINGPONG) ?
                       2.0 : 1.0;
    double sec_value = ucs_time_from_sec(1.0);

    result->iters        = perf->current.iters;
    result->bytes        = perf->current.bytes;
    result->elapsed_time = perf->current.time - perf->start_time;

    /* Latency: "typical" is the median of the recent per-iteration samples
     * (note: the quickselect reorders the timing queue in place). */
    result->latency.typical =
        __find_median_quick_select(perf->timing_queue, TIMING_QUEUE_SIZE)
        / sec_value
        / factor;
    result->latency.moment_average =
        (double)(perf->current.time - perf->prev.time)
        / (perf->current.iters - perf->prev.iters)
        / sec_value
        / factor;
    result->latency.total_average =
        (double)(perf->current.time - perf->start_time)
        / perf->current.iters
        / sec_value
        / factor;

    /* Bandwidth */
    result->bandwidth.typical = 0.0; // Undefined
    result->bandwidth.moment_average =
        (perf->current.bytes - perf->prev.bytes) * sec_value
        / (double)(perf->current.time - perf->prev.time) * factor;
    result->bandwidth.total_average =
        perf->current.bytes * sec_value
        / (double)(perf->current.time - perf->start_time) * factor;

    /* Packet rate */
    result->msgrate.typical = 0.0; // Undefined
    result->msgrate.moment_average =
        (perf->current.msgs - perf->prev.msgs) * sec_value
        / (double)(perf->current.time - perf->prev.time) * factor;
    result->msgrate.total_average =
        perf->current.msgs * sec_value
        / (double)(perf->current.time - perf->start_time) * factor;
}
/* Validate user-supplied test parameters that are common to UCT and UCP:
 * a non-empty message, a positive outstanding-sends window, and (for IOV
 * layouts) that every message size fits into the configured stride.
 * Returns UCS_OK or UCS_ERR_INVALID_PARAM (logged when VERBOSE). */
static ucs_status_t ucx_perf_test_check_params(ucx_perf_params_t *params)
{
    size_t it;

    if (ucx_perf_get_message_size(params) < 1) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size too small, need to be at least 1");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    if (params->max_outstanding < 1) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            /* BUGFIX: message previously read "max_outstanding, need to be
             * at least 1" — words were missing */
            ucs_error("max_outstanding too small, need to be at least 1");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    /* check if particular message size fit into stride size */
    if (params->iov_stride) {
        for (it = 0; it < params->msg_size_cnt; ++it) {
            if (params->msg_size_list[it] > params->iov_stride) {
                if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_error("Buffer size %lu bigger than stride %lu",
                              params->msg_size_list[it], params->iov_stride);
                }
                return UCS_ERR_INVALID_PARAM;
            }
        }
    }
    return UCS_OK;
}
/* Blocking flush: repeatedly issue a non-blocking interface flush and
 * progress the worker until the flush no longer reports UCS_INPROGRESS. */
void uct_perf_iface_flush_b(ucx_perf_context_t *perf)
{
    ucs_status_t status;

    for (;;) {
        status = uct_iface_flush(perf->uct.iface, 0, NULL);
        uct_worker_progress(perf->uct.worker);
        if (status != UCS_INPROGRESS) {
            break;
        }
    }
}
/* Map a data layout to the corresponding capability flag; returns 0 for
 * layouts that have no associated flag. */
static inline uint64_t __get_flag(uct_perf_data_layout_t layout, uint64_t short_f,
                                  uint64_t bcopy_f, uint64_t zcopy_f)
{
    switch (layout) {
    case UCT_PERF_DATA_LAYOUT_SHORT:
        return short_f;
    case UCT_PERF_DATA_LAYOUT_BCOPY:
        return bcopy_f;
    case UCT_PERF_DATA_LAYOUT_ZCOPY:
        return zcopy_f;
    default:
        return 0;
    }
}
/* Record the capability bit for atomic operation `op` in the mask that
 * matches the operand size: *op32 for 4-byte, *op64 for 8-byte operands.
 * Any other size yields UCS_ERR_UNSUPPORTED and leaves both masks alone. */
static inline ucs_status_t __get_atomic_flag(size_t size, uint64_t *op32,
                                             uint64_t *op64, uint64_t op)
{
    switch (size) {
    case sizeof(uint32_t):
        *op32 = UCS_BIT(op);
        return UCS_OK;
    case sizeof(uint64_t):
        *op64 = UCS_BIT(op);
        return UCS_OK;
    default:
        return UCS_ERR_UNSUPPORTED;
    }
}
/* Map a data layout to the matching size limit; returns 0 for layouts
 * without a limit entry.
 * FIX: zcopy_m was declared uint64_t while its siblings (and the return
 * type) are size_t — made consistent; callers pass size_t values, so this
 * is backward compatible. */
static inline size_t __get_max_size(uct_perf_data_layout_t layout, size_t short_m,
                                    size_t bcopy_m, size_t zcopy_m)
{
    return (layout == UCT_PERF_DATA_LAYOUT_SHORT) ? short_m :
           (layout == UCT_PERF_DATA_LAYOUT_BCOPY) ? bcopy_m :
           (layout == UCT_PERF_DATA_LAYOUT_ZCOPY) ? zcopy_m :
           0;
}
/* Verify that the selected UCT interface can run the requested test:
 * checks the capability flags required by the command/data-layout pair,
 * atomic support for the message size, overall min/max message-size
 * limits, AM header constraints and IOV-entry limits.
 * Returns UCS_OK when the test can run; otherwise an error status
 * (details are logged only when UCX_PERF_TEST_FLAG_VERBOSE is set). */
static ucs_status_t uct_perf_test_check_capabilities(ucx_perf_params_t *params,
                                                     uct_iface_h iface)
{
    uint64_t required_flags = 0;
    uint64_t atomic_op32    = 0;
    uint64_t atomic_op64    = 0;
    uint64_t atomic_fop32   = 0;
    uint64_t atomic_fop64   = 0;
    uct_iface_attr_t attr;
    ucs_status_t status;
    size_t min_size, max_size, max_iov, message_size;

    status = uct_iface_query(iface, &attr);
    if (status != UCS_OK) {
        return status;
    }

    min_size     = 0;
    max_iov      = 1;
    message_size = ucx_perf_get_message_size(params);

    /* Derive the required capability bits and size limits per command */
    switch (params->command) {
    case UCX_PERF_CMD_AM:
        required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_AM_SHORT,
                                    UCT_IFACE_FLAG_AM_BCOPY, UCT_IFACE_FLAG_AM_ZCOPY);
        required_flags |= UCT_IFACE_FLAG_CB_SYNC;
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.am.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, attr.cap.am.max_short,
                                  attr.cap.am.max_bcopy, attr.cap.am.max_zcopy);
        max_iov  = attr.cap.am.max_iov;
        break;
    case UCX_PERF_CMD_PUT:
        required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_PUT_SHORT,
                                    UCT_IFACE_FLAG_PUT_BCOPY, UCT_IFACE_FLAG_PUT_ZCOPY);
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.put.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, attr.cap.put.max_short,
                                  attr.cap.put.max_bcopy, attr.cap.put.max_zcopy);
        max_iov  = attr.cap.put.max_iov;
        break;
    case UCX_PERF_CMD_GET:
        required_flags = __get_flag(params->uct.data_layout, UCT_IFACE_FLAG_GET_SHORT,
                                    UCT_IFACE_FLAG_GET_BCOPY, UCT_IFACE_FLAG_GET_ZCOPY);
        min_size = __get_max_size(params->uct.data_layout, 0, 0,
                                  attr.cap.get.min_zcopy);
        max_size = __get_max_size(params->uct.data_layout, attr.cap.get.max_short,
                                  attr.cap.get.max_bcopy, attr.cap.get.max_zcopy);
        max_iov  = attr.cap.get.max_iov;
        break;
    case UCX_PERF_CMD_ADD:
        ATOMIC_OP_CONFIG(message_size, &atomic_op32, &atomic_op64, UCT_ATOMIC_OP_ADD,
                         perf_atomic_op, params, status);
        max_size = 8;
        break;
    case UCX_PERF_CMD_FADD:
        ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_ADD,
                         perf_atomic_fop, params, status);
        max_size = 8;
        break;
    case UCX_PERF_CMD_SWAP:
        ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_SWAP,
                         perf_atomic_fop, params, status);
        max_size = 8;
        break;
    case UCX_PERF_CMD_CSWAP:
        ATOMIC_OP_CONFIG(message_size, &atomic_fop32, &atomic_fop64, UCT_ATOMIC_OP_CSWAP,
                         perf_atomic_fop, params, status);
        max_size = 8;
        break;
    default:
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Invalid test command");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    status = ucx_perf_test_check_params(params);
    if (status != UCS_OK) {
        return status;
    }

    /* check atomics first */
    ATOMIC_OP_CHECK(32, attr.cap.atomic32.op_flags, atomic_op32, params, perf_atomic_op);
    ATOMIC_OP_CHECK(64, attr.cap.atomic64.op_flags, atomic_op64, params, perf_atomic_op);
    ATOMIC_OP_CHECK(32, attr.cap.atomic32.fop_flags, atomic_fop32, params, perf_atomic_fop);
    ATOMIC_OP_CHECK(64, attr.cap.atomic64.fop_flags, atomic_fop64, params, perf_atomic_fop);

    /* check iface flags */
    if (!(atomic_op32 | atomic_op64 | atomic_fop32 | atomic_fop64) &&
        (!ucs_test_all_flags(attr.cap.flags, required_flags) || !required_flags)) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("%s/%s does not support operation %s",
                      params->uct.tl_name, params->uct.dev_name,
                      perf_iface_ops[ucs_ffs64(~attr.cap.flags & required_flags)]);
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (message_size < min_size) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size (%zu) is smaller than min supported (%zu)",
                      message_size, min_size);
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (message_size > max_size) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Message size (%zu) is larger than max supported (%zu)",
                      message_size, max_size);
        }
        return UCS_ERR_UNSUPPORTED;
    }

    if (params->command == UCX_PERF_CMD_AM) {
        if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_SHORT) &&
            (params->am_hdr_size != sizeof(uint64_t)))
        {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("Short AM header size must be 8 bytes");
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if ((params->uct.data_layout == UCT_PERF_DATA_LAYOUT_ZCOPY) &&
            (params->am_hdr_size > attr.cap.am.max_hdr))
        {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM header size (%zu) is larger than max supported (%zu)",
                          params->am_hdr_size, attr.cap.am.max_hdr);
            }
            return UCS_ERR_UNSUPPORTED;
        }

        if (params->am_hdr_size > message_size) {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM header size (%zu) is larger than message size (%zu)",
                          params->am_hdr_size, message_size);
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if (params->uct.fc_window > UCT_PERF_TEST_MAX_FC_WINDOW) {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("AM flow-control window (%d) too large (should be <= %d)",
                          params->uct.fc_window, UCT_PERF_TEST_MAX_FC_WINDOW);
            }
            return UCS_ERR_INVALID_PARAM;
        }

        if ((params->flags & UCX_PERF_TEST_FLAG_ONE_SIDED) &&
            (params->flags & UCX_PERF_TEST_FLAG_VERBOSE))
        {
            /* BUGFIX: message previously read "on-sided progress" */
            ucs_warn("Running active-message test with one-sided progress");
        }
    }

    if (UCT_PERF_DATA_LAYOUT_ZCOPY == params->uct.data_layout) {
        if (params->msg_size_cnt > max_iov) {
            if ((params->flags & UCX_PERF_TEST_FLAG_VERBOSE) ||
                !params->msg_size_cnt) {
                ucs_error("Wrong number of IOV entries. Requested is %lu, "
                          "should be in the range 1...%lu", params->msg_size_cnt,
                          max_iov);
            }
            return UCS_ERR_UNSUPPORTED;
        }
        /* if msg_size_cnt == 1 the message size checked above */
        if ((UCX_PERF_CMD_AM == params->command) && (params->msg_size_cnt > 1)) {
            if (params->am_hdr_size > params->msg_size_list[0]) {
                if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_error("AM header size (%lu) larger than the first IOV "
                              "message size (%lu)", params->am_hdr_size,
                              params->msg_size_list[0]);
                }
                return UCS_ERR_INVALID_PARAM;
            }
        }
    }
    return UCS_OK;
}
/* Establish UCT endpoints to every peer in the RTE group: packs our rkey
 * and device/iface/ep addresses, exchanges them collectively, then either
 * connects ep-to-ep or creates connected endpoints depending on the
 * interface's connect mode. On success perf->uct.peers[] holds one
 * connected endpoint (and possibly an unpacked rkey) per remote rank. */
static ucs_status_t uct_perf_test_setup_endpoints(ucx_perf_context_t *perf)
{
    const size_t buffer_size = 2048;
    ucx_perf_ep_info_t info, *remote_info;
    unsigned group_size, i, group_index;
    uct_device_addr_t *dev_addr;
    uct_iface_addr_t *iface_addr;
    uct_ep_addr_t *ep_addr;
    uct_iface_attr_t iface_attr;
    uct_md_attr_t md_attr;
    void *rkey_buffer;
    ucs_status_t status;
    struct iovec vec[2]; /* FIX: was [5]; only two entries are ever posted */
    void *buffer;
    void *req;

    buffer = malloc(buffer_size);
    if (buffer == NULL) {
        ucs_error("Failed to allocate RTE buffer");
        status = UCS_ERR_NO_MEMORY;
        goto err;
    }

    status = uct_iface_query(perf->uct.iface, &iface_attr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_query: %s", ucs_status_string(status));
        goto err_free;
    }

    status = uct_md_query(perf->uct.md, &md_attr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_md_query: %s", ucs_status_string(status));
        goto err_free;
    }

    if (md_attr.cap.flags & (UCT_MD_FLAG_ALLOC|UCT_MD_FLAG_REG)) {
        info.rkey_size = md_attr.rkey_packed_size;
    } else {
        info.rkey_size = 0;
    }
    info.uct.dev_addr_len   = iface_attr.device_addr_len;
    info.uct.iface_addr_len = iface_attr.iface_addr_len;
    info.uct.ep_addr_len    = iface_attr.ep_addr_len;
    info.recv_buffer        = (uintptr_t)perf->recv_buffer;

    /* Lay the packed rkey and the three addresses out back-to-back in
     * `buffer`, which is sent as one blob after the fixed-size info. */
    rkey_buffer = buffer;
    dev_addr    = (void*)rkey_buffer + info.rkey_size;
    iface_addr  = (void*)dev_addr    + info.uct.dev_addr_len;
    ep_addr     = (void*)iface_addr  + info.uct.iface_addr_len;
    ucs_assert_always((void*)ep_addr + info.uct.ep_addr_len <= buffer + buffer_size);

    status = uct_iface_get_device_address(perf->uct.iface, dev_addr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_get_device_address: %s",
                  ucs_status_string(status));
        goto err_free;
    }

    status = uct_iface_get_address(perf->uct.iface, iface_addr);
    if (status != UCS_OK) {
        ucs_error("Failed to uct_iface_get_address: %s", ucs_status_string(status));
        goto err_free;
    }

    if (info.rkey_size > 0) {
        memset(rkey_buffer, 0, info.rkey_size);
        status = uct_md_mkey_pack(perf->uct.md, perf->uct.recv_mem.memh, rkey_buffer);
        if (status != UCS_OK) {
            ucs_error("Failed to uct_rkey_pack: %s", ucs_status_string(status));
            goto err_free;
        }
    }

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    perf->uct.peers = calloc(group_size, sizeof(*perf->uct.peers));
    if (perf->uct.peers == NULL) {
        /* BUGFIX: status was left at its previous (success) value here,
         * so allocation failure silently returned UCS_OK */
        ucs_error("Failed to allocate peers array");
        status = UCS_ERR_NO_MEMORY;
        goto err_free;
    }

    if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
        for (i = 0; i < group_size; ++i) {
            if (i == group_index) {
                continue;
            }
            status = uct_ep_create(perf->uct.iface, &perf->uct.peers[i].ep);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_ep_create: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
            status = uct_ep_get_address(perf->uct.peers[i].ep, ep_addr);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_ep_get_address: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
        }
    }

    vec[0].iov_base = &info;
    vec[0].iov_len  = sizeof(info);
    vec[1].iov_base = buffer;
    vec[1].iov_len  = info.rkey_size + info.uct.dev_addr_len +
                      info.uct.iface_addr_len + info.uct.ep_addr_len;

    rte_call(perf, post_vec, vec, 2, &req);
    rte_call(perf, exchange_vec, req);

    for (i = 0; i < group_size; ++i) {
        if (i == group_index) {
            continue;
        }

        rte_call(perf, recv, i, buffer, buffer_size, req);

        /* Parse the peer's blob with the same layout we posted above */
        remote_info = buffer;
        rkey_buffer = remote_info + 1;
        dev_addr    = (void*)rkey_buffer + remote_info->rkey_size;
        iface_addr  = (void*)dev_addr    + remote_info->uct.dev_addr_len;
        ep_addr     = (void*)iface_addr  + remote_info->uct.iface_addr_len;
        perf->uct.peers[i].remote_addr = remote_info->recv_buffer;

        if (!uct_iface_is_reachable(perf->uct.iface, dev_addr,
                                    remote_info->uct.iface_addr_len ?
                                    iface_addr : NULL)) {
            ucs_error("Destination is unreachable");
            status = UCS_ERR_UNREACHABLE;
            goto err_destroy_eps;
        }

        if (remote_info->rkey_size > 0) {
            status = uct_rkey_unpack(rkey_buffer, &perf->uct.peers[i].rkey);
            if (status != UCS_OK) {
                ucs_error("Failed to uct_rkey_unpack: %s", ucs_status_string(status));
                goto err_destroy_eps;
            }
        } else {
            perf->uct.peers[i].rkey.handle = NULL;
            perf->uct.peers[i].rkey.type   = NULL;
            perf->uct.peers[i].rkey.rkey   = UCT_INVALID_RKEY;
        }

        if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_EP) {
            status = uct_ep_connect_to_ep(perf->uct.peers[i].ep, dev_addr, ep_addr);
        } else if (iface_attr.cap.flags & UCT_IFACE_FLAG_CONNECT_TO_IFACE) {
            status = uct_ep_create_connected(perf->uct.iface, dev_addr, iface_addr,
                                             &perf->uct.peers[i].ep);
        } else {
            status = UCS_ERR_UNSUPPORTED;
        }
        if (status != UCS_OK) {
            ucs_error("Failed to connect endpoint: %s", ucs_status_string(status));
            goto err_destroy_eps;
        }
    }
    uct_perf_iface_flush_b(perf);

    free(buffer);

    uct_perf_barrier(perf);
    return UCS_OK;

err_destroy_eps:
    for (i = 0; i < group_size; ++i) {
        if (perf->uct.peers[i].rkey.type != NULL) {
            uct_rkey_release(&perf->uct.peers[i].rkey);
        }
        if (perf->uct.peers[i].ep != NULL) {
            uct_ep_destroy(perf->uct.peers[i].ep);
        }
    }
    free(perf->uct.peers);
err_free:
    free(buffer);
err:
    return status;
}
/* Tear down all UCT endpoints and rkeys created by
 * uct_perf_test_setup_endpoints(): barrier first so no peer is still
 * sending to us, unregister the AM handler, then release per-peer state. */
static void uct_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
    unsigned group_size, group_index, i;

    uct_perf_barrier(perf);

    uct_iface_set_am_handler(perf->uct.iface, UCT_PERF_TEST_AM_ID, NULL, NULL, 0);

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    for (i = 0; i < group_size; ++i) {
        if (i == group_index) {
            continue; /* no self-connection was created */
        }
        if (perf->uct.peers[i].rkey.rkey != UCT_INVALID_RKEY) {
            uct_rkey_release(&perf->uct.peers[i].rkey);
        }
        if (perf->uct.peers[i].ep) {
            uct_ep_destroy(perf->uct.peers[i].ep);
        }
    }
    free(perf->uct.peers);
}
/* Derive the UCP context features (RMA/AMO/TAG/STREAM and request size)
 * required by the chosen test command, then run the common parameter
 * checks. Returns UCS_OK or UCS_ERR_INVALID_PARAM (logged when VERBOSE). */
static ucs_status_t ucp_perf_test_fill_params(ucx_perf_params_t *params,
                                               ucp_params_t *ucp_params)
{
    ucs_status_t status;
    /* BUGFIX: message_size was declared ucs_status_t (an enum/int), which
     * could truncate a size_t message size before the AMO width check */
    size_t message_size;

    message_size = ucx_perf_get_message_size(params);
    switch (params->command) {
    case UCX_PERF_CMD_PUT:
    case UCX_PERF_CMD_GET:
        ucp_params->features |= UCP_FEATURE_RMA;
        break;
    case UCX_PERF_CMD_ADD:
    case UCX_PERF_CMD_FADD:
    case UCX_PERF_CMD_SWAP:
    case UCX_PERF_CMD_CSWAP:
        /* Atomics are only defined for 4- or 8-byte operands */
        if (message_size == sizeof(uint32_t)) {
            ucp_params->features |= UCP_FEATURE_AMO32;
        } else if (message_size == sizeof(uint64_t)) {
            ucp_params->features |= UCP_FEATURE_AMO64;
        } else {
            if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("Atomic size should be either 32 or 64 bit");
            }
            return UCS_ERR_INVALID_PARAM;
        }
        break;
    case UCX_PERF_CMD_TAG:
    case UCX_PERF_CMD_TAG_SYNC:
        ucp_params->features     |= UCP_FEATURE_TAG;
        ucp_params->field_mask   |= UCP_PARAM_FIELD_REQUEST_SIZE;
        ucp_params->request_size  = sizeof(ucp_perf_request_t);
        break;
    case UCX_PERF_CMD_STREAM:
        ucp_params->features     |= UCP_FEATURE_STREAM;
        ucp_params->field_mask   |= UCP_PARAM_FIELD_REQUEST_SIZE;
        ucp_params->request_size  = sizeof(ucp_perf_request_t);
        break;
    default:
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Invalid test command");
        }
        return UCS_ERR_INVALID_PARAM;
    }

    status = ucx_perf_test_check_params(params);
    if (status != UCS_OK) {
        return status;
    }

    return UCS_OK;
}
/* Allocate the scatter-gather descriptor array (*iov_p) when the test uses
 * the IOV datatype; for any other datatype this is a no-op that leaves
 * *iov_p untouched and returns UCS_OK. */
static ucs_status_t ucp_perf_test_alloc_iov_mem(ucp_perf_datatype_t datatype,
                                                size_t iovcnt, unsigned thread_count,
                                                ucp_dt_iov_t **iov_p)
{
    ucp_dt_iov_t *iov;

    if (UCP_PERF_DATATYPE_IOV != datatype) {
        return UCS_OK;
    }

    iov = malloc(sizeof(*iov) * iovcnt * thread_count);
    if (iov == NULL) {
        ucs_error("Failed allocate IOV buffer with iovcnt=%lu", iovcnt);
        return UCS_ERR_NO_MEMORY;
    }
    *iov_p = iov;
    return UCS_OK;
}
/* Allocate host memory for the test via ucp_mem_map (UCP chooses the exact
 * address), optionally requesting non-blocking mapping, and report the
 * actually mapped address back through *addr. */
static ucs_status_t
ucp_perf_test_alloc_host(ucx_perf_context_t *perf, ucx_perf_params_t *params,
                         void **addr, size_t length, ucp_mem_h *memh,
                         int check_non_blk_flag)
{
    ucp_mem_map_params_t mem_map_params;
    ucp_mem_attr_t mem_attr;
    ucs_status_t status;

    mem_map_params.field_mask = UCP_MEM_MAP_PARAM_FIELD_ADDRESS |
                                UCP_MEM_MAP_PARAM_FIELD_LENGTH |
                                UCP_MEM_MAP_PARAM_FIELD_FLAGS;
    mem_map_params.address    = *addr;
    mem_map_params.length     = length;
    mem_map_params.flags      = UCP_MEM_MAP_ALLOCATE;
    if (check_non_blk_flag) {
        mem_map_params.flags |= (params->flags & UCX_PERF_TEST_FLAG_MAP_NONBLOCK) ?
                                UCP_MEM_MAP_NONBLOCK : 0;
    }

    status = ucp_mem_map(perf->ucp.context, &mem_map_params, memh);
    if (status != UCS_OK) {
        return status;
    }

    /* Query where UCP actually placed the allocation */
    mem_attr.field_mask = UCP_MEM_ATTR_FIELD_ADDRESS;
    status = ucp_mem_query(*memh, &mem_attr);
    if (status != UCS_OK) {
        return status;
    }

    *addr = mem_attr.address;
    return UCS_OK;
}
static ucs_status_t
ucp_perf_test_alloc_cuda(void **addr, size_t length)
{
#if HAVE_CUDA
cudaError_t cerr;
cerr = cudaMalloc(addr, length);
if (cerr != cudaSuccess) {
return UCS_ERR_NO_MEMORY;
}
#endif
return UCS_OK;
}
static ucs_status_t
ucp_perf_test_alloc_cuda_managed(void **addr, size_t length)
{
#if HAVE_CUDA
cudaError_t cerr;
cerr = cudaMallocManaged(addr, length, cudaMemAttachGlobal);
if (cerr != cudaSuccess) {
return UCS_ERR_NO_MEMORY;
}
#endif
return UCS_OK;
}
/* Allocate a contiguous test buffer, dispatching on the requested memory
 * type (host via ucp_mem_map, or CUDA device/managed memory). Unknown
 * memory types are rejected with UCS_ERR_UNSUPPORTED. */
static ucs_status_t
ucp_perf_test_alloc_contig(ucx_perf_context_t *perf, ucx_perf_params_t *params,
                           void **addr, size_t length, ucp_mem_h *memh,
                           int check_non_blk_flag)
{
    switch (perf->params.mem_type) {
    case UCT_MD_MEM_TYPE_HOST:
        return ucp_perf_test_alloc_host(perf, params, addr, length, memh,
                                        check_non_blk_flag);
    case UCT_MD_MEM_TYPE_CUDA:
        return ucp_perf_test_alloc_cuda(addr, length);
    case UCT_MD_MEM_TYPE_CUDA_MANAGED:
        return ucp_perf_test_alloc_cuda_managed(addr, length);
    default:
        return UCS_ERR_UNSUPPORTED;
    }
}
/* Release a buffer obtained from ucp_perf_test_alloc_contig(): host memory
 * is unmapped through its memh; CUDA allocations are freed by address.
 * Any other memory type is silently ignored, matching the allocator. */
static void ucp_perf_test_free_contig(ucx_perf_context_t *perf, void *addr, ucp_mem_h memh)
{
    ucs_status_t status;

    switch (perf->params.mem_type) {
    case UCT_MD_MEM_TYPE_HOST:
        status = ucp_mem_unmap(perf->ucp.context, memh);
        if (status != UCS_OK) {
            ucs_warn("ucp_mem_unmap() failed: %s", ucs_status_string(status));
        }
        break;
    case UCT_MD_MEM_TYPE_CUDA:
    case UCT_MD_MEM_TYPE_CUDA_MANAGED:
#if HAVE_CUDA
        cudaFree(addr);
#endif
        break;
    default:
        break;
    }
}
/* Allocate all UCP test memory: send and receive buffers of
 * buffer_size * thread_count bytes each, plus IOV descriptor arrays for
 * whichever of the send/recv datatypes is IOV. On failure releases every
 * resource allocated so far and returns the underlying error status.
 * FIX: error paths previously returned a hard-coded UCS_ERR_NO_MEMORY,
 * masking the real failure reason from ucp_perf_test_alloc_contig(). */
static ucs_status_t ucp_perf_test_alloc_mem(ucx_perf_context_t *perf, ucx_perf_params_t *params)
{
    ucs_status_t status;
    size_t buffer_size;

    if (params->iov_stride) {
        buffer_size = params->msg_size_cnt * params->iov_stride;
    } else {
        buffer_size = ucx_perf_get_message_size(params);
    }

    /* Allocate send buffer memory */
    perf->send_buffer = NULL;
    status = ucp_perf_test_alloc_contig(perf, params, &perf->send_buffer,
                                        buffer_size * params->thread_count,
                                        &perf->ucp.send_memh, 1);
    if (status != UCS_OK) {
        goto err;
    }

    /* Allocate receive buffer memory */
    perf->recv_buffer = NULL;
    status = ucp_perf_test_alloc_contig(perf, params, &perf->recv_buffer,
                                        buffer_size * params->thread_count,
                                        &perf->ucp.recv_memh, 0);
    if (status != UCS_OK) {
        goto err_free_send_buffer;
    }

    /* Allocate IOV datatype memory */
    perf->params.msg_size_cnt = params->msg_size_cnt;
    perf->ucp.send_iov = NULL;
    status = ucp_perf_test_alloc_iov_mem(params->ucp.send_datatype, perf->params.msg_size_cnt,
                                         params->thread_count, &perf->ucp.send_iov);
    if (UCS_OK != status) {
        goto err_free_buffers;
    }

    perf->ucp.recv_iov = NULL;
    status = ucp_perf_test_alloc_iov_mem(params->ucp.recv_datatype, perf->params.msg_size_cnt,
                                         params->thread_count, &perf->ucp.recv_iov);
    if (UCS_OK != status) {
        goto err_free_send_iov_buffers;
    }

    return UCS_OK;

err_free_send_iov_buffers:
    free(perf->ucp.send_iov);
err_free_buffers:
    ucp_perf_test_free_contig(perf, perf->recv_buffer, perf->ucp.recv_memh);
err_free_send_buffer:
    ucp_perf_test_free_contig(perf, perf->send_buffer, perf->ucp.send_memh);
err:
    return status; /* propagate the real error, not always NO_MEMORY */
}
/* Release everything ucp_perf_test_alloc_mem() acquired, in reverse order
 * of allocation (free(NULL) is a no-op for datatypes without IOV arrays). */
static void ucp_perf_test_free_mem(ucx_perf_context_t *perf)
{
    free(perf->ucp.recv_iov);
    free(perf->ucp.send_iov);
    ucp_perf_test_free_contig(perf, perf->recv_buffer, perf->ucp.recv_memh);
    ucp_perf_test_free_contig(perf, perf->send_buffer, perf->ucp.send_memh);
}
/* Destroy all UCP peer rkeys and endpoints. Disconnects are issued
 * non-blocking for every peer first, then completed by progressing the
 * worker, and finally the peers array itself is freed.
 * FIXES: calloc() arguments were in (size, nmemb) order, and its return
 * value was never checked — a NULL would have crashed at reqs[i]. On
 * allocation failure we now fall back to completing each disconnect
 * synchronously before moving to the next peer. */
static void ucp_perf_test_destroy_eps(ucx_perf_context_t* perf,
                                      unsigned group_size)
{
    ucs_status_ptr_t *reqs;
    ucs_status_ptr_t req;
    ucp_tag_recv_info_t info;
    ucs_status_t status;
    unsigned i;

    reqs = calloc(group_size, sizeof(*reqs));
    if (reqs == NULL) {
        ucs_warn("Failed to allocate disconnect request array");
    }

    for (i = 0; i < group_size; ++i) {
        if (perf->ucp.peers[i].rkey != NULL) {
            ucp_rkey_destroy(perf->ucp.peers[i].rkey);
        }
        if (perf->ucp.peers[i].ep == NULL) {
            continue;
        }
        req = ucp_disconnect_nb(perf->ucp.peers[i].ep);
        if (reqs != NULL) {
            reqs[i] = req; /* complete later, all peers in parallel */
        } else if (UCS_PTR_IS_PTR(req)) {
            /* No array to defer into — wait for this disconnect now */
            do {
                ucp_worker_progress(perf->ucp.worker);
                status = ucp_request_test(req, &info);
            } while (status == UCS_INPROGRESS);
            ucp_request_release(req);
        }
    }

    if (reqs != NULL) {
        for (i = 0; i < group_size; ++i) {
            if (!UCS_PTR_IS_PTR(reqs[i])) {
                continue;
            }
            do {
                ucp_worker_progress(perf->ucp.worker);
                status = ucp_request_test(reqs[i], &info);
            } while (status == UCS_INPROGRESS);
            ucp_request_release(reqs[i]);
        }
        free(reqs);
    }

    free(perf->ucp.peers);
}
/* Collectively combine a local status across the RTE group: every rank
 * broadcasts its own status and receives everyone else's; the first
 * non-OK status encountered (local or remote) is returned. */
static ucs_status_t ucp_perf_test_exchange_status(ucx_perf_context_t *perf,
                                                  ucs_status_t status)
{
    unsigned group_size             = rte_call(perf, group_size);
    ucs_status_t collective_status  = status;
    struct iovec vec;
    void *req                       = NULL;
    unsigned peer;

    /* Broadcast our local status to the group... */
    vec.iov_base = &status;
    vec.iov_len  = sizeof(status);
    rte_call(perf, post_vec, &vec, 1, &req);
    rte_call(perf, exchange_vec, req);

    /* ...then fold in every peer's status; any failure wins. */
    for (peer = 0; peer < group_size; ++peer) {
        rte_call(perf, recv, peer, &status, sizeof(status), req);
        if (status != UCS_OK) {
            collective_status = status;
        }
    }
    return collective_status;
}
/* Establish UCP endpoints to every peer in the RTE group: exchanges worker
 * addresses (and packed rkeys when RMA/AMO features are active), creates
 * one endpoint per remote rank and unpacks its rkey, then agrees on a
 * collective status and flushes the worker to force wireup completion.
 * FIXES: the peers array was sized with sizeof(*perf->uct.peers) (wrong
 * union member — potential overflow); calloc failure returned a stale
 * success status; a failed collective status could be overwritten by a
 * successful worker flush. */
static ucs_status_t ucp_perf_test_setup_endpoints(ucx_perf_context_t *perf,
                                                  uint64_t features)
{
    const size_t buffer_size = 2048;
    ucx_perf_ep_info_t info, *remote_info;
    unsigned group_size, i, group_index;
    ucp_address_t *address;
    size_t address_length = 0;
    ucp_ep_params_t ep_params;
    ucs_status_t status;
    struct iovec vec[3];
    void *rkey_buffer;
    void *req = NULL;
    void *buffer;

    group_size  = rte_call(perf, group_size);
    group_index = rte_call(perf, group_index);

    status = ucp_worker_get_address(perf->ucp.worker, &address, &address_length);
    if (status != UCS_OK) {
        if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("ucp_worker_get_address() failed: %s", ucs_status_string(status));
        }
        goto err;
    }

    info.ucp.addr_len  = address_length;
    info.recv_buffer   = (uintptr_t)perf->recv_buffer;

    vec[0].iov_base    = &info;
    vec[0].iov_len     = sizeof(info);
    vec[1].iov_base    = address;
    vec[1].iov_len     = address_length;

    if (features & (UCP_FEATURE_RMA|UCP_FEATURE_AMO32|UCP_FEATURE_AMO64)) {
        /* RMA/AMO tests additionally publish the packed rkey */
        status = ucp_rkey_pack(perf->ucp.context, perf->ucp.recv_memh,
                               &rkey_buffer, &info.rkey_size);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_rkey_pack() failed: %s", ucs_status_string(status));
            }
            ucp_worker_release_address(perf->ucp.worker, address);
            goto err;
        }

        vec[2].iov_base = rkey_buffer;
        vec[2].iov_len  = info.rkey_size;
        rte_call(perf, post_vec, vec, 3, &req);
        ucp_rkey_buffer_release(rkey_buffer);
    } else {
        info.rkey_size  = 0;
        rte_call(perf, post_vec, vec, 2, &req);
    }

    ucp_worker_release_address(perf->ucp.worker, address);
    rte_call(perf, exchange_vec, req);

    /* BUGFIX: was sizeof(*perf->uct.peers) — wrong union member */
    perf->ucp.peers = calloc(group_size, sizeof(*perf->ucp.peers));
    if (perf->ucp.peers == NULL) {
        /* BUGFIX: status was previously left at UCS_OK here */
        ucs_error("Failed to allocate peers array");
        status = UCS_ERR_NO_MEMORY;
        goto err;
    }

    buffer = malloc(buffer_size);
    if (buffer == NULL) {
        ucs_error("Failed to allocate RTE receive buffer");
        status = UCS_ERR_NO_MEMORY;
        goto err_destroy_eps;
    }

    for (i = 0; i < group_size; ++i) {
        if (i == group_index) {
            continue;
        }

        rte_call(perf, recv, i, buffer, buffer_size, req);

        remote_info = buffer;
        address     = (void*)(remote_info + 1);
        rkey_buffer = (void*)address + remote_info->ucp.addr_len;
        perf->ucp.peers[i].remote_addr = remote_info->recv_buffer;

        ep_params.field_mask = UCP_EP_PARAM_FIELD_REMOTE_ADDRESS;
        ep_params.address    = address;

        status = ucp_ep_create(perf->ucp.worker, &ep_params, &perf->ucp.peers[i].ep);
        if (status != UCS_OK) {
            if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                ucs_error("ucp_ep_create() failed: %s", ucs_status_string(status));
            }
            goto err_free_buffer;
        }

        if (remote_info->rkey_size > 0) {
            status = ucp_ep_rkey_unpack(perf->ucp.peers[i].ep, rkey_buffer,
                                        &perf->ucp.peers[i].rkey);
            if (status != UCS_OK) {
                if (perf->params.flags & UCX_PERF_TEST_FLAG_VERBOSE) {
                    ucs_fatal("ucp_rkey_unpack() failed: %s", ucs_status_string(status));
                }
                goto err_free_buffer;
            }
        } else {
            perf->ucp.peers[i].rkey = NULL;
        }
    }

    free(buffer);

    status = ucp_perf_test_exchange_status(perf, UCS_OK);
    if (status != UCS_OK) {
        /* BUGFIX: return the collective error immediately; previously the
         * worker flush below could overwrite it with UCS_OK even though
         * the endpoints had just been destroyed */
        ucp_perf_test_destroy_eps(perf, group_size);
        return status;
    }

    /* force wireup completion */
    status = ucp_worker_flush(perf->ucp.worker);
    if (status != UCS_OK) {
        ucs_warn("ucp_worker_flush() failed: %s", ucs_status_string(status));
    }

    return status;

err_free_buffer:
    free(buffer);
err_destroy_eps:
    ucp_perf_test_destroy_eps(perf, group_size);
err:
    (void)ucp_perf_test_exchange_status(perf, status);
    return status;
}
/* Tear down all UCP endpoints: barrier first so no peer is still sending
 * to us, then destroy every endpoint/rkey in the group. */
static void ucp_perf_test_cleanup_endpoints(ucx_perf_context_t *perf)
{
    ucp_perf_barrier(perf);
    ucp_perf_test_destroy_eps(perf, rte_call(perf, group_size));
}
/* Configure a shortened warmup phase: run at most warmup_iter iterations,
 * capped at a tenth of the real iteration budget, with reporting disabled.
 * NOTE(review): report_interval = -1 presumably wraps to a huge value to
 * mean "never report" if the field is unsigned — confirm the field type. */
static void ucx_perf_set_warmup(ucx_perf_context_t* perf, ucx_perf_params_t* params)
{
    perf->max_iter = ucs_min(params->warmup_iter, params->max_iter / 10);
    perf->report_interval = -1;
}
/* Open the memory domain that provides the transport/device pair requested
 * in perf->params.uct: iterates over all MD resources, opens each, and
 * keeps the first whose transport-resource list contains a matching
 * tl_name/dev_name. On success perf->uct.md holds the open MD (caller owns
 * it); on failure returns UCS_ERR_NO_DEVICE or the first query error. */
static ucs_status_t uct_perf_create_md(ucx_perf_context_t *perf)
{
    uct_md_resource_desc_t *md_resources;
    uct_tl_resource_desc_t *tl_resources;
    unsigned i, num_md_resources;
    unsigned j, num_tl_resources;
    ucs_status_t status;
    uct_md_h md;
    uct_md_config_t *md_config;
    status = uct_query_md_resources(&md_resources, &num_md_resources);
    if (status != UCS_OK) {
        goto out;
    }
    for (i = 0; i < num_md_resources; ++i) {
        status = uct_md_config_read(md_resources[i].md_name, NULL, NULL, &md_config);
        if (status != UCS_OK) {
            goto out_release_md_resources;
        }
        status = uct_md_open(md_resources[i].md_name, md_config, &md);
        /* config is consumed by the open call; release our reference */
        uct_config_release(md_config);
        if (status != UCS_OK) {
            goto out_release_md_resources;
        }
        status = uct_md_query_tl_resources(md, &tl_resources, &num_tl_resources);
        if (status != UCS_OK) {
            uct_md_close(md);
            goto out_release_md_resources;
        }
        for (j = 0; j < num_tl_resources; ++j) {
            if (!strcmp(perf->params.uct.tl_name,  tl_resources[j].tl_name) &&
                !strcmp(perf->params.uct.dev_name, tl_resources[j].dev_name))
            {
                /* Match: keep this MD open and hand it to the caller */
                uct_release_tl_resource_list(tl_resources);
                perf->uct.md = md;
                status = UCS_OK;
                goto out_release_md_resources;
            }
        }
        /* No match on this MD — close it and try the next one */
        uct_md_close(md);
        uct_release_tl_resource_list(tl_resources);
    }
    ucs_error("Cannot use transport %s on device %s", perf->params.uct.tl_name,
              perf->params.uct.dev_name);
    status = UCS_ERR_NO_DEVICE;
out_release_md_resources:
    uct_release_md_resource_list(md_resources);
out:
    return status;
}
/* Collective barrier across all test ranks.  The UCT worker progress
 * routine is passed to the RTE so pending communication keeps being
 * progressed while waiting at the barrier. */
void uct_perf_barrier(ucx_perf_context_t *perf)
{
    rte_call(perf, barrier, (void(*)(void*))uct_worker_progress,
             (void*)perf->uct.worker);
}
/* Collective barrier across all test ranks.  The UCP worker progress
 * routine is passed to the RTE so pending communication keeps being
 * progressed while waiting at the barrier. */
void ucp_perf_barrier(ucx_perf_context_t *perf)
{
    rte_call(perf, barrier, (void(*)(void*))ucp_worker_progress,
             (void*)perf->ucp.worker);
}
/* Initialize the UCT side of the benchmark: async context, worker,
 * memory domain, interface, test buffers and endpoints - in that order.
 * On any failure the objects created so far are destroyed in reverse
 * order via the goto chain and the error status is returned. */
static ucs_status_t uct_perf_setup(ucx_perf_context_t *perf, ucx_perf_params_t *params)
{
    uct_iface_config_t *iface_config;
    ucs_status_t status;
    uct_iface_params_t iface_params = {
        .open_mode            = UCT_IFACE_OPEN_MODE_DEVICE,
        .mode.device.tl_name  = params->uct.tl_name,
        .mode.device.dev_name = params->uct.dev_name,
        .stats_root           = ucs_stats_get_root(),
        .rx_headroom          = 0
    };

    UCS_CPU_ZERO(&iface_params.cpu_mask);

    status = ucs_async_context_init(&perf->uct.async, params->async_mode);
    if (status != UCS_OK) {
        goto out;
    }

    status = uct_worker_create(&perf->uct.async, params->thread_mode,
                               &perf->uct.worker);
    if (status != UCS_OK) {
        goto out_cleanup_async;
    }

    /* Open the MD providing the requested transport/device pair. */
    status = uct_perf_create_md(perf);
    if (status != UCS_OK) {
        goto out_destroy_worker;
    }

    status = uct_md_iface_config_read(perf->uct.md, params->uct.tl_name, NULL,
                                      NULL, &iface_config);
    if (status != UCS_OK) {
        goto out_destroy_md;
    }

    status = uct_iface_open(perf->uct.md, perf->uct.worker, &iface_params,
                            iface_config, &perf->uct.iface);
    uct_config_release(iface_config); /* iface holds its own copy */
    if (status != UCS_OK) {
        ucs_error("Failed to open iface: %s", ucs_status_string(status));
        goto out_destroy_md;
    }

    /* Verify the local iface supports the selected test... */
    status = uct_perf_test_check_capabilities(params, perf->uct.iface);
    /* sync status across all processes */
    status = ucp_perf_test_exchange_status(perf, status);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    status = uct_perf_test_alloc_mem(perf, params);
    if (status != UCS_OK) {
        goto out_iface_close;
    }

    status = uct_perf_test_setup_endpoints(perf);
    if (status != UCS_OK) {
        ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        goto out_free_mem;
    }

    uct_iface_progress_enable(perf->uct.iface,
                              UCT_PROGRESS_SEND | UCT_PROGRESS_RECV);
    return UCS_OK;

/* Error unwinding: each label undoes one setup step, falling through
 * to undo all earlier steps as well. */
out_free_mem:
    uct_perf_test_free_mem(perf);
out_iface_close:
    uct_iface_close(perf->uct.iface);
out_destroy_md:
    uct_md_close(perf->uct.md);
out_destroy_worker:
    uct_worker_destroy(perf->uct.worker);
out_cleanup_async:
    ucs_async_context_cleanup(&perf->uct.async);
out:
    return status;
}
/* Release everything created by uct_perf_setup(), in reverse order of
 * creation: endpoints, buffers, iface, MD, worker, async context. */
static void uct_perf_cleanup(ucx_perf_context_t *perf)
{
    uct_perf_test_cleanup_endpoints(perf);
    uct_perf_test_free_mem(perf);
    uct_iface_close(perf->uct.iface);
    uct_md_close(perf->uct.md);
    uct_worker_destroy(perf->uct.worker);
    ucs_async_context_cleanup(&perf->uct.async);
}
/* Initialize the UCP side of the benchmark: create a UCP context with
 * the features required by the selected test, create a worker, allocate
 * the send/receive buffers, and connect endpoints to all peers.
 *
 * On failure everything created so far is torn down in reverse order via
 * the goto chain and the error status is returned.
 *
 * Fix: corrected the spelling of the allocation-failure warning message
 * ("alocate" -> "allocate"). */
static ucs_status_t ucp_perf_setup(ucx_perf_context_t *perf,
                                   ucx_perf_params_t *params)
{
    ucp_params_t ucp_params;
    ucp_worker_params_t worker_params;
    ucp_config_t *config;
    ucs_status_t status;

    ucp_params.field_mask = UCP_PARAM_FIELD_FEATURES;
    ucp_params.features   = 0;

    /* Translate the test command/flags into the UCP feature bits needed. */
    status = ucp_perf_test_fill_params(params, &ucp_params);
    if (status != UCS_OK) {
        goto err;
    }

    status = ucp_config_read(NULL, NULL, &config);
    if (status != UCS_OK) {
        goto err;
    }

    status = ucp_init(&ucp_params, config, &perf->ucp.context);
    ucp_config_release(config); /* context keeps its own copy */
    if (status != UCS_OK) {
        goto err;
    }

    worker_params.field_mask  = UCP_WORKER_PARAM_FIELD_THREAD_MODE;
    worker_params.thread_mode = params->thread_mode;

    status = ucp_worker_create(perf->ucp.context, &worker_params,
                               &perf->ucp.worker);
    if (status != UCS_OK) {
        goto err_cleanup;
    }

    status = ucp_perf_test_alloc_mem(perf, params);
    if (status != UCS_OK) {
        ucs_warn("ucp test failed to allocate memory");
        goto err_destroy_worker;
    }

    status = ucp_perf_test_setup_endpoints(perf, ucp_params.features);
    if (status != UCS_OK) {
        if (params->flags & UCX_PERF_TEST_FLAG_VERBOSE) {
            ucs_error("Failed to setup endpoints: %s", ucs_status_string(status));
        }
        goto err_free_mem;
    }

    return UCS_OK;

/* Error unwinding: undo each completed setup step in reverse order. */
err_free_mem:
    ucp_perf_test_free_mem(perf);
err_destroy_worker:
    ucp_worker_destroy(perf->ucp.worker);
err_cleanup:
    ucp_cleanup(perf->ucp.context);
err:
    return status;
}
/* Release everything created by ucp_perf_setup(), in reverse order:
 * endpoints, (barrier so all peers are done), buffers, worker, context. */
static void ucp_perf_cleanup(ucx_perf_context_t *perf)
{
    ucp_perf_test_cleanup_endpoints(perf);
    ucp_perf_barrier(perf);
    ucp_perf_test_free_mem(perf);
    ucp_worker_destroy(perf->ucp.worker);
    ucp_cleanup(perf->ucp.context);
}
/* Dispatch table mapping the selected API (UCT or UCP) to its
 * setup/cleanup/run/barrier implementations; indexed by params->api. */
static struct {
    ucs_status_t (*setup)(ucx_perf_context_t *perf, ucx_perf_params_t *params);
    void         (*cleanup)(ucx_perf_context_t *perf);
    ucs_status_t (*run)(ucx_perf_context_t *perf);
    void         (*barrier)(ucx_perf_context_t *perf);
} ucx_perf_funcs[] = {
    [UCX_PERF_API_UCT] = {uct_perf_setup, uct_perf_cleanup,
                          uct_perf_test_dispatch, uct_perf_barrier},
    [UCX_PERF_API_UCP] = {ucp_perf_setup, ucp_perf_cleanup,
                          ucp_perf_test_dispatch, ucp_perf_barrier}
};
static int ucx_perf_thread_spawn(ucx_perf_context_t *perf,
ucx_perf_result_t* result);
#if HAVE_CUDA
/* Bind this process to a CUDA device: ranks are mapped to the node's
 * visible GPUs round-robin by group index.  Returns UCS_ERR_NO_DEVICE
 * if the device count cannot be queried or the device cannot be set. */
static ucs_status_t ucx_perf_init_cuda_device(ucx_perf_context_t *perf)
{
    cudaError_t cerr;
    unsigned group_index;
    int num_gpus;
    int gpu_index;

    group_index = rte_call(perf, group_index);

    cerr = cudaGetDeviceCount(&num_gpus);
    if (cerr != cudaSuccess) {
        return UCS_ERR_NO_DEVICE;
    }

    /* round-robin rank -> GPU mapping */
    gpu_index = group_index % num_gpus;

    cerr = cudaSetDevice(gpu_index);
    if (cerr != cudaSuccess) {
        return UCS_ERR_NO_DEVICE;
    }

    return UCS_OK;
}
#endif
/* Top-level benchmark driver.  Validates @params, allocates and resets a
 * perf context, runs the selected API's setup, executes the (optionally
 * warmed-up) test either single-threaded or via the OpenMP spawner, and
 * reports the result through the RTE.  @result is filled only on a
 * successful single-threaded run (threaded runs report internally). */
ucs_status_t ucx_perf_run(ucx_perf_params_t *params, ucx_perf_result_t *result)
{
    ucx_perf_context_t *perf;
    ucs_status_t status;

    if (params->command == UCX_PERF_CMD_LAST) {
        ucs_error("Test is not selected");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }

    if ((params->api != UCX_PERF_API_UCT) && (params->api != UCX_PERF_API_UCP)) {
        ucs_error("Invalid test API parameter (should be UCT or UCP)");
        status = UCS_ERR_INVALID_PARAM;
        goto out;
    }

    perf = malloc(sizeof(*perf));
    if (perf == NULL) {
        status = UCS_ERR_NO_MEMORY;
        goto out;
    }

    ucx_perf_test_reset(perf, params);

#if HAVE_CUDA
    /* Select a GPU for this rank before any buffers are allocated. */
    if ((params->mem_type == UCT_MD_MEM_TYPE_CUDA) ||
        (params->mem_type == UCT_MD_MEM_TYPE_CUDA_MANAGED)) {
        status = ucx_perf_init_cuda_device(perf);
        if (status != UCS_OK) {
            goto out_free;
        }
    }
#endif

    status = ucx_perf_funcs[params->api].setup(perf, params);
    if (status != UCS_OK) {
        goto out_free;
    }

    if (UCS_THREAD_MODE_SINGLE == params->thread_mode) {
        if (params->warmup_iter > 0) {
            /* Warmup: shortened iteration count, reporting disabled. */
            ucx_perf_set_warmup(perf, params);
            status = ucx_perf_funcs[params->api].run(perf);
            if (status != UCS_OK) {
                goto out_cleanup;
            }
            ucx_perf_funcs[params->api].barrier(perf);
            ucx_perf_test_reset(perf, params); /* restore real test settings */
        }

        /* Run test */
        status = ucx_perf_funcs[params->api].run(perf);
        ucx_perf_funcs[params->api].barrier(perf);
        if (status == UCS_OK) {
            ucx_perf_calc_result(perf, result);
            rte_call(perf, report, result, perf->params.report_arg, 1);
        }
    } else {
        /* Multi-thread mode: fan out to OpenMP threads (or fail if the
         * build has no OpenMP support). */
        status = ucx_perf_thread_spawn(perf, result);
    }

out_cleanup:
    ucx_perf_funcs[params->api].cleanup(perf);
out_free:
    free(perf);
out:
    return status;
}
#if _OPENMP
/* multiple threads sharing the same worker/iface */
#include <omp.h>
/* Per-thread state for the OpenMP multi-thread test mode.  Each thread
 * works on a private copy of the perf context and records its outcome
 * in a slot of the shared status array. */
typedef struct {
    pthread_t          pt;
    int                tid;       /* this thread's index */
    int                ntid;      /* total number of threads */
    ucs_status_t*      statuses;  /* shared: one status slot per thread */
    ucx_perf_context_t perf;      /* thread-private copy of the context */
    ucx_perf_result_t  result;    /* thread-private result */
} ucx_perf_thread_context_t;
/* Body executed by each OpenMP thread: optional warmup run, then the
 * measured run.  After each run every thread checks the shared status
 * array and bails out if any sibling failed.  Only the master thread
 * resets the context after warmup and reports the final result.
 * Returns a pointer to this thread's slot in the status array. */
static void* ucx_perf_thread_run_test(void* arg)
{
    ucx_perf_thread_context_t* tctx = (ucx_perf_thread_context_t*) arg;
    ucx_perf_result_t* result = &tctx->result;
    ucx_perf_context_t* perf = &tctx->perf;
    ucx_perf_params_t* params = &perf->params;
    ucs_status_t* statuses = tctx->statuses;
    int tid = tctx->tid;
    int i;

    if (params->warmup_iter > 0) {
        ucx_perf_set_warmup(perf, params);
        statuses[tid] = ucx_perf_funcs[params->api].run(perf);
        ucx_perf_funcs[params->api].barrier(perf);
        /* Abort if any thread failed its warmup run. */
        for (i = 0; i < tctx->ntid; i++) {
            if (UCS_OK != statuses[i]) {
                goto out;
            }
        }
/* Only the master thread restores the real test parameters. */
#pragma omp master
        ucx_perf_test_reset(perf, params);
    }

    /* Run test */
#pragma omp barrier
    statuses[tid] = ucx_perf_funcs[params->api].run(perf);
    ucx_perf_funcs[params->api].barrier(perf);
    /* Abort if any thread failed the measured run. */
    for (i = 0; i < tctx->ntid; i++) {
        if (UCS_OK != statuses[i]) {
            goto out;
        }
    }
#pragma omp master
    {
        /* Assuming all threads are fairly treated, reporting only tid==0
           TODO: aggregate reports */
        ucx_perf_calc_result(perf, result);
        rte_call(perf, report, result, perf->params.report_arg, 1);
    }

out:
    return &statuses[tid];
}
/* Run the test with params->thread_count OpenMP threads sharing the
 * same worker/iface.  Each thread gets a private copy of @perf whose
 * send/recv buffers are offset by tid * message_size so the threads do
 * not trample each other.  Any thread failure makes the whole run fail
 * (the last failing thread's status is returned).
 * NOTE: @result is currently unused here - each thread reports via its
 * own private result inside ucx_perf_thread_run_test(). */
static int ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                 ucx_perf_result_t* result)
{
    ucx_perf_thread_context_t* tctx;
    ucs_status_t* statuses;
    size_t message_size;
    ucs_status_t status;
    int ti, nti;

    message_size = ucx_perf_get_message_size(&perf->params);
    omp_set_num_threads(perf->params.thread_count);
    nti = perf->params.thread_count;

    tctx     = calloc(nti, sizeof(ucx_perf_thread_context_t));
    statuses = calloc(nti, sizeof(ucs_status_t));
    if ((tctx == NULL) || (statuses == NULL)) {
        status = UCS_ERR_NO_MEMORY;
        goto out_free; /* free(NULL) is safe for whichever alloc succeeded */
    }

#pragma omp parallel private(ti)
    {
        ti = omp_get_thread_num();
        tctx[ti].tid      = ti;
        tctx[ti].ntid     = nti;
        tctx[ti].statuses = statuses;
        tctx[ti].perf     = *perf;
        /* Doctor the src and dst buffers to make them thread specific */
        tctx[ti].perf.send_buffer += ti * message_size;
        tctx[ti].perf.recv_buffer += ti * message_size;
        tctx[ti].perf.offset = ti * message_size;
        ucx_perf_thread_run_test((void*)&tctx[ti]);
    }

    /* Reduce per-thread statuses into one return value. */
    status = UCS_OK;
    for (ti = 0; ti < nti; ti++) {
        if (UCS_OK != statuses[ti]) {
            ucs_error("Thread %d failed to run test: %s", tctx[ti].tid,
                      ucs_status_string(statuses[ti]));
            status = statuses[ti];
        }
    }

out_free:
    free(statuses);
    free(tctx);
    return status;
}
#else
/* Fallback when the build has no OpenMP support: multi-thread mode
 * cannot run, so reject the request. */
static int ucx_perf_thread_spawn(ucx_perf_context_t *perf,
                                 ucx_perf_result_t* result) {
    ucs_error("Invalid test parameter (thread mode requested without OpenMP capabilities)");
    return UCS_ERR_INVALID_PARAM;
}
#endif /* _OPENMP */
|
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate.h"
#include "magick/annotate.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/decorate.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/fx.h"
#include "magick/fx-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/layer.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/opencl-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resize.h"
#include "magick/resource_.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
Define declarations.
*/
#define LeftShiftOperator 0xf5U
#define RightShiftOperator 0xf6U
#define LessThanEqualOperator 0xf7U
#define GreaterThanEqualOperator 0xf8U
#define EqualOperator 0xf9U
#define NotEqualOperator 0xfaU
#define LogicalAndOperator 0xfbU
#define LogicalOrOperator 0xfcU
#define ExponentialNotation 0xfdU
/*
  Private state for evaluating an "fx" expression (see AcquireFxInfo).
*/
struct _FxInfo
{
  const Image
    *images;        /* image list the expression is evaluated against */

  char
    *expression;    /* normalized copy of the fx expression string */

  FILE
    *file;          /* debug output destination (stderr by default) */

  SplayTreeInfo
    *colors,        /* string-keyed splay-tree caches; colors uses
                       aligned-memory values, symbols plain memory
                       (see the relinquish callbacks in AcquireFxInfo) */
    *symbols;

  CacheView
    **view;         /* one virtual cache view per image in the list */

  RandomInfo
    *random_info;   /* random-number state for the expression evaluator */

  ExceptionInfo
    *exception;     /* errors/warnings raised during evaluation */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireFxInfo() allocates the FxInfo structure.
%
% The format of the AcquireFxInfo method is:
%
% FxInfo *AcquireFxInfo(Image *image,const char *expression)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: the expression.
%
*/
MagickExport FxInfo *AcquireFxInfo(const Image *image,const char *expression)
{
  char
    fx_op[2];

  const Image
    *next;

  FxInfo
    *fx_info;

  register ssize_t
    i;

  fx_info=(FxInfo *) AcquireMagickMemory(sizeof(*fx_info));
  if (fx_info == (FxInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(fx_info,0,sizeof(*fx_info));
  fx_info->exception=AcquireExceptionInfo();
  fx_info->images=image;
  /*
    String-keyed splay trees serve as lookup caches during evaluation.
  */
  fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishAlignedMemory);
  fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  /*
    One virtual cache view per image in the list.
  */
  fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
    fx_info->images),sizeof(*fx_info->view));
  if (fx_info->view == (CacheView **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  i=0;
  next=GetFirstImageInList(fx_info->images);
  for ( ; next != (Image *) NULL; next=next->next)
  {
    fx_info->view[i]=AcquireVirtualCacheView(next,fx_info->exception);
    i++;
  }
  fx_info->random_info=AcquireRandomInfo();
  fx_info->expression=ConstantString(expression);
  fx_info->file=stderr;
  (void) SubstituteString(&fx_info->expression," ",""); /* compact string */
  /*
    Force right-to-left associativity for unary negation.
  */
  (void) SubstituteString(&fx_info->expression,"-","-1.0*");
  /* Undo the rewrite where '-' was binary: exponents and E-notation. */
  (void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
  (void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
  (void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
  /*
    Convert compound to simple operators.
  */
  fx_op[1]='\0';
  *fx_op=(char) LeftShiftOperator;
  (void) SubstituteString(&fx_info->expression,"<<",fx_op);
  *fx_op=(char) RightShiftOperator;
  (void) SubstituteString(&fx_info->expression,">>",fx_op);
  *fx_op=(char) LessThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,"<=",fx_op);
  *fx_op=(char) GreaterThanEqualOperator;
  (void) SubstituteString(&fx_info->expression,">=",fx_op);
  *fx_op=(char) EqualOperator;
  (void) SubstituteString(&fx_info->expression,"==",fx_op);
  *fx_op=(char) NotEqualOperator;
  (void) SubstituteString(&fx_info->expression,"!=",fx_op);
  *fx_op=(char) LogicalAndOperator;
  (void) SubstituteString(&fx_info->expression,"&&",fx_op);
  *fx_op=(char) LogicalOrOperator;
  (void) SubstituteString(&fx_info->expression,"||",fx_op);
  *fx_op=(char) ExponentialNotation;
  (void) SubstituteString(&fx_info->expression,"**",fx_op);
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d d N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddNoiseImage() adds random noise to the image.
%
% The format of the AddNoiseImage method is:
%
% Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
% ExceptionInfo *exception)
% Image *AddNoiseImageChannel(const Image *image,const ChannelType channel,
% const NoiseType noise_type,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
% Impulse, Laplacian, or Poisson.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
  ExceptionInfo *exception)
{
  /*
    Convenience wrapper: add noise to the default channel set.
  */
  return(AddNoiseImageChannel(image,DefaultChannels,noise_type,exception));
}
MagickExport Image *AddNoiseImageChannel(const Image *image,
  const ChannelType channel,const NoiseType noise_type,ExceptionInfo *exception)
{
#define AddNoiseImageTag  "AddNoise/Image"

  CacheView
    *image_view,
    *noise_view;

  const char
    *option;

  double
    attenuate;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Prefer the accelerated (OpenCL) implementation when available.
  */
  noise_image=AccelerateAddNoiseImage(image,channel,noise_type,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&noise_image->exception);
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  /*
    Add noise in each row.  The "attenuate" artifact scales the noise
    amplitude (default 1.0).
  */
  attenuate=1.0;
  option=GetImageArtifact(image,"attenuate");
  if (option != (char *) NULL)
    attenuate=StringToDouble(option,(char **) NULL);
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet(); /* one RNG state per thread */
  image_view=AcquireVirtualCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* NOTE(review): parallel execution is enabled only when key == ~0UL
     (the default secret key) - presumably so seeded runs stay
     reproducible; confirm against GetRandomSecretKey(). */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,noise_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict noise_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
      exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    noise_indexes=GetCacheViewAuthenticIndexQueue(noise_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Perturb each requested channel independently.
      */
      if ((channel & RedChannel) != 0)
        SetPixelRed(q,ClampToQuantum(GenerateDifferentialNoise(random_info[id],
          GetPixelRed(p),noise_type,attenuate)));
      if (IsGrayColorspace(image->colorspace) != MagickFalse)
        {
          /* Gray image: replicate the noisy red channel. */
          SetPixelGreen(q,GetPixelRed(q));
          SetPixelBlue(q,GetPixelRed(q));
        }
      else
        {
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(GenerateDifferentialNoise(
              random_info[id],GetPixelGreen(p),noise_type,attenuate)));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(GenerateDifferentialNoise(
              random_info[id],GetPixelBlue(p),noise_type,attenuate)));
        }
      if ((channel & OpacityChannel) != 0)
        SetPixelOpacity(q,ClampToQuantum(GenerateDifferentialNoise(
          random_info[id],GetPixelOpacity(p),noise_type,attenuate)));
      if (((channel & IndexChannel) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelIndex(noise_indexes+x,ClampToQuantum(
          GenerateDifferentialNoise(random_info[id],GetPixelIndex(
          indexes+x),noise_type,attenuate)));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(noise_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AddNoiseImage)
#endif
        proceed=SetImageProgress(image,AddNoiseImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u e S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlueShiftImage() mutes the colors of the image to simulate a scene at
% nighttime in the moonlight.
%
% The format of the BlueShiftImage method is:
%
% Image *BlueShiftImage(const Image *image,const double factor,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o factor: the shift factor.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
  ExceptionInfo *exception)
{
#define BlueShiftImageTag  "BlueShift/Image"

  CacheView
    *image_view,
    *shift_view;

  Image
    *shift_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate blue shift image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  shift_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (shift_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shift_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&shift_image->exception);
      shift_image=DestroyImage(shift_image);
      return((Image *) NULL);
    }
  /*
    Blue-shift DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,shift_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    MagickPixelPacket
      pixel;

    Quantum
      quantum;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Blend each channel 50/50 with factor times the pixel's minimum
        channel value, then again with factor times its maximum.
      */
      quantum=GetPixelRed(p);
      if (GetPixelGreen(p) < quantum)
        quantum=GetPixelGreen(p);
      if (GetPixelBlue(p) < quantum)
        quantum=GetPixelBlue(p);
      pixel.red=0.5*(GetPixelRed(p)+factor*quantum);
      pixel.green=0.5*(GetPixelGreen(p)+factor*quantum);
      pixel.blue=0.5*(GetPixelBlue(p)+factor*quantum);
      quantum=GetPixelRed(p);
      if (GetPixelGreen(p) > quantum)
        quantum=GetPixelGreen(p);
      if (GetPixelBlue(p) > quantum)
        quantum=GetPixelBlue(p);
      pixel.red=0.5*(pixel.red+factor*quantum);
      pixel.green=0.5*(pixel.green+factor*quantum);
      pixel.blue=0.5*(pixel.blue+factor*quantum);
      SetPixelRed(q,ClampToQuantum(pixel.red));
      SetPixelGreen(q,ClampToQuantum(pixel.green));
      SetPixelBlue(q,ClampToQuantum(pixel.blue));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(shift_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BlueShiftImage)
#endif
        proceed=SetImageProgress(image,BlueShiftImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  shift_view=DestroyCacheView(shift_view);
  if (status == MagickFalse)
    shift_image=DestroyImage(shift_image);
  return(shift_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a r c o a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CharcoalImage() creates a new image that is a copy of an existing one with
% the edge highlighted. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the CharcoalImage method is:
%
% Image *CharcoalImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *edges,
    *result,
    *working;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Work on a private copy of the source image.
  */
  working=CloneImage(image,0,0,MagickTrue,exception);
  if (working == (Image *) NULL)
    return((Image *) NULL);
  /*
    Highlight the edges, then soften them with a Gaussian blur.
  */
  edges=EdgeImage(working,radius,exception);
  working=DestroyImage(working);
  if (edges == (Image *) NULL)
    return((Image *) NULL);
  result=BlurImage(edges,radius,sigma,exception);
  edges=DestroyImage(edges);
  if (result == (Image *) NULL)
    return((Image *) NULL);
  /*
    Stretch contrast, invert, and drop color for the charcoal-sketch look.
  */
  (void) NormalizeImage(result);
  (void) NegateImage(result,MagickFalse);
  (void) GrayscaleImage(result,image->intensity);
  return(result);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorizeImage() blends the fill color with each pixel in the image.
% A percentage blend is specified with opacity. Control the application
% of different color components by specifying a different percentage for
% each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
% The format of the ColorizeImage method is:
%
% Image *ColorizeImage(const Image *image,const char *opacity,
% const PixelPacket colorize,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: A character string indicating the level of opacity as a
% percentage.
%
% o colorize: A color value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *opacity,
  const PixelPacket colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag  "Colorize/Image"

  CacheView
    *colorize_view,
    *image_view;

  GeometryInfo
    geometry_info;

  Image
    *colorize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    pixel;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  colorize_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&colorize_image->exception);
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) ||
      (IsPixelGray(&colorize) != MagickFalse))
    (void) SetImageColorspace(colorize_image,sRGBColorspace);
  if ((colorize_image->matte == MagickFalse) &&
      (colorize.opacity != OpaqueOpacity))
    (void) SetImageAlphaChannel(colorize_image,OpaqueAlphaChannel);
  /* A NULL opacity means no blend: return the plain clone. */
  if (opacity == (const char *) NULL)
    return(colorize_image);
  /*
    Determine RGB values of the pen color.  A single percentage applies
    to all channels; "R/G/B[/A]" gives per-channel percentages.
  */
  flags=ParseGeometry(opacity,&geometry_info);
  pixel.red=geometry_info.rho;
  pixel.green=geometry_info.rho;
  pixel.blue=geometry_info.rho;
  pixel.opacity=(MagickRealType) OpaqueOpacity;
  if ((flags & SigmaValue) != 0)
    pixel.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    pixel.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    pixel.opacity=geometry_info.psi;
  /*
    Colorize DirectClass image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  colorize_view=AcquireAuthenticCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,colorize_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(colorize_view,0,y,colorize_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Per-channel weighted average of the source pixel and the fill
        color, weighted by the parsed percentages.
      */
      SetPixelRed(q,((GetPixelRed(p)*(100.0-pixel.red)+
        colorize.red*pixel.red)/100.0));
      SetPixelGreen(q,((GetPixelGreen(p)*(100.0-pixel.green)+
        colorize.green*pixel.green)/100.0));
      SetPixelBlue(q,((GetPixelBlue(p)*(100.0-pixel.blue)+
        colorize.blue*pixel.blue)/100.0));
      if (colorize_image->matte == MagickFalse)
        SetPixelOpacity(q,GetPixelOpacity(p));
      else
        SetPixelOpacity(q,((GetPixelOpacity(p)*(100.0-pixel.opacity)+
          colorize.opacity*pixel.opacity)/100.0));
      p++;
      q++;
    }
    sync=SyncCacheViewAuthenticPixels(colorize_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorizeImage)
#endif
        proceed=SetImageProgress(image,ColorizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  colorize_view=DestroyCacheView(colorize_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r M a t r i x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorMatrixImage() applies color transformation to an image. This method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the ColorMatrixImage method is:
%
% Image *ColorMatrixImage(const Image *image,
% const KernelInfo *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_matrix: the color matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ColorMatrixImage() applies a (up to 6x6) color transformation matrix to an
  image: each destination channel is a weighted sum of the source R,G,B,
  alpha (row 3), black/index for CMYK (row 4), plus a constant offset in
  column 5 (scaled by QuantumRange).  A cloned DirectClass image is returned;
  NULL is returned (and `exception` set) on failure.
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag  "ColorMatrix/Image"

  CacheView
    *color_view,
    *image_view;

  double
    ColorMatrix[6][6] =
    {
      /* Identity by default: channels not covered by the user's (possibly
         smaller) matrix pass through unchanged. */
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };

  Image
    *color_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    u,
    v,
    y;

  /*
    Create color matrix: copy the user's kernel values row-major into the
    top-left corner of the 6x6 identity; excess rows/columns are skipped
    (but still consume values from the kernel).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  i=0;
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image: clone the source and force DirectClass so
    per-pixel channel writes are valid.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&color_image->exception);
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      /* Log the effective 6x6 matrix, one row per line. */
      char
        format[MaxTextExtent],
        *message;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MaxTextExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatLocaleString(format,MaxTextExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    ColorMatrix image: apply the matrix row-by-row; rows may run in parallel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,color_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickRealType
      pixel;

    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    register IndexPacket
      *magick_restrict color_indexes;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    color_indexes=GetCacheViewAuthenticIndexQueue(color_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        v;

      size_t
        height;

      /* Only the first min(height,6) output channels are recomputed. */
      height=color_matrix->height > 6 ? 6UL : color_matrix->height;
      for (v=0; v < (ssize_t) height; v++)
      {
        pixel=ColorMatrix[v][0]*GetPixelRed(p)+ColorMatrix[v][1]*
          GetPixelGreen(p)+ColorMatrix[v][2]*GetPixelBlue(p);
        if (image->matte != MagickFalse)
          pixel+=ColorMatrix[v][3]*(QuantumRange-GetPixelOpacity(p));
        if (image->colorspace == CMYKColorspace)
          pixel+=ColorMatrix[v][4]*GetPixelIndex(indexes+x);
        /* Column 5 holds the normalized offset; scale it to quantum range. */
        pixel+=QuantumRange*ColorMatrix[v][5];
        switch (v)
        {
          case 0: SetPixelRed(q,ClampToQuantum(pixel)); break;
          case 1: SetPixelGreen(q,ClampToQuantum(pixel)); break;
          case 2: SetPixelBlue(q,ClampToQuantum(pixel)); break;
          case 3:
          {
            if (image->matte != MagickFalse)
              SetPixelAlpha(q,ClampToQuantum(pixel));
            break;
          }
          case 4:
          {
            if (image->colorspace == CMYKColorspace)
              SetPixelIndex(color_indexes+x,ClampToQuantum(pixel));
            break;
          }
        }
      }
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorMatrixImage)
#endif
        proceed=SetImageProgress(image,ColorMatrixImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);
  return(color_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
% The format of the DestroyFxInfo method is:
%
% ImageInfo *DestroyFxInfo(ImageInfo *fx_info)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
*/
/*
  DestroyFxInfo() releases every resource held by an FxInfo structure --
  exception, expression string, symbol/color splay-trees, one cache view per
  image in the list, the view array itself, and the random-info -- then frees
  the structure and returns NULL.
*/
MagickExport FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
  ssize_t
    image_index;

  fx_info->exception=DestroyExceptionInfo(fx_info->exception);
  fx_info->expression=DestroyString(fx_info->expression);
  fx_info->symbols=DestroySplayTree(fx_info->symbols);
  fx_info->colors=DestroySplayTree(fx_info->colors);
  /* One cache view was acquired per image; release them all. */
  image_index=(ssize_t) GetImageListLength(fx_info->images);
  while (--image_index >= 0)
    fx_info->view[image_index]=DestroyCacheView(fx_info->view[image_index]);
  fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
  fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
  fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F x E v a l u a t e C h a n n e l E x p r e s s i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxEvaluateChannelExpression() evaluates an expression and returns the
% results.
%
% The format of the FxEvaluateExpression method is:
%
% MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
% const ChannelType channel,const ssize_t x,const ssize_t y,
% double *alpha,Exceptioninfo *exception)
% MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,double *alpha,
% Exceptioninfo *exception)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
% o channel: the channel.
%
% o x,y: the pixel position.
%
% o alpha: the result.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  FxChannelStatistics() returns a per-channel image statistic (depth,
  kurtosis, maxima, mean, minima, skewness, standard_deviation) for an fx
  symbol, scaled by QuantumScale.  The symbol may carry a ".channel" suffix
  (e.g. "mean.g") that overrides the requested channel.  Results are cached
  in fx_info->symbols keyed by (image pointer, channel, symbol).
*/
static double FxChannelStatistics(FxInfo *fx_info,const Image *image,
  ChannelType channel,const char *symbol,ExceptionInfo *exception)
{
  char
    channel_symbol[MaxTextExtent],
    key[MaxTextExtent],
    statistic[MaxTextExtent];

  const char
    *value;

  register const char
    *p;

  /*
    An optional ".<channel>" suffix on the symbol overrides the channel.
  */
  for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
  *channel_symbol='\0';
  if (*p == '.')
    {
      ssize_t
        option;

      (void) CopyMagickString(channel_symbol,p+1,MaxTextExtent);
      option=ParseCommandOption(MagickChannelOptions,MagickTrue,channel_symbol);
      if (option >= 0)
        channel=(ChannelType) option;
    }
  /*
    Return the cached value when this statistic was computed before.
  */
  (void) FormatLocaleString(key,MaxTextExtent,"%p.%.20g.%s",(void *) image,
    (double) channel,symbol);
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,key);
  if (value != (const char *) NULL)
    return(QuantumScale*StringToDouble(value,(char **) NULL));
  (void) DeleteNodeFromSplayTree(fx_info->symbols,key);
  /*
    Initialize the buffer so an unrecognized symbol caches/returns 0.0
    instead of reading uninitialized stack memory below.
  */
  (void) CopyMagickString(statistic,"0.0",MaxTextExtent);
  if (LocaleNCompare(symbol,"depth",5) == 0)
    {
      size_t
        depth;

      depth=GetImageChannelDepth(image,channel,exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%.20g",(double) depth);
    }
  if (LocaleNCompare(symbol,"kurtosis",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness,
        exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%g",kurtosis);
    }
  if (LocaleNCompare(symbol,"maxima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageChannelRange(image,channel,&minima,&maxima,exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%g",maxima);
    }
  if (LocaleNCompare(symbol,"mean",4) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageChannelMean(image,channel,&mean,&standard_deviation,
        exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%g",mean);
    }
  if (LocaleNCompare(symbol,"minima",6) == 0)
    {
      double
        maxima,
        minima;

      (void) GetImageChannelRange(image,channel,&minima,&maxima,exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%g",minima);
    }
  if (LocaleNCompare(symbol,"skewness",8) == 0)
    {
      double
        kurtosis,
        skewness;

      (void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness,
        exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%g",skewness);
    }
  if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
    {
      double
        mean,
        standard_deviation;

      (void) GetImageChannelMean(image,channel,&mean,&standard_deviation,
        exception);
      (void) FormatLocaleString(statistic,MaxTextExtent,"%g",
        standard_deviation);
    }
  /* Cache the formatted value for subsequent lookups of the same key. */
  (void) AddValueToSplayTree(fx_info->symbols,ConstantString(key),
    ConstantString(statistic));
  return(QuantumScale*StringToDouble(statistic,(char **) NULL));
}
static double
FxEvaluateSubexpression(FxInfo *,const ChannelType,const ssize_t,
const ssize_t,const char *,size_t *,double *,ExceptionInfo *);
/*
  FxGCD() returns the greatest common divisor of alpha and beta via the
  Euclidean algorithm (iterative form).
*/
static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta)
{
  while (beta != 0)
  {
    MagickOffsetType
      residue;

    residue=alpha % beta;
    alpha=beta;
    beta=residue;
  }
  return(alpha);
}
/*
  FxSubexpression() scans a parenthesized subexpression and returns a pointer
  to the ')' that closes its outermost '('.  If the parentheses are
  unbalanced (end of string reached first), an OptionError is raised and the
  pointer to the terminating '\0' is returned.
*/
static inline const char *FxSubexpression(const char *expression,
  ExceptionInfo *exception)
{
  const char
    *scan;

  ssize_t
    nesting;

  nesting=0;
  for (scan=expression; *scan != '\0'; scan++)
  {
    /* Stop on the ')' that would close the outermost open parenthesis. */
    if ((nesting == 1) && (*scan == ')'))
      break;
    if (*scan == '(')
      nesting++;
    else
      if (*scan == ')')
        nesting--;
  }
  if (*scan == '\0')
    (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
      "UnbalancedParenthesis","`%s'",expression);
  return(scan);
}
/*
  FxGetSymbol() resolves an fx symbol (e.g. "r", "u.g", "p[-1,-1].b",
  "mean", "page.height", a color name, or a user variable) at pixel (x,y)
  and returns its value as a double, typically scaled by QuantumScale.
  Unknown symbols raise an OptionError and return 0.0.
*/
static double FxGetSymbol(FxInfo *fx_info,const ChannelType channel,
  const ssize_t x,const ssize_t y,const char *expression,
  ExceptionInfo *exception)
{
  char
    *q,
    subexpression[MaxTextExtent],
    symbol[MaxTextExtent];

  const char
    *p,
    *value;

  double
    alpha,
    beta;

  Image
    *image;

  MagickPixelPacket
    pixel;

  PointInfo
    point;

  register ssize_t
    i;

  size_t
    length;

  size_t
    depth,
    level;

  p=expression;
  i=GetImageIndexInList(fx_info->images);
  depth=0;
  level=0;
  point.x=(double) x;
  point.y=(double) y;
  /*
    Parse an optional image selector prefix: 's' (self), 'u' (first image),
    'v' (second image), optionally followed by "[index]" where index is
    itself an fx subexpression.
  */
  if (isalpha((int) ((unsigned char) *(p+1))) == 0)
    {
      if (strchr("suv",(int) *p) != (char *) NULL)
        {
          switch (*p)
          {
            case 's':
            default:
            {
              i=GetImageIndexInList(fx_info->images);
              break;
            }
            case 'u': i=0; break;
            case 'v': i=1; break;
          }
          p++;
          if (*p == '[')
            {
              /* Copy the bracketed subexpression (brackets may nest). */
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '[')
                  level++;
                else
                  if (*p == ']')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                &depth,&beta,exception);
              i=(ssize_t) (alpha+0.5);
              p++;
            }
          if (*p == '.')
            p++;
        }
      /*
        Parse an optional pixel selector: "p{x,y}" (absolute position) or
        "p[dx,dy]" (offset relative to the current pixel).
      */
      if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
        {
          p++;
          if (*p == '{')
            {
              level++;
              q=subexpression;
              for (p++; *p != '\0'; )
              {
                if (*p == '{')
                  level++;
                else
                  if (*p == '}')
                    {
                      level--;
                      if (level == 0)
                        break;
                    }
                *q++=(*p++);
              }
              *q='\0';
              alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                &depth,&beta,exception);
              /* Absolute coordinates replace the current position. */
              point.x=alpha;
              point.y=beta;
              p++;
            }
          else
            if (*p == '[')
              {
                level++;
                q=subexpression;
                for (p++; *p != '\0'; )
                {
                  if (*p == '[')
                    level++;
                  else
                    if (*p == ']')
                      {
                        level--;
                        if (level == 0)
                          break;
                      }
                  *q++=(*p++);
                }
                *q='\0';
                alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
                  &depth,&beta,exception);
                /* Relative coordinates offset the current position. */
                point.x+=alpha;
                point.y+=beta;
                p++;
              }
          if (*p == '.')
            p++;
        }
    }
  /*
    Wrap the image index into [0, length) so negative or out-of-range
    selectors address the list modularly.
  */
  length=GetImageListLength(fx_info->images);
  while (i < 0)
    i+=(ssize_t) length;
  if (length != 0)
    i%=length;
  image=GetImageFromList(fx_info->images,i);
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "NoSuchImage","`%s'",expression);
      return(0.0);
    }
  GetMagickPixelPacket(image,&pixel);
  (void) InterpolateMagickPixelPacket(image,fx_info->view[i],image->interpolate,
    point.x,point.y,&pixel,exception);
  /*
    If the remaining symbol is not one of the well-known pixel attributes,
    try interpreting its leading part as a color name (cached in
    fx_info->colors); the pixel then takes that color's value.
  */
  if ((strlen(p) > 2) &&
      (LocaleCompare(p,"intensity") != 0) &&
      (LocaleCompare(p,"luma") != 0) &&
      (LocaleCompare(p,"luminance") != 0) &&
      (LocaleCompare(p,"hue") != 0) &&
      (LocaleCompare(p,"saturation") != 0) &&
      (LocaleCompare(p,"lightness") != 0))
    {
      char
        name[MaxTextExtent];

      (void) CopyMagickString(name,p,MaxTextExtent);
      /* Trim a trailing ".component" suffix, but not past a ')'. */
      for (q=name+(strlen(name)-1); q > name; q--)
      {
        if (*q == ')')
          break;
        if (*q == '.')
          {
            *q='\0';
            break;
          }
      }
      if ((strlen(name) > 2) &&
          (GetValueFromSplayTree(fx_info->symbols,name) == (const char *) NULL))
        {
          MagickPixelPacket
            *color;

          color=(MagickPixelPacket *) GetValueFromSplayTree(fx_info->colors,
            name);
          if (color != (MagickPixelPacket *) NULL)
            {
              pixel=(*color);
              p+=strlen(name);
            }
          else
            if (QueryMagickColor(name,&pixel,fx_info->exception) != MagickFalse)
              {
                (void) AddValueToSplayTree(fx_info->colors,ConstantString(name),
                  CloneMagickPixelPacket(&pixel));
                p+=strlen(name);
              }
        }
    }
  (void) CopyMagickString(symbol,p,MaxTextExtent);
  StripString(symbol);
  /*
    A bare pixel reference (empty symbol) yields the value of the requested
    channel at the interpolated pixel.
  */
  if (*symbol == '\0')
    {
      switch (channel)
      {
        case RedChannel: return(QuantumScale*pixel.red);
        case GreenChannel: return(QuantumScale*pixel.green);
        case BlueChannel: return(QuantumScale*pixel.blue);
        case OpacityChannel:
        {
          double
            alpha;

          if (pixel.matte == MagickFalse)
            return(1.0);
          alpha=(double) (QuantumScale*GetPixelAlpha(&pixel));
          return(alpha);
        }
        case IndexChannel:
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                ImageError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.index);
        }
        case DefaultChannels:
          return(QuantumScale*GetMagickPixelIntensity(image,&pixel));
        default:
          break;
      }
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "UnableToParseExpression","`%s'",p);
      return(0.0);
    }
  /*
    Dispatch on the first character of the named symbol.
  */
  switch (*symbol)
  {
    case 'A':
    case 'a':
    {
      if (LocaleCompare(symbol,"a") == 0)
        return((double) (QuantumScale*GetPixelAlpha(&pixel)));
      break;
    }
    case 'B':
    case 'b':
    {
      if (LocaleCompare(symbol,"b") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'C':
    case 'c':
    {
      if (LocaleNCompare(symbol,"channel",7) == 0)
        {
          /* "channel(r,g,b,...)" selects a per-channel constant from a
             geometry argument list; which field applies depends on the
             colorspace and requested channel. */
          GeometryInfo
            channel_info;

          MagickStatusType
            flags;

          flags=ParseGeometry(symbol+7,&channel_info);
          if (image->colorspace == CMYKColorspace)
            switch (channel)
            {
              case CyanChannel:
              {
                if ((flags & RhoValue) == 0)
                  return(0.0);
                return(channel_info.rho);
              }
              case MagentaChannel:
              {
                if ((flags & SigmaValue) == 0)
                  return(0.0);
                return(channel_info.sigma);
              }
              case YellowChannel:
              {
                if ((flags & XiValue) == 0)
                  return(0.0);
                return(channel_info.xi);
              }
              case BlackChannel:
              {
                if ((flags & PsiValue) == 0)
                  return(0.0);
                return(channel_info.psi);
              }
              case OpacityChannel:
              {
                if ((flags & ChiValue) == 0)
                  return(0.0);
                return(channel_info.chi);
              }
              default:
                return(0.0);
            }
          switch (channel)
          {
            case RedChannel:
            {
              if ((flags & RhoValue) == 0)
                return(0.0);
              return(channel_info.rho);
            }
            case GreenChannel:
            {
              if ((flags & SigmaValue) == 0)
                return(0.0);
              return(channel_info.sigma);
            }
            case BlueChannel:
            {
              if ((flags & XiValue) == 0)
                return(0.0);
              return(channel_info.xi);
            }
            case OpacityChannel:
            {
              if ((flags & PsiValue) == 0)
                return(0.0);
              return(channel_info.psi);
            }
            case IndexChannel:
            {
              if ((flags & ChiValue) == 0)
                return(0.0);
              return(channel_info.chi);
            }
            default:
              return(0.0);
          }
        }
      if (LocaleCompare(symbol,"c") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'D':
    case 'd':
    {
      if (LocaleNCompare(symbol,"depth",5) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'G':
    case 'g':
    {
      if (LocaleCompare(symbol,"g") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'K':
    case 'k':
    {
      if (LocaleNCompare(symbol,"kurtosis",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"k") == 0)
        {
          if (image->colorspace != CMYKColorspace)
            {
              (void) ThrowMagickException(exception,GetMagickModule(),
                OptionError,"ColorSeparatedImageRequired","`%s'",
                image->filename);
              return(0.0);
            }
          return(QuantumScale*pixel.index);
        }
      break;
    }
    case 'H':
    case 'h':
    {
      if (LocaleCompare(symbol,"h") == 0)
        return((double) image->rows);
      if (LocaleCompare(symbol,"hue") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
            ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
          return(hue);
        }
      break;
    }
    case 'I':
    case 'i':
    {
      if ((LocaleCompare(symbol,"image.depth") == 0) ||
          (LocaleCompare(symbol,"image.minima") == 0) ||
          (LocaleCompare(symbol,"image.maxima") == 0) ||
          (LocaleCompare(symbol,"image.mean") == 0) ||
          (LocaleCompare(symbol,"image.kurtosis") == 0) ||
          (LocaleCompare(symbol,"image.skewness") == 0) ||
          (LocaleCompare(symbol,"image.standard_deviation") == 0))
        return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
      if (LocaleCompare(symbol,"image.resolution.x") == 0)
        return(image->x_resolution);
      if (LocaleCompare(symbol,"image.resolution.y") == 0)
        return(image->y_resolution);
      if (LocaleCompare(symbol,"intensity") == 0)
        return(QuantumScale*GetMagickPixelIntensity(image,&pixel));
      if (LocaleCompare(symbol,"i") == 0)
        return((double) x);
      break;
    }
    case 'J':
    case 'j':
    {
      if (LocaleCompare(symbol,"j") == 0)
        return((double) y);
      break;
    }
    case 'L':
    case 'l':
    {
      if (LocaleCompare(symbol,"lightness") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
            ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
          return(lightness);
        }
      if (LocaleCompare(symbol,"luma") == 0)
        {
          /* Rec.709-style luma weights. */
          double
            luma;

          luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luma);
        }
      if (LocaleCompare(symbol,"luminance") == 0)
        {
          double
            luminance;

          luminance=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
          return(QuantumScale*luminance);
        }
      break;
    }
    case 'M':
    case 'm':
    {
      if (LocaleNCompare(symbol,"maxima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"mean",4) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"minima",6) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleCompare(symbol,"m") == 0)
        return(QuantumScale*pixel.green);
      break;
    }
    case 'N':
    case 'n':
    {
      if (LocaleCompare(symbol,"n") == 0)
        return((double) GetImageListLength(fx_info->images));
      break;
    }
    case 'O':
    case 'o':
    {
      if (LocaleCompare(symbol,"o") == 0)
        return(QuantumScale*pixel.opacity);
      break;
    }
    case 'P':
    case 'p':
    {
      if (LocaleCompare(symbol,"page.height") == 0)
        return((double) image->page.height);
      if (LocaleCompare(symbol,"page.width") == 0)
        return((double) image->page.width);
      if (LocaleCompare(symbol,"page.x") == 0)
        return((double) image->page.x);
      if (LocaleCompare(symbol,"page.y") == 0)
        return((double) image->page.y);
      break;
    }
    case 'Q':
    case 'q':
    {
      if (LocaleCompare(symbol,"quality") == 0)
        return((double) image->quality);
      break;
    }
    case 'R':
    case 'r':
    {
      if (LocaleCompare(symbol,"resolution.x") == 0)
        return(image->x_resolution);
      if (LocaleCompare(symbol,"resolution.y") == 0)
        return(image->y_resolution);
      if (LocaleCompare(symbol,"r") == 0)
        return(QuantumScale*pixel.red);
      break;
    }
    case 'S':
    case 's':
    {
      if (LocaleCompare(symbol,"saturation") == 0)
        {
          double
            hue,
            lightness,
            saturation;

          ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
            ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
          return(saturation);
        }
      if (LocaleNCompare(symbol,"skewness",8) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
        return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
      break;
    }
    case 'T':
    case 't':
    {
      if (LocaleCompare(symbol,"t") == 0)
        return((double) GetImageIndexInList(fx_info->images));
      break;
    }
    case 'W':
    case 'w':
    {
      if (LocaleCompare(symbol,"w") == 0)
        return((double) image->columns);
      break;
    }
    case 'Y':
    case 'y':
    {
      if (LocaleCompare(symbol,"y") == 0)
        return(QuantumScale*pixel.blue);
      break;
    }
    case 'Z':
    case 'z':
    {
      if (LocaleCompare(symbol,"z") == 0)
        {
          double
            depth;

          depth=(double) GetImageChannelDepth(image,channel,fx_info->exception);
          return(depth);
        }
      break;
    }
    default:
      break;
  }
  /*
    Fall back to a user-defined variable stored in the symbol table; note
    these are returned unscaled.
  */
  value=(const char *) GetValueFromSplayTree(fx_info->symbols,symbol);
  if (value != (const char *) NULL)
    return(StringToDouble(value,(char **) NULL));
  (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
    "UnableToParseExpression","`%s'",symbol);
  return(0.0);
}
/*
  FxOperatorPrecedence() scans an fx expression and returns a pointer to the
  operator at which the expression should be split (the lowest-binding
  operator at nesting level zero), honoring left/right associativity; NULL is
  returned when no splittable operator is found.
*/
static const char *FxOperatorPrecedence(const char *expression,
  ExceptionInfo *exception)
{
  typedef enum
  {
    UndefinedPrecedence,
    NullPrecedence,
    BitwiseComplementPrecedence,
    ExponentPrecedence,
    ExponentialNotationPrecedence,
    MultiplyPrecedence,
    AdditionPrecedence,
    ShiftPrecedence,
    RelationalPrecedence,
    EquivalencyPrecedence,
    BitwiseAndPrecedence,
    BitwiseOrPrecedence,
    LogicalAndPrecedence,
    LogicalOrPrecedence,
    TernaryPrecedence,
    AssignmentPrecedence,
    CommaPrecedence,
    SeparatorPrecedence
  } FxPrecedence;

  FxPrecedence
    precedence,
    target;

  register const char
    *subexpression;

  register int
    c;

  size_t
    level;

  /* c tracks the previous significant character; '\0'/0 means "start". */
  c=0;
  level=0;
  subexpression=(const char *) NULL;
  target=NullPrecedence;
  while (*expression != '\0')
  {
    precedence=UndefinedPrecedence;
    if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@'))
      {
        expression++;
        continue;
      }
    /*
      Skip over multi-character tokens that would otherwise be mistaken for
      operators (function names containing operators, scientific notation,
      hex color literals).
    */
    switch (*expression)
    {
      case 'A':
      case 'a':
      {
#if defined(MAGICKCORE_HAVE_ACOSH)
        if (LocaleNCompare(expression,"acosh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
        if (LocaleNCompare(expression,"asinh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
        if (LocaleNCompare(expression,"atanh",5) == 0)
          {
            expression+=5;
            break;
          }
#endif
        if (LocaleNCompare(expression,"atan2",5) == 0)
          {
            expression+=5;
            break;
          }
        break;
      }
      case 'E':
      case 'e':
      {
        if ((isdigit((int) ((unsigned char) c)) != 0) &&
            ((LocaleNCompare(expression,"E+",2) == 0) ||
             (LocaleNCompare(expression,"E-",2) == 0)))
          {
            expression+=2;  /* scientific notation */
            break;
          }
      }
      /* NOTE(review): no break above -- 'E' not matching E+/E- falls
         through into the 'J' case (harmless: j0/j1 won't match). */
      case 'J':
      case 'j':
      {
        if ((LocaleNCompare(expression,"j0",2) == 0) ||
            (LocaleNCompare(expression,"j1",2) == 0))
          {
            expression+=2;
            break;
          }
        break;
      }
      case '#':
      {
        /* Hex color literal: consume all hex digits. */
        while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
          expression++;
        break;
      }
      default:
        break;
    }
    /* Brackets/braces raise the nesting level; operators inside them are
       not split candidates. */
    if ((c == (int) '{') || (c == (int) '['))
      level++;
    else
      if ((c == (int) '}') || (c == (int) ']'))
        level--;
    if (level == 0)
      switch ((unsigned char) *expression)
      {
        case '~':
        case '!':
        {
          precedence=BitwiseComplementPrecedence;
          break;
        }
        case '^':
        case '@':
        {
          precedence=ExponentPrecedence;
          break;
        }
        default:
        {
          /* Implicit multiplication, e.g. "2r" or ")(": a digit or ')'
             followed by a letter, '(' or digit (but never by x/y, which
             are coordinates). */
          if (((c != 0) && ((isdigit((int) ((unsigned char) c)) != 0) ||
               (strchr(")",(int) ((unsigned char) c)) != (char *) NULL))) &&
              (((islower((int) ((unsigned char) *expression)) != 0) ||
               (strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) ||
               ((isdigit((int) ((unsigned char) c)) == 0) &&
                (isdigit((int) ((unsigned char) *expression)) != 0))) &&
              (strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL))
            precedence=MultiplyPrecedence;
          break;
        }
        case '*':
        case '/':
        case '%':
        {
          precedence=MultiplyPrecedence;
          break;
        }
        case '+':
        case '-':
        {
          /* Only binary +/- count; after an operator or '(' it is a sign. */
          if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
              (isalpha(c) != 0))
            precedence=AdditionPrecedence;
          break;
        }
        case LeftShiftOperator:
        case RightShiftOperator:
        {
          precedence=ShiftPrecedence;
          break;
        }
        case '<':
        case LessThanEqualOperator:
        case GreaterThanEqualOperator:
        case '>':
        {
          precedence=RelationalPrecedence;
          break;
        }
        case EqualOperator:
        case NotEqualOperator:
        {
          precedence=EquivalencyPrecedence;
          break;
        }
        case '&':
        {
          precedence=BitwiseAndPrecedence;
          break;
        }
        case '|':
        {
          precedence=BitwiseOrPrecedence;
          break;
        }
        case LogicalAndOperator:
        {
          precedence=LogicalAndPrecedence;
          break;
        }
        case LogicalOrOperator:
        {
          precedence=LogicalOrPrecedence;
          break;
        }
        case ExponentialNotation:
        {
          precedence=ExponentialNotationPrecedence;
          break;
        }
        case ':':
        case '?':
        {
          precedence=TernaryPrecedence;
          break;
        }
        case '=':
        {
          precedence=AssignmentPrecedence;
          break;
        }
        case ',':
        {
          precedence=CommaPrecedence;
          break;
        }
        case ';':
        {
          precedence=SeparatorPrecedence;
          break;
        }
      }
    if ((precedence == BitwiseComplementPrecedence) ||
        (precedence == TernaryPrecedence) ||
        (precedence == AssignmentPrecedence))
      {
        if (precedence > target)
          {
            /*
              Right-to-left associativity.
            */
            target=precedence;
            subexpression=expression;
          }
      }
    else
      if (precedence >= target)
        {
          /*
            Left-to-right associativity.
          */
          target=precedence;
          subexpression=expression;
        }
    /* Skip the body of a parenthesized subexpression entirely. */
    if (strchr("(",(int) *expression) != (char *) NULL)
      expression=FxSubexpression(expression,exception);
    c=(int) (*expression++);
  }
  return(subexpression);
}
static double FxEvaluateSubexpression(FxInfo *fx_info,const ChannelType channel,
const ssize_t x,const ssize_t y,const char *expression,size_t *depth,
double *beta,ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth 58
char
*q,
subexpression[MaxTextExtent];
double
alpha,
gamma;
register const char
*p;
*beta=0.0;
if (exception->severity >= ErrorException)
return(0.0);
while (isspace((int) ((unsigned char) *expression)) != 0)
expression++;
if (*expression == '\0')
return(0.0);
*subexpression='\0';
p=FxOperatorPrecedence(expression,exception);
if (p != (const char *) NULL)
{
(void) CopyMagickString(subexpression,expression,(size_t)
(p-expression+1));
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth,
beta,exception);
switch ((unsigned char) *p)
{
case '~':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
*beta=(double) (~(size_t) *beta);
return(*beta);
}
case '!':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(*beta == 0.0 ? 1.0 : 0.0);
}
case '^':
{
*beta=pow((double) alpha,(double) FxEvaluateSubexpression(fx_info,
channel,x,y,++p,depth,beta,exception));
return(*beta);
}
case '*':
case ExponentialNotation:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha*(*beta));
}
case '/':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
if (*beta == 0.0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"DivideByZero","`%s'",expression);
return(0.0);
}
return(alpha/(*beta));
}
case '%':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
*beta=fabs(floor(((double) *beta)+0.5));
if (*beta == 0.0)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"DivideByZero","`%s'",expression);
return(0.0);
}
return(fmod((double) alpha,(double) *beta));
}
case '+':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha+(*beta));
}
case '-':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha-(*beta));
}
case LeftShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5));
return(*beta);
}
case RightShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5));
return(*beta);
}
case '<':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha < *beta ? 1.0 : 0.0);
}
case LessThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha <= *beta ? 1.0 : 0.0);
}
case '>':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha > *beta ? 1.0 : 0.0);
}
case GreaterThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha >= *beta ? 1.0 : 0.0);
}
case EqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0);
}
case NotEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0);
}
case '&':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5));
return(*beta);
}
case '|':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5));
return(*beta);
}
case LogicalAndOperator:
{
p++;
if (alpha <= 0.0)
{
*beta=0.0;
return(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
return(*beta);
}
case LogicalOrOperator:
{
p++;
if (alpha > 0.0)
{
*beta=1.0;
return(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
return(*beta);
}
case '?':
{
double
gamma;
(void) CopyMagickString(subexpression,++p,MaxTextExtent);
q=subexpression;
p=StringToken(":",&q);
if (q == (char *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
return(0.0);
}
if (fabs((double) alpha) >= MagickEpsilon)
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta,
exception);
else
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q,depth,beta,
exception);
return(gamma);
}
case '=':
{
char
numeric[MaxTextExtent];
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
return(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
(void) FormatLocaleString(numeric,MaxTextExtent,"%g",(double)
*beta);
(void) DeleteNodeFromSplayTree(fx_info->symbols,subexpression);
(void) AddValueToSplayTree(fx_info->symbols,ConstantString(
subexpression),ConstantString(numeric));
return(*beta);
}
case ',':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(alpha);
}
case ';':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth,beta,
exception);
return(*beta);
}
default:
{
gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,p,depth,beta,
exception);
return(gamma);
}
}
}
if (strchr("(",(int) *expression) != (char *) NULL)
{
(*depth)++;
if (*depth >= FxMaxParenthesisDepth)
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"ParenthesisNestedTooDeeply","`%s'",expression);
(void) CopyMagickString(subexpression,expression+1,MaxTextExtent);
subexpression[strlen(subexpression)-1]='\0';
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth,
beta,exception);
(*depth)--;
return(gamma);
}
switch (*expression)
{
case '+':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta,
exception);
return(1.0*gamma);
}
case '-':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta,
exception);
return(-1.0*gamma);
}
case '~':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth,beta,
exception);
return((double) (~(size_t) (gamma+0.5)));
}
case 'A':
case 'a':
{
if (LocaleNCompare(expression,"abs",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(fabs((double) alpha));
}
#if defined(MAGICKCORE_HAVE_ACOSH)
if (LocaleNCompare(expression,"acosh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(acosh((double) alpha));
}
#endif
if (LocaleNCompare(expression,"acos",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(acos((double) alpha));
}
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"airy",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
if (alpha == 0.0)
return(1.0);
gamma=2.0*j1((double) (MagickPI*alpha))/(MagickPI*alpha);
return(gamma*gamma);
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (LocaleNCompare(expression,"asinh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(asinh((double) alpha));
}
#endif
if (LocaleNCompare(expression,"asin",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(asin((double) alpha));
}
if (LocaleNCompare(expression,"alt",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
}
if (LocaleNCompare(expression,"atan2",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(atan2((double) alpha,(double) *beta));
}
#if defined(MAGICKCORE_HAVE_ATANH)
if (LocaleNCompare(expression,"atanh",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(atanh((double) alpha));
}
#endif
if (LocaleNCompare(expression,"atan",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(atan((double) alpha));
}
if (LocaleCompare(expression,"a") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(expression,"b") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'C':
case 'c':
{
if (LocaleNCompare(expression,"ceil",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(ceil((double) alpha));
}
if (LocaleNCompare(expression,"clamp",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
if (alpha < 0.0)
return(0.0);
if (alpha > 1.0)
return(1.0);
return(alpha);
}
if (LocaleNCompare(expression,"cosh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth, beta,exception);
return(cosh((double) alpha));
}
if (LocaleNCompare(expression,"cos",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(cos((double) alpha));
}
if (LocaleCompare(expression,"c") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'D':
case 'd':
{
if (LocaleNCompare(expression,"debug",5) == 0)
{
const char
*type;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
if (fx_info->images->colorspace == CMYKColorspace)
switch (channel)
{
case CyanChannel: type="cyan"; break;
case MagentaChannel: type="magenta"; break;
case YellowChannel: type="yellow"; break;
case OpacityChannel: type="opacity"; break;
case BlackChannel: type="black"; break;
default: type="unknown"; break;
}
else
switch (channel)
{
case RedChannel: type="red"; break;
case GreenChannel: type="green"; break;
case BlueChannel: type="blue"; break;
case OpacityChannel: type="opacity"; break;
default: type="unknown"; break;
}
(void) CopyMagickString(subexpression,expression+6,MaxTextExtent);
if (strlen(subexpression) > 1)
subexpression[strlen(subexpression)-1]='\0';
if (fx_info->file != (FILE *) NULL)
(void) FormatLocaleFile(fx_info->file,
"%s[%.20g,%.20g].%s: %s=%.*g\n",fx_info->images->filename,
(double) x,(double) y,type,subexpression,GetMagickPrecision(),
(double) alpha);
return(0.0);
}
if (LocaleNCompare(expression,"drc",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return((alpha/(*beta*(alpha-1.0)+1.0)));
}
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(expression,"epsilon") == 0)
return(MagickEpsilon);
if (LocaleNCompare(expression,"exp",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(exp((double) alpha));
}
if (LocaleCompare(expression,"e") == 0)
return(2.7182818284590452354);
break;
}
case 'F':
case 'f':
{
if (LocaleNCompare(expression,"floor",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(floor((double) alpha));
}
break;
}
case 'G':
case 'g':
{
if (LocaleNCompare(expression,"gauss",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
gamma=exp((double) (-alpha*alpha/2.0))/sqrt(2.0*MagickPI);
return(gamma);
}
if (LocaleNCompare(expression,"gcd",3) == 0)
{
MagickOffsetType
gcd;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType)
(*beta+0.5));
return((double) gcd);
}
if (LocaleCompare(expression,"g") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(expression,"h") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
if (LocaleCompare(expression,"hue") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
if (LocaleNCompare(expression,"hypot",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(hypot((double) alpha,(double) *beta));
}
break;
}
case 'K':
case 'k':
{
if (LocaleCompare(expression,"k") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'I':
case 'i':
{
if (LocaleCompare(expression,"intensity") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
if (LocaleNCompare(expression,"int",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(floor(alpha));
}
if (LocaleNCompare(expression,"isnan",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return((double) !!IsNaN((double) alpha));
}
if (LocaleCompare(expression,"i") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(expression,"j") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
#if defined(MAGICKCORE_HAVE_J0)
if (LocaleNCompare(expression,"j0",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth,
beta,exception);
return(j0((double) alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"j1",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth, beta,exception);
return(j1((double) alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (LocaleNCompare(expression,"jinc",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
if (alpha == 0.0)
return(1.0);
gamma=(2.0*j1((double) (MagickPI*alpha))/(MagickPI*alpha));
return(gamma);
}
#endif
break;
}
case 'L':
case 'l':
{
if (LocaleNCompare(expression,"ln",2) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,depth,
beta,exception);
return(log((double) alpha));
}
if (LocaleNCompare(expression,"logtwo",6) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,depth,
beta,exception);
return(log10((double) alpha))/log10(2.0);
}
if (LocaleNCompare(expression,"log",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(log10((double) alpha));
}
if (LocaleCompare(expression,"lightness") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'M':
case 'm':
{
if (LocaleCompare(expression,"MaxRGB") == 0)
return((double) QuantumRange);
if (LocaleNCompare(expression,"maxima",6) == 0)
break;
if (LocaleNCompare(expression,"max",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(alpha > *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"minima",6) == 0)
break;
if (LocaleNCompare(expression,"min",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(alpha < *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"mod",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
gamma=alpha-floor((double) (alpha/(*beta)))*(*beta);
return(gamma);
}
if (LocaleCompare(expression,"m") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'N':
case 'n':
{
if (LocaleNCompare(expression,"not",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return((double) (alpha < MagickEpsilon));
}
if (LocaleCompare(expression,"n") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(expression,"Opaque") == 0)
return(1.0);
if (LocaleCompare(expression,"o") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(expression,"phi") == 0)
return(MagickPHI);
if (LocaleCompare(expression,"pi") == 0)
return(MagickPI);
if (LocaleNCompare(expression,"pow",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(pow((double) alpha,(double) *beta));
}
if (LocaleCompare(expression,"p") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(expression,"QuantumRange") == 0)
return((double) QuantumRange);
if (LocaleCompare(expression,"QuantumScale") == 0)
return(QuantumScale);
break;
}
case 'R':
case 'r':
{
if (LocaleNCompare(expression,"rand",4) == 0)
{
double
alpha;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FxEvaluateSubexpression)
#endif
alpha=GetPseudoRandomValue(fx_info->random_info);
return(alpha);
}
if (LocaleNCompare(expression,"round",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
return(floor((double) alpha+0.5));
}
if (LocaleCompare(expression,"r") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'S':
case 's':
{
if (LocaleCompare(expression,"saturation") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
if (LocaleNCompare(expression,"sign",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(alpha < 0.0 ? -1.0 : 1.0);
}
if (LocaleNCompare(expression,"sinc",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
if (alpha == 0)
return(1.0);
gamma=(sin((double) (MagickPI*alpha))/(MagickPI*alpha));
return(gamma);
}
if (LocaleNCompare(expression,"sinh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(sinh((double) alpha));
}
if (LocaleNCompare(expression,"sin",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(sin((double) alpha));
}
if (LocaleNCompare(expression,"sqrt",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(sqrt((double) alpha));
}
if (LocaleNCompare(expression,"squish",6) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,depth,
beta,exception);
return((1.0/(1.0+exp((double) (-alpha)))));
}
if (LocaleCompare(expression,"s") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'T':
case 't':
{
if (LocaleNCompare(expression,"tanh",4) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,depth,
beta,exception);
return(tanh((double) alpha));
}
if (LocaleNCompare(expression,"tan",3) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,depth,
beta,exception);
return(tan((double) alpha));
}
if (LocaleCompare(expression,"Transparent") == 0)
return(0.0);
if (LocaleNCompare(expression,"trunc",5) == 0)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,depth,
beta,exception);
if (alpha >= 0.0)
return(floor((double) alpha));
return(ceil((double) alpha));
}
if (LocaleCompare(expression,"t") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'U':
case 'u':
{
if (LocaleCompare(expression,"u") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'V':
case 'v':
{
if (LocaleCompare(expression,"v") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'W':
case 'w':
{
if (LocaleNCompare(expression,"while",5) == 0)
{
do
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth,beta,exception);
} while (fabs((double) alpha) >= MagickEpsilon);
return(*beta);
}
if (LocaleCompare(expression,"w") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(expression,"y") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(expression,"z") == 0)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
break;
}
default:
break;
}
q=(char *) expression;
alpha=InterpretSiPrefixValue(expression,&q);
if (q == expression)
return(FxGetSymbol(fx_info,channel,x,y,expression,exception));
return(alpha);
}
MagickExport MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  /*
    Convenience wrapper: evaluate the fx expression on the gray channel at
    pixel (0,0) and store the result in *alpha.
  */
  return(FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception));
}
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
  double *alpha,ExceptionInfo *exception)
{
  FILE
    *suppressed;

  MagickBooleanType
    status;

  /*
    Evaluate the expression once with debug output suppressed: stash the
    output stream, run one gray-channel evaluation at (0,0) to prime the
    parser and surface syntax errors, then restore the stream.
  */
  suppressed=fx_info->file;
  fx_info->file=(FILE *) NULL;
  status=FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception);
  fx_info->file=suppressed;
  return(status);
}
MagickExport MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
  const ChannelType channel,const ssize_t x,const ssize_t y,double *alpha,
  ExceptionInfo *exception)
{
  double
    secondary;

  size_t
    parenthesis_depth;

  /*
    Evaluate the full expression for one channel of one pixel.  The
    secondary accumulator and the parenthesis-nesting counter both start
    at zero; success is reported unless the evaluator raised an
    OptionError in the exception structure.
  */
  secondary=0.0;
  parenthesis_depth=0;
  *alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,
    &parenthesis_depth,&secondary,exception);
  return(exception->severity == OptionError ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxImage() applies a mathematical expression to the specified image.
%
% The format of the FxImage method is:
%
% Image *FxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
% Image *FxImageChannel(const Image *image,const ChannelType channel,
% const char *expression,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o expression: A mathematical expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
  register ssize_t
    n;

  /*
    Release every per-thread FxInfo slot (NULL slots were never acquired),
    then the pointer array itself.  Always returns NULL for assignment
    back to the caller's pointer.
  */
  assert(fx_info != (FxInfo **) NULL);
  for (n=0; n < (ssize_t) GetMagickResourceLimit(ThreadResource); n++)
  {
    if (fx_info[n] != (FxInfo *) NULL)
      fx_info[n]=DestroyFxInfo(fx_info[n]);
  }
  fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
  return(fx_info);
}
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  char
    *fx_expression;

  double
    alpha;

  FxInfo
    **fx_info;

  register ssize_t
    i;

  size_t
    number_threads;

  /*
    Allocate one FxInfo per worker thread so rows can be evaluated in
    parallel without sharing parser state.  Returns NULL (with the
    exception set where applicable) on any failure.
  */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
  if (fx_info == (FxInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return((FxInfo **) NULL);
    }
  (void) ResetMagickMemory(fx_info,0,number_threads*sizeof(*fx_info));
  /*
    An expression of the form "@filename" is read from a file; otherwise the
    expression string is duplicated directly.
  */
  if (*expression != '@')
    fx_expression=ConstantString(expression);
  else
    fx_expression=FileToString(expression+1,~0UL,exception);
  if (fx_expression == (char *) NULL)
    {
      /*
        FileToString() failed (e.g. the @-file is unreadable); bail out
        rather than hand a NULL expression to AcquireFxInfo() or
        DestroyString().
      */
      fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
      return((FxInfo **) NULL);
    }
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    MagickBooleanType
      status;

    fx_info[i]=AcquireFxInfo(image,fx_expression);
    if (fx_info[i] == (FxInfo *) NULL)
      break;
    /*
      Preprocess once per thread so syntax errors surface before pixel work.
    */
    status=FxPreprocessExpression(fx_info[i],&alpha,exception);
    if (status == MagickFalse)
      break;
  }
  fx_expression=DestroyString(fx_expression);
  if (i < (ssize_t) number_threads)
    fx_info=DestroyFxThreadSet(fx_info);  /* partial set: release everything */
  return(fx_info);
}
MagickExport Image *FxImage(const Image *image,const char *expression,
  ExceptionInfo *exception)
{
  /*
    Convenience wrapper: apply the fx expression to the gray channel.
  */
  return(FxImageChannel(image,GrayChannel,expression,exception));
}
MagickExport Image *FxImageChannel(const Image *image,const ChannelType channel,
  const char *expression,ExceptionInfo *exception)
{
#define FxImageTag "Fx/Image"

  CacheView
    *fx_view;

  FxInfo
    **magick_restrict fx_info;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Apply the fx expression to each selected channel of every pixel of a
    clone of the image and return the clone.  Evaluation state is
    replicated per thread (AcquireFxThreadSet) so rows can be processed in
    parallel.  Returns NULL on failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  fx_info=AcquireFxThreadSet(image,expression,exception);
  if (fx_info == (FxInfo **) NULL)
    return((Image *) NULL);
  fx_image=CloneImage(image,0,0,MagickTrue,exception);
  if (fx_image == (Image *) NULL)
    {
      fx_info=DestroyFxThreadSet(fx_info);
      return((Image *) NULL);
    }
  /* DirectClass is required so per-pixel writes do not touch a colormap. */
  if (SetImageStorageClass(fx_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&fx_image->exception);
      fx_info=DestroyFxThreadSet(fx_info);
      fx_image=DestroyImage(fx_image);
      return((Image *) NULL);
    }
  /*
    Fx image.
  */
  status=MagickTrue;
  progress=0;
  fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,fx_image,fx_image->rows,1)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();  /* index into the per-thread FxInfo set */

    double
      alpha;

    register IndexPacket
      *magick_restrict fx_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /* A failure on any row cancels the remaining rows. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    fx_indexes=GetCacheViewAuthenticIndexQueue(fx_view);
    alpha=0.0;
    for (x=0; x < (ssize_t) fx_image->columns; x++)
    {
      /*
        Evaluate the expression once per requested channel; the result is
        scaled by QuantumRange and clamped before being stored.
      */
      if ((channel & RedChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],RedChannel,x,y,
            &alpha,exception);
          SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*alpha));
        }
      if ((channel & GreenChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],GreenChannel,x,y,
            &alpha,exception);
          SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*alpha));
        }
      if ((channel & BlueChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],BlueChannel,x,y,
            &alpha,exception);
          SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*alpha));
        }
      if ((channel & OpacityChannel) != 0)
        {
          (void) FxEvaluateChannelExpression(fx_info[id],OpacityChannel,x,y,
            &alpha,exception);
          /*
            With no matte channel the value is stored directly; otherwise
            it is inverted (opacity is the complement of alpha here).
          */
          if (image->matte == MagickFalse)
            SetPixelOpacity(q,ClampToQuantum((MagickRealType) QuantumRange*
              alpha));
          else
            SetPixelOpacity(q,ClampToQuantum((MagickRealType) (QuantumRange-
              QuantumRange*alpha)));
        }
      /* The index (black) channel only exists for CMYK images. */
      if (((channel & IndexChannel) != 0) &&
          (fx_image->colorspace == CMYKColorspace))
        {
          (void) FxEvaluateChannelExpression(fx_info[id],IndexChannel,x,y,
            &alpha,exception);
          SetPixelIndex(fx_indexes+x,ClampToQuantum((MagickRealType)
            QuantumRange*alpha));
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared across threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FxImageChannel)
#endif
        proceed=SetImageProgress(image,FxImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  fx_view=DestroyCacheView(fx_view);
  fx_info=DestroyFxThreadSet(fx_info);
  if (status == MagickFalse)
    fx_image=DestroyImage(fx_image);
  return(fx_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p l o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImplodeImage() creates a new image that is a copy of an existing
% one with the image pixels "imploded" by the specified percentage. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ImplodeImage method is:
%
% Image *ImplodeImage(const Image *image,const double amount,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o implode_image: Method ImplodeImage returns a pointer to the image
% after it is imploded. A null image is returned if there is a memory
% shortage.
%
% o image: the image.
%
% o amount: Define the extent of the implosion.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  ExceptionInfo *exception)
{
#define ImplodeImageTag "Implode/Image"

  CacheView
    *image_view,
    *implode_view;

  double
    radius;

  Image
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.  The result is a DirectClass
    clone of the source; pixels inside the inscribed ellipse are resampled
    from positions pulled toward (amount > 0) or pushed away from
    (amount < 0) the image center.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  implode_image=CloneImage(image,0,0,MagickTrue,exception);
  if (implode_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(implode_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&implode_image->exception);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  /* A translucent background implies the result needs a matte channel. */
  if (implode_image->background_color.opacity != OpaqueOpacity)
    implode_image->matte=MagickTrue;
  /*
    Compute scaling factor.  The shorter axis is stretched so the implode
    region is a circle in the scaled space; radius is half the shorter
    dimension.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*image->columns;
  center.y=0.5*image->rows;
  radius=center.x;
  if (image->columns > image->rows)
    scale.y=(double) image->columns/(double) image->rows;
  else
    if (image->columns < image->rows)
      {
        scale.x=(double) image->rows/(double) image->columns;
        radius=center.y;
      }
  /*
    Implode image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(implode_image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
  implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,implode_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      distance;

    MagickPixelPacket
      pixel;

    PointInfo
      delta;

    register IndexPacket
      *magick_restrict implode_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /* A failure on any row cancels the remaining rows. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    implode_indexes=GetCacheViewAuthenticIndexQueue(implode_view);
    delta.y=scale.y*(double) (y-center.y);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance < (radius*radius))
        {
          double
            factor;

          /*
            Implode the pixel: the radial factor shrinks toward the center
            with distance, and the source pixel is interpolated from the
            scaled-back position.
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin((double) (MagickPI*sqrt((double) distance)/
              radius/2)),-amount);
          (void) InterpolateMagickPixelPacket(image,image_view,
            UndefinedInterpolatePixel,(double) (factor*delta.x/scale.x+
            center.x),(double) (factor*delta.y/scale.y+center.y),&pixel,
            exception);
          SetPixelPacket(implode_image,&pixel,q,implode_indexes+x);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is shared across threads; serialize the update. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ImplodeImage)
#endif
        proceed=SetImageProgress(image,ImplodeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The MorphImages() method requires a minimum of two images. The first
% image is transformed into the second by a number of intervening images
% as specified by frames.
%
% The format of the MorphImages method is:
%
% Image *MorphImages(const Image *image,const size_t number_frames,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_frames: Define the number of in-between image to generate.
% The more in-between frames, the smoother the morph.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphImages(const Image *image,
  const size_t number_frames,ExceptionInfo *exception)
{
#define MorphImageTag "Morph/Image"

  double
    alpha,
    beta;

  Image
    *morph_image,
    *morph_images;

  MagickBooleanType
    status;

  MagickOffsetType
    scene;

  register const Image
    *next;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Clone first frame in sequence.  For each adjacent pair of frames the
    method inserts number_frames in-between frames whose geometry and
    pixels are linear blends of the two endpoints, then appends a clone of
    the pair's second frame.  Returns the new list, or NULL on failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  morph_images=CloneImage(image,0,0,MagickTrue,exception);
  if (morph_images == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;  /* initialize before either branch may set it */
  if (GetNextImageInList(image) == (Image *) NULL)
    {
      /*
        Morph single image: just replicate the sole frame.
      */
      for (i=1; i < (ssize_t) number_frames; i++)
      {
        morph_image=CloneImage(image,0,0,MagickTrue,exception);
        if (morph_image == (Image *) NULL)
          {
            morph_images=DestroyImageList(morph_images);
            return((Image *) NULL);
          }
        AppendImageToList(&morph_images,morph_image);
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) i,
              number_frames);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      return(GetFirstImageInList(morph_images));
    }
  /*
    Morph image sequence.
  */
  scene=0;
  next=image;
  for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
  {
    for (i=0; i < (ssize_t) number_frames; i++)
    {
      CacheView
        *image_view,
        *morph_view;

      /*
        beta ramps from ~0 to ~1 across the in-between frames; alpha is its
        complement.  The in-between geometry is the blend of the two
        endpoint geometries.
      */
      beta=(double) (i+1.0)/(double) (number_frames+1.0);
      alpha=1.0-beta;
      morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
        GetNextImageInList(next)->columns+0.5),(size_t) (alpha*
        next->rows+beta*GetNextImageInList(next)->rows+0.5),
        next->filter,next->blur,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      if (SetImageStorageClass(morph_image,DirectClass) == MagickFalse)
        {
          InheritException(exception,&morph_image->exception);
          morph_image=DestroyImage(morph_image);
          /* Fix: release the accumulated list too (was leaked here). */
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      AppendImageToList(&morph_images,morph_image);
      morph_images=GetLastImageInList(morph_images);
      /* Resize the next frame to the in-between geometry for blending. */
      morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
        morph_images->rows,GetNextImageInList(next)->filter,
        GetNextImageInList(next)->blur,exception);
      if (morph_image == (Image *) NULL)
        {
          morph_images=DestroyImageList(morph_images);
          return((Image *) NULL);
        }
      image_view=AcquireVirtualCacheView(morph_image,exception);
      morph_view=AcquireAuthenticCacheView(morph_images,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(morph_image,morph_image,morph_image->rows,1)
#endif
      for (y=0; y < (ssize_t) morph_images->rows; y++)
      {
        MagickBooleanType
          sync;

        register const PixelPacket
          *magick_restrict p;

        register ssize_t
          x;

        register PixelPacket
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
          exception);
        q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
          exception);
        if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) morph_images->columns; x++)
        {
          /*
            Blend: alpha weights the current (resized) frame already in q,
            beta weights the resized next frame in p.
          */
          SetPixelRed(q,ClampToQuantum(alpha*
            GetPixelRed(q)+beta*GetPixelRed(p)));
          SetPixelGreen(q,ClampToQuantum(alpha*
            GetPixelGreen(q)+beta*GetPixelGreen(p)));
          SetPixelBlue(q,ClampToQuantum(alpha*
            GetPixelBlue(q)+beta*GetPixelBlue(p)));
          SetPixelOpacity(q,ClampToQuantum(alpha*
            GetPixelOpacity(q)+beta*GetPixelOpacity(p)));
          p++;
          q++;
        }
        sync=SyncCacheViewAuthenticPixels(morph_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      morph_view=DestroyCacheView(morph_view);
      image_view=DestroyCacheView(image_view);
      morph_image=DestroyImage(morph_image);
    }
    if (i < (ssize_t) number_frames)
      break;
    /*
      Clone last frame in sequence.
    */
    morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
    if (morph_image == (Image *) NULL)
      {
        morph_images=DestroyImageList(morph_images);
        return((Image *) NULL);
      }
    AppendImageToList(&morph_images,morph_image);
    morph_images=GetLastImageInList(morph_images);
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_MorphImages)
#endif
        proceed=SetImageProgress(image,MorphImageTag,scene,
          GetImageListLength(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
    scene++;
  }
  if (GetNextImageInList(next) != (Image *) NULL)
    {
      /* The loop aborted early; discard the partial sequence. */
      morph_images=DestroyImageList(morph_images);
      return((Image *) NULL);
    }
  return(GetFirstImageInList(morph_images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P l a s m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PlasmaImage() initializes an image with plasma fractal values. The image
% must be initialized with a base color and the random number generator
% seeded before this method is called.
%
% The format of the PlasmaImage method is:
%
% MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
% size_t attenuate,size_t depth)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o segment: Define the region to apply plasma fractals values.
%
% o attenuate: Define the plasma attenuation factor.
%
% o depth: Limit the plasma recursion depth.
%
*/
static inline Quantum PlasmaPixel(RandomInfo *random_info,
  const MagickRealType pixel,const double noise)
{
  Quantum
    value;

  /*
    Perturb the pixel by a uniform random offset in [-noise/2,+noise/2),
    then clamp the result into the valid quantum range.
  */
  value=ClampToQuantum(pixel+noise*GetPseudoRandomValue(random_info)-
    noise/2.0);
  if (value <= 0)
    return((Quantum) 0);
  if (value >= QuantumRange)
    return(QuantumRange);
  return(value);
}
/*
  PlasmaImageProxy() renders plasma fractal values into the region of
  'image' bounded by 'segment'.  While depth > 0 the segment is divided
  into four quadrants and the routine recurses on each with attenuation
  increased by one; at depth == 0 it writes the edge midpoints and the
  segment center, each set to the average of two bracketing pixels plus
  random noise of amplitude QuantumRange/(2*attenuate).  The final return
  value is MagickTrue once the segment is no larger than 3x3 (resolved),
  MagickFalse while further subdivision is still required.
*/
MagickExport MagickBooleanType PlasmaImageProxy(Image *image,
  CacheView *image_view,CacheView *u_view,CacheView *v_view,
  RandomInfo *random_info,const SegmentInfo *segment,size_t attenuate,
  size_t depth)
{
  ExceptionInfo
    *exception;

  double
    plasma;

  PixelPacket
    u,
    v;

  ssize_t
    x,
    x_mid,
    y,
    y_mid;

  /*
    A degenerate (zero-area) segment needs no further work.
  */
  if ((fabs(segment->x2-segment->x1) <= MagickEpsilon) &&
      (fabs(segment->y2-segment->y1) <= MagickEpsilon))
    return(MagickTrue);
  if (depth != 0)
    {
      MagickBooleanType
        status;

      SegmentInfo
        local_info;

      /*
        Divide the area into quadrants and recurse.
      */
      depth--;
      attenuate++;
      x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
      y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
      /*
        Top-left quadrant.
      */
      local_info=(*segment);
      local_info.x2=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth);
      /*
        Bottom-left quadrant.
      */
      local_info=(*segment);
      local_info.y1=(double) y_mid;
      local_info.x2=(double) x_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth);
      /*
        Top-right quadrant.
      */
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y2=(double) y_mid;
      (void) PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth);
      /*
        Bottom-right quadrant; its status becomes the return value.
      */
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y1=(double) y_mid;
      status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth);
      return(status);
    }
  x_mid=(ssize_t) ceil((segment->x1+segment->x2)/2-0.5);
  y_mid=(ssize_t) ceil((segment->y1+segment->y2)/2-0.5);
  /*
    If the segment has collapsed onto its own midpoint there is nothing
    left to paint, but the region is not yet flagged as resolved.
  */
  if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
      (fabs(segment->x2-x_mid) < MagickEpsilon) &&
      (fabs(segment->y1-y_mid) < MagickEpsilon) &&
      (fabs(segment->y2-y_mid) < MagickEpsilon))
    return(MagickFalse);
  /*
    Average pixels and apply plasma.
  */
  exception=(&image->exception);
  plasma=(double) QuantumRange/(2.0*attenuate);
  if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
      (fabs(segment->x2-x_mid) > MagickEpsilon))
    {
      register PixelPacket
        *magick_restrict q;

      /*
        Left pixel: average the two left-edge corners, add noise.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      (void) GetOneCacheViewVirtualPixel(u_view,x,(ssize_t)
        ceil(segment->y1-0.5),&u,exception);
      (void) GetOneCacheViewVirtualPixel(v_view,x,(ssize_t)
        ceil(segment->y2-0.5),&v,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
      if (q == (PixelPacket *) NULL)
        return(MagickTrue);
      SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0,
        plasma));
      SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+
        v.green)/2.0,plasma));
      SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/
        2.0,plasma));
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      if (fabs(segment->x1-segment->x2) > MagickEpsilon)
        {
          /*
            Right pixel: average the two right-edge corners, add noise.
          */
          x=(ssize_t) ceil(segment->x2-0.5);
          (void) GetOneCacheViewVirtualPixel(u_view,x,(ssize_t)
            ceil(segment->y1-0.5),&u,exception);
          (void) GetOneCacheViewVirtualPixel(v_view,x,(ssize_t)
            ceil(segment->y2-0.5),&v,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
          if (q == (PixelPacket *) NULL)
            return(MagickTrue);
          SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/
            2.0,plasma));
          SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+
            v.green)/2.0,plasma));
          SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+
            v.blue)/2.0,plasma));
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->y1-y_mid) > MagickEpsilon) ||
      (fabs(segment->y2-y_mid) > MagickEpsilon))
    {
      /*
        NOTE(review): this guard tests x1 against x_mid but y2 against
        y_mid, unlike the symmetric x-edge test above — looks asymmetric;
        confirm against upstream before "fixing".
      */
      if ((fabs(segment->x1-x_mid) > MagickEpsilon) ||
          (fabs(segment->y2-y_mid) > MagickEpsilon))
        {
          register PixelPacket
            *magick_restrict q;

          /*
            Bottom pixel: average the two bottom-edge corners, add noise.
          */
          y=(ssize_t) ceil(segment->y2-0.5);
          (void) GetOneCacheViewVirtualPixel(u_view,(ssize_t)
            ceil(segment->x1-0.5),y,&u,exception);
          (void) GetOneCacheViewVirtualPixel(v_view,(ssize_t)
            ceil(segment->x2-0.5),y,&v,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if (q == (PixelPacket *) NULL)
            return(MagickTrue);
          SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/
            2.0,plasma));
          SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+
            v.green)/2.0,plasma));
          SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+
            v.blue)/2.0,plasma));
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
      if (fabs(segment->y1-segment->y2) > MagickEpsilon)
        {
          register PixelPacket
            *magick_restrict q;

          /*
            Top pixel: average the two top-edge corners, add noise.
          */
          y=(ssize_t) ceil(segment->y1-0.5);
          (void) GetOneCacheViewVirtualPixel(u_view,(ssize_t)
            ceil(segment->x1-0.5),y,&u,exception);
          (void) GetOneCacheViewVirtualPixel(v_view,(ssize_t)
            ceil(segment->x2-0.5),y,&v,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if (q == (PixelPacket *) NULL)
            return(MagickTrue);
          SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+
            v.red)/2.0,plasma));
          SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+
            v.green)/2.0,plasma));
          SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+
            v.blue)/2.0,plasma));
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->x1-segment->x2) > MagickEpsilon) ||
      (fabs(segment->y1-segment->y2) > MagickEpsilon))
    {
      register PixelPacket
        *magick_restrict q;

      /*
        Middle pixel: average the two opposite segment corners, add noise.
      */
      x=(ssize_t) ceil(segment->x1-0.5);
      y=(ssize_t) ceil(segment->y1-0.5);
      (void) GetOneCacheViewVirtualPixel(u_view,x,y,&u,exception);
      x=(ssize_t) ceil(segment->x2-0.5);
      y=(ssize_t) ceil(segment->y2-0.5);
      (void) GetOneCacheViewVirtualPixel(v_view,x,y,&v,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
      if (q == (PixelPacket *) NULL)
        return(MagickTrue);
      SetPixelRed(q,PlasmaPixel(random_info,(MagickRealType) (u.red+v.red)/2.0,
        plasma));
      SetPixelGreen(q,PlasmaPixel(random_info,(MagickRealType) (u.green+
        v.green)/2.0,plasma));
      SetPixelBlue(q,PlasmaPixel(random_info,(MagickRealType) (u.blue+v.blue)/
        2.0,plasma));
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
    }
  /*
    Segments smaller than 3x3 pixels are fully resolved.
  */
  if ((fabs(segment->x2-segment->x1) < 3.0) &&
      (fabs(segment->y2-segment->y1) < 3.0))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  PlasmaImage() initializes the image with plasma fractal values.  It
  acquires the authentic and virtual cache views plus a random source,
  delegates the actual rendering to PlasmaImageProxy(), and releases the
  resources in reverse order of acquisition.

  Returns the status reported by PlasmaImageProxy().

  Fix: the original dereferenced image->debug BEFORE asserting image is
  non-NULL, and emitted the same trace event twice; the assertions now
  run first and the event is logged once.
*/
MagickExport MagickBooleanType PlasmaImage(Image *image,
  const SegmentInfo *segment,size_t attenuate,size_t depth)
{
  CacheView
    *image_view,
    *u_view,
    *v_view;

  MagickBooleanType
    status;

  RandomInfo
    *random_info;

  /*
    Validate the image before touching any of its fields.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  image_view=AcquireAuthenticCacheView(image,&image->exception);
  u_view=AcquireVirtualCacheView(image,&image->exception);
  v_view=AcquireVirtualCacheView(image,&image->exception);
  random_info=AcquireRandomInfo();
  status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
    attenuate,depth);
  random_info=DestroyRandomInfo(random_info);
  v_view=DestroyCacheView(v_view);
  u_view=DestroyCacheView(u_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l a r o i d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolaroidImage() simulates a Polaroid picture.
%
% The format of the PolaroidImage method is:
%
%   Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
%     const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  PolaroidImage() simulates a Polaroid picture: the source image is framed
  with a border sized from its dimensions, optionally captioned, bent with
  a wave, given a drop shadow, rotated by 'angle', and trimmed.  Returns a
  newly allocated image (caller owns it) or NULL on failure.
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const double angle,ExceptionInfo *exception)
{
  const char
    *value;

  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;

  size_t
    height;

  ssize_t
    quantum;

  /*
    Simulate a Polaroid picture.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    The frame thickness is 1/25th of the larger dimension, at least 10px.
  */
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  /*
    NOTE(review): the property key is capital-C "Caption"; most properties
    in this code base are lower-case — confirm against the callers.
  */
  value=GetImageProperty(image,"Caption");
  if (value != (const char *) NULL)
    {
      char
        *caption,
        geometry[MaxTextExtent];

      DrawInfo
        *annotate_info;

      MagickBooleanType
        status;

      ssize_t
        count;

      TypeMetric
        metrics;

      /*
        Generate caption image: format the text to the picture width and
        annotate it onto a border-colored strip appended below the photo.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
      caption=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,
        value);
      (void) CloneString(&annotate_info->text,caption);
      /* count is the number of caption lines minus one; the extent is
         sized to hold them all */
      count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,&metrics,
        &caption);
      status=SetImageExtent(caption_image,image->columns,(size_t)
        ((count+1)*(metrics.ascent-metrics.descent)+0.5));
      if (status == MagickFalse)
        caption_image=DestroyImage(caption_image);
      else
        {
          caption_image->background_color=image->border_color;
          (void) SetImageBackgroundColor(caption_image);
          (void) CloneString(&annotate_info->text,caption);
          (void) FormatLocaleString(geometry,MaxTextExtent,"+0+%g",
            metrics.ascent);
          if (annotate_info->gravity == UndefinedGravity)
            (void) CloneString(&annotate_info->geometry,AcquireString(
              geometry));
          (void) AnnotateImage(caption_image,annotate_info);
          height+=caption_image->rows;
        }
      annotate_info=DestroyDrawInfo(annotate_info);
      caption=DestroyString(caption);
    }
  /*
    Compose photo and caption onto the border-colored backing card.
  */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image);
  (void) CompositeImage(picture_image,OverCompositeOp,image,quantum,quantum);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,OverCompositeOp,caption_image,
        quantum,(ssize_t) (image->rows+3*quantum/2));
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorDatabase("none",&picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel);
  /*
    Bend the picture: rotate 90 degrees, wave it, rotate back.
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  InheritException(&bend_image->exception,exception);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  /*
    Cast the drop shadow, then flop so it falls on the expected side.
  */
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      /* DestroyImage() returns NULL, so this returns NULL to the caller */
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      /* DestroyImage() returns NULL, so this returns NULL to the caller */
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,OverCompositeOp,picture_image,
    (ssize_t) (-0.01*picture_image->columns/2.0),0L);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorDatabase("none",&polaroid_image->background_color,exception);
  /*
    Final tilt and trim of the composite.
  */
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p i a T o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SepiaToneImage() applies a special effect to the image, similar to the
% effect achieved in a photo darkroom by sepia toning. Threshold ranges from
% 0 to QuantumRange and is a measure of the extent of the sepia toning. A
% threshold of 80% is a good starting point for a reasonable tone.
%
% The format of the SepiaToneImage method is:
%
% Image *SepiaToneImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: the tone threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SepiaToneImage() applies a sepia-toning effect driven by 'threshold'
  (0..QuantumRange).  For each pixel the intensity is remapped into warm
  red/green tones and a darkened blue tone, the result is normalized and
  contrast-enhanced.  Returns a new image (caller owns it) or NULL on
  failure.
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag "SepiaTone/Image"

  CacheView
    *image_view,
    *sepia_view;

  Image
    *sepia_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  sepia_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&sepia_image->exception);
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,sepia_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity,
        tone;

      intensity=GetPixelIntensity(image,p);
      /* red: brighten by (QuantumRange-threshold), saturating at white */
      tone=intensity > threshold ? (double) QuantumRange : intensity+
        (double) QuantumRange-threshold;
      SetPixelRed(q,ClampToQuantum(tone));
      /* green: same shape with a 7/6-scaled threshold (slightly dimmer) */
      tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
        intensity+(double) QuantumRange-7.0*threshold/6.0;
      SetPixelGreen(q,ClampToQuantum(tone));
      /* blue: darken by threshold/6, clipping at black */
      tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
      SetPixelBlue(q,ClampToQuantum(tone));
      /* floor green/blue at threshold/7 so shadows stay warm, not black */
      tone=threshold/7.0;
      if ((double) GetPixelGreen(q) < tone)
        SetPixelGreen(q,ClampToQuantum(tone));
      if ((double) GetPixelBlue(q) < tone)
        SetPixelBlue(q,ClampToQuantum(tone));
      SetPixelOpacity(q,GetPixelOpacity(p));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SepiaToneImage)
#endif
        proceed=SetImageProgress(image,SepiaToneImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sepia_view=DestroyCacheView(sepia_view);
  image_view=DestroyCacheView(image_view);
  /*
    Stretch and boost contrast of the toned result.
  */
  (void) NormalizeImage(sepia_image);
  (void) ContrastImage(sepia_image,MagickTrue);
  if (status == MagickFalse)
    sepia_image=DestroyImage(sepia_image);
  return(sepia_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d o w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadowImage() simulates a shadow from the specified image and returns it.
%
% The format of the ShadowImage method is:
%
% Image *ShadowImage(const Image *image,const double opacity,
% const double sigma,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: percentage transparency.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x_offset: the shadow x-offset.
%
% o y_offset: the shadow y-offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ShadowImage() builds a drop shadow for 'image': the image is cloned,
  padded with a transparent border sized from sigma, recolored to the
  background color with alpha scaled by 'opacity' percent, and the alpha
  channel is Gaussian-blurred.  The page geometry is offset so the shadow
  lands at (x_offset,y_offset) when composited.  Returns a new image
  (caller owns it) or NULL on failure.
*/
MagickExport Image *ShadowImage(const Image *image,const double opacity,
  const double sigma,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag "Shadow/Image"

  CacheView
    *image_view;

  Image
    *border_image,
    *clone_image,
    *shadow_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    border_info;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(clone_image,sRGBColorspace);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod);
  clone_image->compose=OverCompositeOp;
  /*
    Pad with a transparent border wide enough for the blur to bleed into.
  */
  border_info.width=(size_t) floor(2.0*sigma+0.5);
  border_info.height=(size_t) floor(2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  (void) QueryColorDatabase("none",&clone_image->border_color,exception);
  border_image=BorderImage(clone_image,&border_info,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->matte == MagickFalse)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel);
  /*
    Shadow image: flatten every pixel to the background color, keeping
    only an opacity-scaled silhouette of the original alpha.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(border_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(border_image,border_image,border_image->rows,1)
#endif
  for (y=0; y < (ssize_t) border_image->rows; y++)
  {
    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) border_image->columns; x++)
    {
      SetPixelRed(q,border_image->background_color.red);
      SetPixelGreen(q,border_image->background_color.green);
      SetPixelBlue(q,border_image->background_color.blue);
      if (border_image->matte == MagickFalse)
        SetPixelOpacity(q,border_image->background_color.opacity);
      else
        SetPixelOpacity(q,ClampToQuantum((double) (QuantumRange-
          GetPixelAlpha(q)*opacity/100.0)));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ShadowImage)
#endif
        proceed=SetImageProgress(image,ShadowImageTag,progress++,
          border_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Soften the silhouette: blur the alpha channel only.
  */
  shadow_image=BlurImageChannel(border_image,AlphaChannel,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  /*
    Shift the page geometry so the shadow offset compensates for the
    border padding added above.
  */
  shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
  shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
  return(shadow_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S k e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SketchImage() simulates a pencil sketch. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SketchImage() selects a suitable radius for you. Angle gives the angle
% of the sketch.
%
% The format of the SketchImage method is:
%
% Image *SketchImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting
% the center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: Apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SketchImage() simulates a pencil sketch: a double-sized random-noise
  image is motion-blurred along 'angle', edge-detected, negated and
  halved, then color-dodge composited over the original and softened with
  a 20x80 blend of the original.  Returns a new image (caller owns it) or
  NULL on failure.

  Fix: the original acquired an authentic cache view into 'random_view'
  twice — once before the AccelerateRandomImage() check and again inside
  the non-accelerated branch — leaking the first CacheView.  The view is
  now acquired exactly once and reused.
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  CacheView
    *random_view;

  Image
    *blend_image,
    *blur_image,
    *dodge_image,
    *random_image,
    *sketch_image;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Sketch image.
  */
  random_image=CloneImage(image,image->columns << 1,image->rows << 1,
    MagickTrue,exception);
  if (random_image == (Image *) NULL)
    return((Image *) NULL);
  random_view=AcquireAuthenticCacheView(random_image,exception);
  if (AccelerateRandomImage(random_image,exception) == MagickFalse)
    {
      /*
        No acceleration: fill the image with pseudo-random gray noise.
        The cache view acquired above is reused here (the original code
        acquired a second view into the same variable, leaking the first).
      */
      status=MagickTrue;
      GetMagickPixelPacket(random_image,&zero);
      random_info=AcquireRandomInfoThreadSet();
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      key=GetRandomSecretKey(random_info[0]);
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
      for (y=0; y < (ssize_t) random_image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        MagickPixelPacket
          pixel;

        register IndexPacket
          *magick_restrict indexes;

        register ssize_t
          x;

        register PixelPacket
          *magick_restrict q;

        if (status == MagickFalse)
          continue;
        q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(random_view);
        pixel=zero;
        for (x=0; x < (ssize_t) random_image->columns; x++)
        {
          /* gray noise: identical random value in every channel */
          pixel.red=(MagickRealType) (QuantumRange*
            GetPseudoRandomValue(random_info[id]));
          pixel.green=pixel.red;
          pixel.blue=pixel.red;
          if (image->colorspace == CMYKColorspace)
            pixel.index=pixel.red;
          SetPixelPacket(random_image,&pixel,q,indexes+x);
          q++;
        }
        if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      random_info=DestroyRandomInfoThreadSet(random_info);
      if (status == MagickFalse)
        {
          random_view=DestroyCacheView(random_view);
          random_image=DestroyImage(random_image);
          return((Image *) NULL);
        }
    }
  random_view=DestroyCacheView(random_view);
  blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
  random_image=DestroyImage(random_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  dodge_image=EdgeImage(blur_image,radius,exception);
  blur_image=DestroyImage(blur_image);
  if (dodge_image == (Image *) NULL)
    return((Image *) NULL);
  (void) NormalizeImage(dodge_image);
  (void) NegateImage(dodge_image,MagickFalse);
  (void) TransformImage(&dodge_image,(char *) NULL,"50%");
  sketch_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sketch_image == (Image *) NULL)
    {
      dodge_image=DestroyImage(dodge_image);
      return((Image *) NULL);
    }
  (void) CompositeImage(sketch_image,ColorDodgeCompositeOp,dodge_image,0,0);
  dodge_image=DestroyImage(dodge_image);
  blend_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blend_image == (Image *) NULL)
    {
      sketch_image=DestroyImage(sketch_image);
      return((Image *) NULL);
    }
  (void) SetImageArtifact(blend_image,"compose:args","20x80");
  (void) CompositeImage(sketch_image,BlendCompositeOp,blend_image,0,0);
  blend_image=DestroyImage(blend_image);
  return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o l a r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SolarizeImage() applies a special effect to the image, similar to the effect
% achieved in a photo darkroom by selectively exposing areas of photo
% sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a
% measure of the extent of the solarization.
%
% The format of the SolarizeImage method is:
%
% MagickBooleanType SolarizeImage(Image *image,const double threshold)
% MagickBooleanType SolarizeImageChannel(Image *image,
% const ChannelType channel,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel type.
%
% o threshold: Define the extent of the solarization.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SolarizeImage() solarizes the default channels of the image; this is a
  thin convenience wrapper around SolarizeImageChannel(), reporting errors
  through the image's own exception.
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold)
{
  return(SolarizeImageChannel(image,DefaultChannels,threshold,
    &image->exception));
}
/*
  SolarizeImageChannel() solarizes the selected channels: every channel
  value above 'threshold' is inverted (QuantumRange-value), mimicking
  darkroom over-exposure.  Returns MagickTrue on success.
*/
MagickExport MagickBooleanType SolarizeImageChannel(Image *image,
  const ChannelType channel,const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag "Solarize/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /*
        Solarize colormap.

        NOTE(review): after the colormap pass the pixel loop below still
        runs for PseudoClass images — confirm the pixel cache and colormap
        do not both get inverted (double solarization).
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((channel & RedChannel) != 0)
          if ((double) image->colormap[i].red > threshold)
            image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((channel & GreenChannel) != 0)
          if ((double) image->colormap[i].green > threshold)
            image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((channel & BlueChannel) != 0)
          if ((double) image->colormap[i].blue > threshold)
            image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
    }
  /*
    Solarize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* invert only channel values that exceed the threshold */
      if ((channel & RedChannel) != 0)
        if ((double) GetPixelRed(q) > threshold)
          SetPixelRed(q,QuantumRange-GetPixelRed(q));
      if ((channel & GreenChannel) != 0)
        if ((double) GetPixelGreen(q) > threshold)
          SetPixelGreen(q,QuantumRange-GetPixelGreen(q));
      if ((channel & BlueChannel) != 0)
        if ((double) GetPixelBlue(q) > threshold)
          SetPixelBlue(q,QuantumRange-GetPixelBlue(q));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SolarizeImage)
#endif
        proceed=SetImageProgress(image,SolarizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e g a n o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SteganoImage() hides a digital watermark within the image.  Recover
% the hidden watermark later to prove the authenticity of an image.
% Offset defines the start position within the image to hide the watermark.
%
% The format of the SteganoImage method is:
%
% Image *SteganoImage(const Image *image,Image *watermark,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o watermark: the watermark image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SteganoImage() hides the watermark image in the low-order bits of a copy
  of 'image'.  Bit i of each watermark pixel's intensity is written into
  bit j of one channel (cycling red/green/blue) of successive pixels,
  starting at pixel index image->offset and walking the image in row-major
  order; j advances each time the walk wraps back to the start offset.
  Returns the steganographic image (caller owns it) or NULL on failure.

  Fix: the pixel-index wrap test used columns*columns instead of
  columns*rows, so the walk wrapped at the wrong place for any non-square
  image (losing capacity when rows > columns, never wrapping when
  rows < columns).
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
  ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (alpha)=(Quantum) ((set) != 0 ? (size_t) (alpha) \
  | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag "Stegano/Image"

  CacheView
    *stegano_view,
    *watermark_view;

  Image
    *stegano_image;

  int
    c;

  MagickBooleanType
    status;

  PixelPacket
    pixel;

  register PixelPacket
    *q;

  register ssize_t
    x;

  size_t
    depth,
    one;

  ssize_t
    i,
    j,
    k,
    y;

  /*
    Initialize steganographic image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(watermark != (const Image *) NULL);
  assert(watermark->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  one=1UL;
  stegano_image=CloneImage(image,0,0,MagickTrue,exception);
  if (stegano_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stegano_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&stegano_image->exception);
      stegano_image=DestroyImage(stegano_image);
      return((Image *) NULL);
    }
  stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
  /*
    Hide watermark in low-order bits of image.
  */
  c=0;
  i=0;
  j=0;
  depth=stegano_image->depth;
  k=image->offset;
  status=MagickTrue;
  watermark_view=AcquireVirtualCacheView(watermark,exception);
  stegano_view=AcquireAuthenticCacheView(stegano_image,exception);
  /* i walks watermark intensity bits high-to-low; j is the target bit */
  for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
  {
    for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
    {
      for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
      {
        (void) GetOneCacheViewVirtualPixel(watermark_view,x,y,&pixel,exception);
        if ((k/(ssize_t) stegano_image->columns) >= (ssize_t) stegano_image->rows)
          break;
        q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
          stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
          exception);
        if (q == (PixelPacket *) NULL)
          break;
        /* cycle the carrier channel: red, green, blue, red, ... */
        switch (c)
        {
          case 0:
          {
            SetBit(GetPixelRed(q),j,GetBit(ClampToQuantum(GetPixelIntensity(
              image,&pixel)),i));
            break;
          }
          case 1:
          {
            SetBit(GetPixelGreen(q),j,GetBit(ClampToQuantum(GetPixelIntensity(
              image,&pixel)),i));
            break;
          }
          case 2:
          {
            SetBit(GetPixelBlue(q),j,GetBit(ClampToQuantum(GetPixelIntensity(
              image,&pixel)),i));
            break;
          }
        }
        if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
          break;
        c++;
        if (c == 3)
          c=0;
        k++;
        /* wrap past the last pixel (row-major index range is columns*rows;
           the original wrapped at columns*columns) */
        if (k == (ssize_t) (stegano_image->columns*stegano_image->rows))
          k=0;
        if (k == image->offset)
          j++;
      }
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
          (depth-i),depth);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  stegano_view=DestroyCacheView(stegano_view);
  watermark_view=DestroyCacheView(watermark_view);
  if (stegano_image->storage_class == PseudoClass)
    (void) SyncImage(stegano_image);
  if (status == MagickFalse)
    stegano_image=DestroyImage(stegano_image);
  return(stegano_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e r e o A n a g l y p h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StereoAnaglyphImage() combines two images and produces a single image that
% is the composite of a left and right image of a stereo pair. Special
% red-green stereo glasses are required to view this effect.
%
% The format of the StereoAnaglyphImage method is:
%
% Image *StereoImage(const Image *left_image,const Image *right_image,
% ExceptionInfo *exception)
% Image *StereoAnaglyphImage(const Image *left_image,
% const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o left_image: the left image.
%
% o right_image: the right image.
%
% o exception: return any errors or warnings in this structure.
%
% o x_offset: amount, in pixels, by which the left image is offset to the
% right of the right image.
%
% o y_offset: amount, in pixels, by which the left image is offset to the
% bottom of the right image.
%
%
*/
/*
  StereoImage() composites a left/right stereo pair into a single anaglyph
  with no relative offset; it simply delegates to StereoAnaglyphImage()
  with zero x and y offsets.
*/
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  const ssize_t
    no_offset = 0;

  return(StereoAnaglyphImage(left_image,right_image,no_offset,no_offset,
    exception));
}
/*
  StereoAnaglyphImage() composites a left/right stereo pair into a single
  red/cyan anaglyph: the left image supplies the red channel, the right
  image supplies the green and blue channels, and opacity is averaged.
  x_offset/y_offset shift where the left frame is sampled relative to the
  right one.  Returns the composite, or NULL on failure.
*/
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag "Stereo/Image"

  const Image
    *image;

  Image
    *stereo_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Validate arguments: both frames must exist and share the same geometry.
    (A duplicated assert on right_image was removed.)
  */
  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  image=left_image;
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&stereo_image->exception);
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  (void) SetImageColorspace(stereo_image,sRGBColorspace);
  /*
    Copy left image to red channel and right image to green/blue channels.
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict r;

    /*
      The left frame is read through the virtual pixel interface so the
      offsets may move it off-canvas without faulting.
    */
    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    if ((p == (const PixelPacket *) NULL) ||
        (q == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
      break;
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      SetPixelRed(r,GetPixelRed(p));
      SetPixelGreen(r,GetPixelGreen(q));
      SetPixelBlue(r,GetPixelBlue(q));
      SetPixelOpacity(r,(GetPixelOpacity(p)+GetPixelOpacity(q))/2);
      p++;
      q++;
      r++;
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      break;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  if (status == MagickFalse)
    stereo_image=DestroyImage(stereo_image);
  return(stereo_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S w i r l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SwirlImage() swirls the pixels about the center of the image, where
% degrees indicates the sweep of the arc through which each pixel is moved.
% You get a more dramatic effect as the degrees move from 1 to 360.
%
% The format of the SwirlImage method is:
%
% Image *SwirlImage(const Image *image,double degrees,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o degrees: Define the tightness of the swirling effect.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SwirlImage() swirls pixels about the center of the image; 'degrees' gives
  the sweep of the arc through which each pixel is moved.  Rotation decays
  quadratically from the center to zero at the bounding circle, producing
  the whirlpool look.  Pixels outside the circle keep their cloned values.
  Returns the swirled image, or NULL on failure.
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
  ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"

  CacheView
    *image_view,
    *swirl_view;

  double
    radius;

  Image
    *swirl_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize swirl image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  swirl_image=CloneImage(image,0,0,MagickTrue,exception);
  if (swirl_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(swirl_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&swirl_image->exception);
      swirl_image=DestroyImage(swirl_image);
      return((Image *) NULL);
    }
  /* a translucent background can show through: enable the alpha channel */
  if (swirl_image->background_color.opacity != OpaqueOpacity)
    swirl_image->matte=MagickTrue;
  /*
    Compute scaling factor: map the image onto a circle so the swirl stays
    round even for non-square images.
  */
  center.x=(double) image->columns/2.0;
  center.y=(double) image->rows/2.0;
  radius=MagickMax(center.x,center.y);
  scale.x=1.0;
  scale.y=1.0;
  if (image->columns > image->rows)
    scale.y=(double) image->columns/(double) image->rows;
  else
    if (image->columns < image->rows)
      scale.x=(double) image->rows/(double) image->columns;
  degrees=(double) DegreesToRadians(degrees);
  /*
    Swirl image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(swirl_image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
  swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,swirl_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      distance;

    MagickPixelPacket
      pixel;

    PointInfo
      delta;

    register IndexPacket
      *magick_restrict swirl_indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    swirl_indexes=GetCacheViewAuthenticIndexQueue(swirl_view);
    delta.y=scale.y*(double) (y-center.y);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance < (radius*radius))
        {
          double
            cosine,
            factor,
            sine;

          /*
            Swirl the pixel: rotate the sampling point about the center by
            degrees*factor^2, where factor falls from 1 at the center to 0
            at the radius, then interpolate the source at that location.
          */
          factor=1.0-sqrt((double) distance)/radius;
          sine=sin((double) (degrees*factor*factor));
          cosine=cos((double) (degrees*factor*factor));
          (void) InterpolateMagickPixelPacket(image,image_view,
            UndefinedInterpolatePixel,(double) ((cosine*delta.x-sine*delta.y)/
            scale.x+center.x),(double) ((sine*delta.x+cosine*delta.y)/scale.y+
            center.y),&pixel,exception);
          SetPixelPacket(swirl_image,&pixel,q,swirl_indexes+x);
        }
      q++;
    }
    if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SwirlImage)
#endif
        proceed=SetImageProgress(image,SwirlImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  swirl_view=DestroyCacheView(swirl_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    swirl_image=DestroyImage(swirl_image);
  return(swirl_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TintImage() applies a color vector to each pixel in the image. The length
% of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
% The format of the TintImage method is:
%
% Image *TintImage(const Image *image,const char *opacity,
% const PixelPacket tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: the percent tint to apply to each channel as a geometry
% string (e.g. "50" or "40,60,20[,psi]"); it is not a color value.
%
% o tint: A color value used for tinting.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  TintImage() blends the tint color into each pixel of a copy of 'image'.
  The blend strength per channel follows f(x)=1-4*(x-0.5)^2 of the channel
  value, so pure black and pure white are untouched and midtones receive
  the full tint vector.  Returns the tinted image, or NULL on failure.
*/
MagickExport Image *TintImage(const Image *image,const char *opacity,
  const PixelPacket tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"

  CacheView
    *image_view,
    *tint_view;

  GeometryInfo
    geometry_info;

  Image
    *tint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    color_vector,
    pixel;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate tint image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  tint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&tint_image->exception);
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  /* a non-gray tint forces a gray image into sRGB so color can be stored */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsPixelGray(&tint) == MagickFalse))
    (void) SetImageColorspace(tint_image,sRGBColorspace);
  /* no opacity string means no tint strength: return the plain clone */
  if (opacity == (const char *) NULL)
    return(tint_image);
  /*
    Determine RGB values of the tint color.  The opacity geometry supplies
    per-channel tint percentages: rho (red, and the default for all
    channels), sigma (green), xi (blue), psi (opacity).
    NOTE(review): 'flags' is not checked for a failed parse — confirm
    ParseGeometry() initializes geometry_info on malformed input.
  */
  flags=ParseGeometry(opacity,&geometry_info);
  pixel.red=geometry_info.rho;
  pixel.green=geometry_info.rho;
  pixel.blue=geometry_info.rho;
  pixel.opacity=(MagickRealType) OpaqueOpacity;
  if ((flags & SigmaValue) != 0)
    pixel.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    pixel.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    pixel.opacity=geometry_info.psi;
  /*
    The tint vector is the tint color scaled by the requested percentage,
    centered on the tint's intensity.
  */
  color_vector.red=(MagickRealType) (pixel.red*tint.red/100.0-
    PixelPacketIntensity(&tint));
  color_vector.green=(MagickRealType) (pixel.green*tint.green/100.0-
    PixelPacketIntensity(&tint));
  color_vector.blue=(MagickRealType) (pixel.blue*tint.blue/100.0-
    PixelPacketIntensity(&tint));
  /*
    Tint image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,tint_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        weight;

      MagickPixelPacket
        pixel;  /* shadows the outer 'pixel'; only red/green/blue are used */

      /* weight in [-0.5,0.5]; 1-4*weight^2 peaks at midtones */
      weight=QuantumScale*GetPixelRed(p)-0.5;
      pixel.red=(MagickRealType) GetPixelRed(p)+color_vector.red*(1.0-(4.0*
        (weight*weight)));
      SetPixelRed(q,ClampToQuantum(pixel.red));
      weight=QuantumScale*GetPixelGreen(p)-0.5;
      pixel.green=(MagickRealType) GetPixelGreen(p)+color_vector.green*(1.0-
        (4.0*(weight*weight)));
      SetPixelGreen(q,ClampToQuantum(pixel.green));
      weight=QuantumScale*GetPixelBlue(p)-0.5;
      pixel.blue=(MagickRealType) GetPixelBlue(p)+color_vector.blue*(1.0-(4.0*
        (weight*weight)));
      SetPixelBlue(q,ClampToQuantum(pixel.blue));
      SetPixelOpacity(q,GetPixelOpacity(p));
      p++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TintImage)
#endif
        proceed=SetImageProgress(image,TintImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V i g n e t t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VignetteImage() softens the edges of the image in vignette style.
%
% The format of the VignetteImage method is:
%
% Image *VignetteImage(const Image *image,const double radius,
% const double sigma,const ssize_t x,const ssize_t y,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x, y: Define the x and y ellipse offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  VignetteImage() softens the edges of the image in vignette style: a white
  ellipse on a black background, blurred by the given radius/sigma, is
  copied into the image's opacity channel and the result is flattened onto
  the background color.  x/y shrink the ellipse axes from the image's half
  dimensions.  Returns the vignetted image, or NULL on failure.
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    primitive[MaxTextExtent];

  DrawInfo
    *draw_info;

  Image
    *blur,
    *canvas,
    *oval,
    *vignette;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Work on a DirectClass copy of the input with alpha enabled.
  */
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas,DirectClass) == MagickFalse)
    {
      InheritException(exception,&canvas->exception);
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  canvas->matte=MagickTrue;
  /*
    Draw a white ellipse, centered on the image, over a black background:
    the sharp vignette mask.
  */
  oval=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,exception);
  if (oval == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  (void) QueryColorDatabase("#000000",&oval->background_color,exception);
  (void) SetImageBackgroundColor(oval);
  draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorDatabase("#ffffff",&draw_info->fill,exception);
  (void) QueryColorDatabase("#ffffff",&draw_info->stroke,exception);
  (void) FormatLocaleString(primitive,MaxTextExtent,
    "ellipse %g,%g,%g,%g,0.0,360.0",image->columns/2.0,
    image->rows/2.0,image->columns/2.0-x,image->rows/2.0-y);
  draw_info->primitive=AcquireString(primitive);
  (void) DrawImage(oval,draw_info);
  draw_info=DestroyDrawInfo(draw_info);
  /*
    Soften the mask edge, then copy it into the canvas' opacity channel.
  */
  blur=BlurImage(oval,radius,sigma,exception);
  oval=DestroyImage(oval);
  if (blur == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  blur->matte=MagickFalse;
  (void) CompositeImage(canvas,CopyOpacityCompositeOp,blur,0,0);
  blur=DestroyImage(blur);
  /*
    Flatten the transparent border onto the background color and restore
    the input's colorspace.
  */
  vignette=MergeImageLayers(canvas,FlattenLayer,exception);
  canvas=DestroyImage(canvas);
  if (vignette != (Image *) NULL)
    (void) TransformImageColorspace(vignette,image->colorspace);
  return(vignette);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveImage() creates a "ripple" effect in the image by shifting the pixels
% vertically along a sine wave whose amplitude and wavelength is specified
% by the given parameters.
%
% The format of the WaveImage method is:
%
% Image *WaveImage(const Image *image,const double amplitude,
% const double wave_length,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o amplitude, wave_length: Define the amplitude and wave length of the
% sine wave.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  WaveImage() creates a "ripple" effect by sampling each destination pixel
  from a source location displaced vertically along a sine wave of the
  given amplitude and wave length.  The result is taller than the input by
  2*|amplitude| so the ripple never clips.  Returns the waved image, or
  NULL on failure.
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,ExceptionInfo *exception)
{
#define WaveImageTag "Wave/Image"

  CacheView
    *image_view,
    *wave_view;

  Image
    *wave_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    *sine_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize wave image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Pad the height by 2*|amplitude| so displaced rows stay on canvas.
  */
  wave_image=CloneImage(image,image->columns,(size_t) (image->rows+2.0*
    fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(wave_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&wave_image->exception);
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  if (wave_image->background_color.opacity != OpaqueOpacity)
    wave_image->matte=MagickTrue;
  /*
    Allocate sine map: one vertical displacement per output column.
    NOTE(review): a zero wave_length divides by zero below — confirm
    callers validate the argument.
  */
  sine_map=(MagickRealType *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (MagickRealType *) NULL)
    {
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=fabs(amplitude)+amplitude*sin((double) ((2.0*MagickPI*i)/
      wave_length));
  /*
    Wave image.
  */
  status=MagickTrue;
  progress=0;
  GetMagickPixelPacket(wave_image,&zero);
  image_view=AcquireVirtualCacheView(image,exception);
  wave_view=AcquireAuthenticCacheView(wave_image,exception);
  /*
    Source rows sampled outside the input resolve to the background color.
  */
  (void) SetCacheViewVirtualPixelMethod(image_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,wave_image,wave_image->rows,1)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(wave_view);
    pixel=zero;
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      /*
        Interpolate the source image at (x,y-sine_map[x]).
      */
      (void) InterpolateMagickPixelPacket(image,image_view,
        UndefinedInterpolatePixel,(double) x,(double) (y-sine_map[x]),&pixel,
        exception);
      SetPixelPacket(wave_image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WaveImage)
#endif
        /*
          The loop runs wave_image->rows iterations, so use the output
          height as the progress span (image->rows overshot 100%).
        */
        proceed=SetImageProgress(image,WaveImageTag,progress++,
          wave_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  image_view=DestroyCacheView(image_view);
  sine_map=(MagickRealType *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e l e t D e n o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveletDenoiseImage() removes noise from the image using a wavelet
% transform. The wavelet transform is a fast hierarchical scheme for
% processing an image using a set of consecutive lowpass and high_pass filters,
% followed by a decimation. This results in a decomposition into different
% scales which can be regarded as different “frequency bands”, determined by
% the mother wavelet. Adapted from dcraw.c by David Coffin.
%
% The format of the WaveletDenoiseImage method is:
%
% Image *WaveletDenoiseImage(const Image *image,const double threshold,
% const double softness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: set the threshold for smoothing.
%
% o softness: attenuate the smoothing threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  HatTransform() applies one level of the 'a trous hat filter along a row
  or column of samples:

    kernel[n]=0.25*(2*pixels[n]+pixels[n-scale]+pixels[n+scale])

  where 'stride' converts sample indices to array offsets and taps that
  fall outside [0,extent) are mirrored back into the signal.  Used by
  WaveletDenoiseImage() for each decomposition level.
*/
static inline void HatTransform(const float *magick_restrict pixels,
  const size_t stride,const size_t extent,const size_t scale,float *kernel)
{
  ssize_t
    n;

  /*
    Leading edge: the left tap reflects about sample 0.
  */
  for (n=0; n < (ssize_t) scale; n++)
    kernel[n]=0.25f*(2.0f*pixels[n*stride]+pixels[(scale-n)*stride]+
      pixels[(scale+n)*stride]);
  /*
    Interior: both taps are in range.
  */
  for ( ; n < (ssize_t) (extent-scale); n++)
    kernel[n]=0.25f*(2.0f*pixels[n*stride]+pixels[(n-scale)*stride]+
      pixels[(n+scale)*stride]);
  /*
    Trailing edge: the right tap reflects about the last sample.
  */
  for ( ; n < (ssize_t) extent; n++)
    kernel[n]=0.25f*(2.0f*pixels[n*stride]+pixels[(n-scale)*stride]+
      pixels[(2*extent-2-(n+scale))*stride]);
}
/*
  WaveletDenoiseImage() suppresses noise with an 'a trous wavelet
  transform: each channel is decomposed into five detail levels with the
  hat filter, detail coefficients are soft-thresholded against
  threshold*noise_levels[level], attenuated by 'softness', and the channel
  is rebuilt from the attenuated levels.  Adapted from dcraw.c by David
  Coffin.  Returns the denoised image, or NULL on failure.
*/
MagickExport Image *WaveletDenoiseImage(const Image *image,
  const double threshold,const double softness,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *noise_view;

  float
    *kernel,
    *pixels;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickSizeType
    number_pixels;

  MemoryInfo
    *pixels_info;

  size_t
    max_channels;

  ssize_t
    channel;

  /*
    Expected relative noise magnitude per decomposition level.
  */
  static const double
    noise_levels[]= {
      0.8002, 0.2735, 0.1202, 0.0585, 0.0291, 0.0152, 0.0080, 0.0044 };

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /*
    Prefer the accelerated (OpenCL) implementation when it is available.
  */
  noise_image=(Image *) NULL;
  noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
  noise_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  /*
    The working buffer holds three image-sized bands per channel: the
    signal plus two scratch bands the levels ping-pong between.
  */
  if (AcquireMagickResource(WidthResource,3*image->columns) == MagickFalse)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  pixels_info=AcquireVirtualMemory(3*image->columns,image->rows*
    sizeof(*pixels));
  kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns),
    GetOpenMPMaximumThreads()*sizeof(*kernel));
  if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL))
    {
      if (kernel != (float *) NULL)
        kernel=(float *) RelinquishMagickMemory(kernel);
      if (pixels_info != (MemoryInfo *) NULL)
        pixels_info=RelinquishVirtualMemory(pixels_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(float *) GetVirtualMemoryBlob(pixels_info);
  status=MagickTrue;
  number_pixels=image->columns*image->rows;
  max_channels=(size_t) (image->colorspace == CMYKColorspace ? 4 : 3);
  image_view=AcquireAuthenticCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
  for (channel=0; channel < (ssize_t) max_channels; channel++)
  {
    register ssize_t
      i;

    size_t
      high_pass,
      low_pass;

    ssize_t
      level,
      y;

    if (status == MagickFalse)
      continue;
    /*
      Copy channel from image to wavelet pixel array.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      register const IndexPacket
        *magick_restrict indexes;

      register const PixelPacket
        *magick_restrict p;

      ssize_t
        x;

      p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const PixelPacket *) NULL)
        {
          status=MagickFalse;
          break;
        }
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        switch (channel)
        {
          case 0: pixels[i]=(float) GetPixelRed(p); break;
          case 1: pixels[i]=(float) GetPixelGreen(p); break;
          case 2: pixels[i]=(float) GetPixelBlue(p); break;
          case 3: pixels[i]=(float) indexes[x]; break;
          default: break;
        }
        i++;
        p++;
      }
    }
    /*
      Low pass filter outputs are called approximation kernel & high pass
      filters are referred to as detail kernel. The detail kernel
      have high values in the noisy parts of the signal.
    */
    high_pass=0;
    for (level=0; level < 5; level++)
    {
      double
        magnitude;

      ssize_t
        x,
        y;

      /*
        Ping-pong between the two scratch bands of the pixel buffer.
      */
      low_pass=(size_t) (number_pixels*((level & 0x01)+1));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        register float
          *magick_restrict p,
          *magick_restrict q;

        register ssize_t
          x;

        /*
          Horizontal hat filter; each thread owns one slice of 'kernel'.
          (The previous '(const float *)' cast of 'kernel' was invalid:
          'p' is the filter's output buffer and must stay non-const.)
        */
        p=kernel+id*image->columns;
        q=pixels+y*image->columns;
        HatTransform(q+high_pass,1,image->columns,(size_t) (1 << level),p);
        q+=low_pass;
        for (x=0; x < (ssize_t) image->columns; x++)
          *q++=(*p++);
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,1) \
        magick_threads(image,image,image->columns,1)
#endif
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        const int
          id = GetOpenMPThreadId();

        register float
          *magick_restrict p,
          *magick_restrict q;

        register ssize_t
          y;

        /*
          Vertical hat filter over the low-pass band, written in place.
        */
        p=kernel+id*image->rows;
        q=pixels+x+low_pass;
        HatTransform(q,image->columns,image->rows,(size_t) (1 << level),p);
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          *q=(*p++);
          q+=image->columns;
        }
      }
      /*
        To threshold, each coefficient is compared to a threshold value and
        attenuated / shrunk by some factor.
      */
      magnitude=threshold*noise_levels[level];
      for (i=0; i < (ssize_t) number_pixels; ++i)
      {
        pixels[high_pass+i]-=pixels[low_pass+i];
        if (pixels[high_pass+i] < -magnitude)
          pixels[high_pass+i]+=magnitude-softness*magnitude;
        else
          if (pixels[high_pass+i] > magnitude)
            pixels[high_pass+i]-=magnitude-softness*magnitude;
          else
            pixels[high_pass+i]*=softness;
        if (high_pass != 0)
          pixels[i]+=pixels[high_pass+i];
      }
      high_pass=low_pass;
    }
    /*
      Reconstruct image from the thresholded wavelet kernel.
    */
    i=0;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register IndexPacket
        *magick_restrict noise_indexes;

      register PixelPacket
        *magick_restrict q;

      register ssize_t
        x;

      q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
        exception);
      if (q == (PixelPacket *) NULL)
        {
          status=MagickFalse;
          break;
        }
      noise_indexes=GetCacheViewAuthenticIndexQueue(noise_view);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          pixel;

        pixel=pixels[i]+pixels[low_pass+i];
        switch (channel)
        {
          case 0: SetPixelRed(q,ClampToQuantum(pixel)); break;
          case 1: SetPixelGreen(q,ClampToQuantum(pixel)); break;
          case 2: SetPixelBlue(q,ClampToQuantum(pixel)); break;
          case 3: SetPixelIndex(noise_indexes+x,ClampToQuantum(pixel)); break;
          default: break;
        }
        i++;
        q++;
      }
      sync=SyncCacheViewAuthenticPixels(noise_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          NOTE(review): progress is reported under AddNoiseImageTag; a
          wavelet-specific tag would be clearer — confirm before changing,
          since monitors may filter on the tag string.
        */
        proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType)
          channel,max_channels);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  kernel=(float *) RelinquishMagickMemory(kernel);
  pixels_info=RelinquishVirtualMemory(pixels_info);
  return(noise_image);
}
|
dtrsm.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/ztrsm.c, normal z -> d, Fri Sep 28 17:38:03 2018
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_trsm
*
* Solves one of the matrix equations
*
* \f[ op( A ) \times X = \alpha B, \f] or
* \f[ X \times op( A ) = \alpha B, \f]
*
* where op( A ) is one of:
* \f[ op( A ) = A, \f]
 * \f[ op( A ) = A^T, \f]
*
* alpha is a scalar, X and B are m-by-n matrices, and
* A is a unit or non-unit, upper or lower triangular matrix.
* The matrix X overwrites B.
*
*******************************************************************************
*
* @param[in] side
* - PlasmaLeft: op(A)*X = B,
* - PlasmaRight: X*op(A) = B.
*
* @param[in] uplo
* - PlasmaUpper: A is upper triangular,
* - PlasmaLower: A is lower triangular.
*
* @param[in] transa
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* - PlasmaNonUnit: A has non-unit diagonal,
* - PlasmaUnit: A has unit diagonal.
*
* @param[in] m
* The number of rows of the matrix B. m >= 0.
*
* @param[in] n
* The number of columns of the matrix B. n >= 0.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] pA
* The k-by-k triangular matrix,
* where k = m if side = PlasmaLeft,
* and k = n if side = PlasmaRight.
* If uplo = PlasmaUpper, the leading k-by-k upper triangular part
* of the array A contains the upper triangular matrix, and the
* strictly lower triangular part of A is not referenced.
* If uplo = PlasmaLower, the leading k-by-k lower triangular part
* of the array A contains the lower triangular matrix, and the
* strictly upper triangular part of A is not referenced.
* If diag = PlasmaUnit, the diagonal elements of A are also not
* referenced and are assumed to be 1.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,k).
*
* @param[in,out] pB
* On entry, the m-by-n right hand side matrix B.
* On exit, if return value = 0, the m-by-n solution matrix X.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,m).
*
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
*
*******************************************************************************
*
* @sa plasma_omp_dtrsm
* @sa plasma_ctrsm
* @sa plasma_dtrsm
* @sa plasma_strsm
*
******************************************************************************/
int plasma_dtrsm(plasma_enum_t side, plasma_enum_t uplo,
                 plasma_enum_t transa, plasma_enum_t diag,
                 int m, int n,
                 double alpha, double *pA, int lda,
                 double *pB, int ldb)
{
    // Fetch the library context; nothing can run without an initialized one.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Validate the scalar arguments in positional order; the negated
    // return value identifies the offending argument, LAPACK style.
    if (side != PlasmaLeft && side != PlasmaRight) {
        plasma_error("illegal value of side");
        return -1;
    }
    if (uplo != PlasmaUpper && uplo != PlasmaLower) {
        plasma_error("illegal value of uplo");
        return -2;
    }
    if (transa != PlasmaConjTrans && transa != PlasmaNoTrans &&
        transa != PlasmaTrans) {
        plasma_error("illegal value of transa");
        return -3;
    }
    if (diag != PlasmaUnit && diag != PlasmaNonUnit) {
        plasma_error("illegal value of diag");
        return -4;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return -5;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -6;
    }

    // A is k-by-k, where k depends on which side it multiplies from.
    int k = (side == PlasmaLeft) ? m : n;
    if (lda < imax(1, k)) {
        plasma_error("illegal value of lda");
        return -8;
    }
    if (ldb < imax(1, m)) {
        plasma_error("illegal value of ldb");
        return -10;
    }

    // Quick return for an empty right-hand side.
    if (m == 0 || n == 0)
        return PlasmaSuccess;

    // Tune parameters, then pick up the tile size.
    if (plasma->tuning)
        plasma_tune_trsm(plasma, PlasmaRealDouble, m, n);
    int nb = plasma->nb;

    // Create tile descriptors for the triangular factor and the RHS.
    plasma_desc_t A;
    plasma_desc_t B;
    int rc = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                        k, k, 0, 0, k, k, &A);
    if (rc != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return rc;
    }
    rc = plasma_desc_general_create(PlasmaRealDouble, nb, nb,
                                    m, n, 0, 0, m, n, &B);
    if (rc != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return rc;
    }

    // Initialize the sequence and request used by the asynchronous region.
    plasma_sequence_t sequence;
    rc = plasma_sequence_init(&sequence);

    plasma_request_t request;
    rc = plasma_request_init(&request);

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_dge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_dge2desc(pB, ldb, B, &sequence, &request);

        // Call the tile async function.
        plasma_omp_dtrsm(side, uplo, transa, diag,
                         alpha, A,
                                B,
                         &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_ddesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status accumulated by the asynchronous calls.
    return sequence.status;
}
/***************************************************************************//**
*
* @ingroup plasma_trsm
*
* Computes triangular solve.
* Non-blocking tile version of plasma_dtrsm().
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] side
* - PlasmaLeft: op(A)*X = B,
* - PlasmaRight: X*op(A) = B.
*
* @param[in] uplo
* - PlasmaUpper: A is upper triangular,
* - PlasmaLower: A is lower triangular.
*
* @param[in] transa
* - PlasmaNoTrans: A is not transposed,
* - PlasmaTrans: A is transposed,
* - PlasmaConjTrans: A is conjugate transposed.
*
* @param[in] diag
* - PlasmaNonUnit: A has non-unit diagonal,
* - PlasmaUnit: A has unit diagonal.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] A
* Descriptor of matrix A.
*
* @param[in] B
* Descriptor of matrix B.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_dtrsm
* @sa plasma_omp_ctrsm
* @sa plasma_omp_dtrsm
* @sa plasma_omp_strsm
*
******************************************************************************/
void plasma_omp_dtrsm(plasma_enum_t side, plasma_enum_t uplo,
                      plasma_enum_t transa, plasma_enum_t diag,
                      double alpha, plasma_desc_t A,
                                    plasma_desc_t B,
                      plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Check sequence and request FIRST: every later error path reports
    // through plasma_request_fail(), which dereferences both pointers.
    // (Previously these NULL checks came after several such calls, so a
    // NULL sequence/request was dereferenced before being detected.)
    if (sequence == NULL) {
        plasma_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_error("NULL request");
        return;
    }
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // Check input arguments.
    if ((side != PlasmaLeft) &&
        (side != PlasmaRight)) {
        plasma_error("illegal value of side");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((transa != PlasmaConjTrans) &&
        (transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans)) {
        plasma_error("illegal value of transa");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if ((diag != PlasmaUnit) &&
        (diag != PlasmaNonUnit)) {
        plasma_error("illegal value of diag");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    // quick return
    if ((B.m == 0) || (B.n == 0))
        return;
    // Call the parallel function.
    plasma_pdtrsm(side, uplo,
                  transa, diag,
                  alpha, A,
                         B,
                  sequence, request);
}
|
selu_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: hhchen@openailab.com
*/
#include "selu_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "utility/sys_port.h"
#include "utility/float.h"
#include "utility/log.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include <math.h>
/* SELU activation, fp32 reference implementation:
 *   out = lambda * x                      for x >= 0
 *   out = lambda * alpha * (exp(x) - 1)   for x <  0
 * Operates on 4D NCHW tensors; channels are processed in parallel.
 * Returns 0 on success.
 * (Removed unused locals `data`, `out_data` and the unused per-iteration
 * `offset` from the original.) */
int ref_selu_fp32(struct tensor* output_tensor, struct tensor* input_tensor, struct selu_param* selu_param,
                  int num_thread)
{
    float alpha = selu_param->alpha;
    float lambda = selu_param->lambda;
    float alpha_lambda = alpha * lambda;

    /* treat the tensor as chan_num independent planes of chan_size elements */
    int chan_num = input_tensor->dims[0] * input_tensor->dims[1];
    int chan_size = input_tensor->dims[2] * input_tensor->dims[3];

#pragma omp parallel for num_threads(num_thread)
    for (int i = 0; i < chan_num; i++)
    {
        float* input_data = (float*)input_tensor->data + i * chan_size;
        float* output_data = (float*)output_tensor->data + i * chan_size;
        for (int j = 0; j < chan_size; j++)
        {
            if (input_data[j] < 0.f)
                output_data[j] = (exp(input_data[j]) - 1.f) * alpha_lambda;
            else
                output_data[j] = input_data[j] * lambda;
        }
    }

    return 0;
}
int ref_selu_uint8(struct tensor* output_tensor, struct tensor* input_tensor, struct selu_param* selu_param,
int num_thread)
{
/* dequant */
uint8_t* input_uint8 = (uint8_t*)input_tensor->data;
uint8_t* output_uint8 = (uint8_t*)output_tensor->data;
float input_scale = input_tensor->scale;
float output_scale = output_tensor->scale;
int32_t input_zero = input_tensor->zero_point;
int32_t output_zero = output_tensor->zero_point;
int input_size = input_tensor->elem_num;
int output_size = output_tensor->elem_num;
float* input_data = (float*)sys_malloc(input_size * sizeof(float));
float* output_data = (float*)sys_malloc(output_size * sizeof(float));
for (int i = 0; i < input_size; i++)
{
input_data[i] = ((float)input_uint8[i] - (float)input_zero) * input_scale;
}
float alpha = selu_param->alpha;
float lambda = selu_param->lambda;
float alpha_lambda = alpha * lambda;
int chan_num = input_tensor->dims[0] * input_tensor->dims[1];
int chan_size = input_tensor->dims[2] * input_tensor->dims[3];
#pragma omp parallel for num_threads(num_thread)
for (int i = 0; i < chan_num; i++)
{
int offset = i * chan_size;
input_data = (float*)input_tensor->data + i * chan_size;
output_data = (float*)output_tensor->data + i * chan_size;
for (int j = 0; j < chan_size; j++)
{
if (input_data[j] < 0.f)
output_data[j] = (exp(input_data[j]) - 1.f) * alpha_lambda;
else
output_data[j] = input_data[j] * lambda;
}
}
/* quant */
for (int i = 0; i < output_size; i++)
{
int udata = round(output_data[i] / output_scale + output_zero);
if (udata > 255)
udata = 255;
else if (udata < 0)
udata = 0;
output_uint8[i] = udata;
}
sys_free(input_data);
sys_free(output_data);
return 0;
}
/* No per-node state to set up for this reference op; always succeeds. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Nothing was allocated in init_node, so there is nothing to release. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* No pre-run preparation needed; always succeeds. */
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}
/* Executes the SELU node: fetches input/output tensors from the graph and
 * dispatches on the input data type. Returns the kernel's status, or -1
 * for an unsupported data type. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* node = exec_node->ir_node;
    struct graph* graph = node->graph;

    struct tensor* in_tensor = get_ir_graph_tensor(graph, node->input_tensors[0]);
    struct tensor* out_tensor = get_ir_graph_tensor(graph, node->output_tensors[0]);
    struct selu_param* param = (struct selu_param*)node->op.param_mem;
    int thread_count = exec_graph->num_thread;

    switch (in_tensor->data_type)
    {
    case TENGINE_DT_FP32:
        return ref_selu_fp32(out_tensor, in_tensor, param, thread_count);
    case TENGINE_DT_UINT8:
        return ref_selu_uint8(out_tensor, in_tensor, param, thread_count);
    default:
        return -1;
    }
}
/* Reports whether this reference implementation can handle the given node.
 * Returns OPS_SCORE_CANDO for FP32/NCHW input, 0 (cannot handle) otherwise.
 * NOTE(review): run() also implements a UINT8 path, but this score rejects
 * UINT8 input — confirm whether UINT8 should also score as CANDO. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    struct node* ir_node = exec_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);

    if (input_tensor->data_type != TENGINE_DT_FP32 || input_tensor->layout != TENGINE_LAYOUT_NCHW)
        return 0;

    return OPS_SCORE_CANDO;
}
/* Dispatch table for the reference SELU op; reshape/postrun hooks are not
 * needed and therefore NULL. */
static struct node_ops hcl_node_ops = {.prerun = prerun,
                                       .run = run,
                                       .reshape = NULL,
                                       .postrun = NULL,
                                       .init_node = init_node,
                                       .release_node = release_node,
                                       .score = score};
/* Registers the reference SELU implementation with the built-in op registry. */
int register_selu_ref_op()
{
    return register_builtin_node_ops(OP_SELU, &hcl_node_ops);
}
/* Removes the reference SELU implementation from the built-in op registry. */
int unregister_selu_ref_op()
{
    return unregister_builtin_node_ops(OP_SELU, &hcl_node_ops);
}
|
mapper_utilities.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Philipp Bucher, Jordi Cotela
//
// See Master-Thesis P.Bucher
// "Development and Implementation of a Parallel
// Framework for Non-Matching Grid Mapping"
#if !defined(KRATOS_MAPPER_UTILITIES_H_INCLUDED)
#define KRATOS_MAPPER_UTILITIES_H_INCLUDED
// System includes
// External includes
// Project includes
#include "includes/model_part.h"
#include "custom_utilities/mapper_flags.h"
#include "custom_utilities/mapper_local_system.h"
namespace Kratos
{
namespace MapperUtilities
{
typedef std::size_t SizeType;
typedef std::size_t IndexType;
typedef Node<3> NodeType;
typedef Kratos::unique_ptr<MapperInterfaceInfo> MapperInterfaceInfoUniquePointerType;
typedef Kratos::shared_ptr<MapperInterfaceInfo> MapperInterfaceInfoPointerType;
typedef std::vector<std::vector<MapperInterfaceInfoPointerType>> MapperInterfaceInfoPointerVectorType;
typedef Kratos::unique_ptr<MapperLocalSystem> MapperLocalSystemPointer;
typedef std::vector<MapperLocalSystemPointer> MapperLocalSystemPointerVector;
typedef Kratos::shared_ptr<MapperLocalSystemPointerVector> MapperLocalSystemPointerVectorPointer;
/// Reads the historical (solution-step) value of rVariable from rNode into rValue.
template< class TVarType >
static void FillFunction(const NodeType& rNode,
                         const TVarType& rVariable,
                         double& rValue)
{
    rValue = rNode.FastGetSolutionStepValue(rVariable);
}
/// Reads the non-historical (nodal data container) value of rVariable from rNode into rValue.
template< class TVarType >
static void FillFunctionNonHist(const NodeType& rNode,
                                const TVarType& rVariable,
                                double& rValue)
{
    rValue = rNode.GetValue(rVariable);
}
/// Selects the read accessor (historical vs non-historical) once, so the
/// branch is not re-evaluated inside the fill loop.
template< class TVarType >
static std::function<void(const NodeType&, const TVarType&, double&)>
GetFillFunction(const Kratos::Flags& rMappingOptions)
{
    const bool read_non_historical = rMappingOptions.Is(MapperFlags::FROM_NON_HISTORICAL);
    return read_non_historical ? &FillFunctionNonHist<TVarType>
                               : &FillFunction<TVarType>;
}
/// Overwrites the historical value of rVariable on rNode with Value * Factor.
template< class TVarType >
static void UpdateFunction(NodeType& rNode,
                           const TVarType& rVariable,
                           const double Value,
                           const double Factor)
{
    rNode.FastGetSolutionStepValue(rVariable) = Value * Factor;
}
/// Adds Value * Factor to the historical value of rVariable on rNode.
template< class TVarType >
static void UpdateFunctionWithAdd(NodeType& rNode,
                                  const TVarType& rVariable,
                                  const double Value,
                                  const double Factor)
{
    rNode.FastGetSolutionStepValue(rVariable) += Value * Factor;
}
/// Overwrites the non-historical value of rVariable on rNode with Value * Factor.
template< class TVarType >
static void UpdateFunctionNonHist(NodeType& rNode,
                                  const TVarType& rVariable,
                                  const double Value,
                                  const double Factor)
{
    rNode.GetValue(rVariable) = Value * Factor;
}
/// Adds Value * Factor to the non-historical value of rVariable on rNode.
template< class TVarType >
static void UpdateFunctionNonHistWithAdd(NodeType& rNode,
                                         const TVarType& rVariable,
                                         const double Value,
                                         const double Factor)
{
    rNode.GetValue(rVariable) += Value * Factor;
}
/// Selects the write accessor once — combination of ADD_VALUES (accumulate
/// vs overwrite) and TO_NON_HISTORICAL (data container vs solution step) —
/// so the flag checks are not repeated inside the update loop.
template< class TVarType >
static std::function<void(NodeType&, const TVarType&, const double, const double)>
GetUpdateFunction(const Kratos::Flags& rMappingOptions)
{
    const bool accumulate   = rMappingOptions.Is(MapperFlags::ADD_VALUES);
    const bool non_historic = rMappingOptions.Is(MapperFlags::TO_NON_HISTORICAL);

    if (non_historic) {
        return accumulate ? &UpdateFunctionNonHistWithAdd<TVarType>
                          : &UpdateFunctionNonHist<TVarType>;
    }
    return accumulate ? &UpdateFunctionWithAdd<TVarType>
                      : &UpdateFunction<TVarType>;
}
/// Copies a nodal variable of the local mesh into a system vector, entry i
/// taken from the i-th local node.
/// NOTE(review): assumes rVector has at least as many entries as local
/// nodes — confirm at the call sites.
template< class TVectorType, class TVarType >
void UpdateSystemVectorFromModelPart(TVectorType& rVector,
                                     ModelPart& rModelPart,
                                     const TVarType& rVariable,
                                     const Kratos::Flags& rMappingOptions)
{
    // Here we construct a function pointer to not have the if all the time inside the loop
    const auto fill_fct = MapperUtilities::GetFillFunction<TVarType>(rMappingOptions);

    const int num_local_nodes = rModelPart.GetCommunicator().LocalMesh().NumberOfNodes();
    const auto nodes_begin = rModelPart.GetCommunicator().LocalMesh().NodesBegin();

    // Entries are independent, so the copy is parallelized over the nodes.
    #pragma omp parallel for
    for (int i=0; i<num_local_nodes; i++) {
        fill_fct(*(nodes_begin + i), rVariable, rVector[i]);
    }
}
/// Writes a system vector back into a nodal variable of the local mesh,
/// entry i going to the i-th local node. SWAP_SIGN negates the values;
/// ADD_VALUES / TO_NON_HISTORICAL select the write accessor.
template< class TVectorType, class TVarType >
void UpdateModelPartFromSystemVector(const TVectorType& rVector,
                                     ModelPart& rModelPart,
                                     const TVarType& rVariable,
                                     const Kratos::Flags& rMappingOptions)
{
    const double factor = rMappingOptions.Is(MapperFlags::SWAP_SIGN) ? -1.0 : 1.0;

    // Here we construct a function pointer to not have the if all the time inside the loop;
    // the sign factor is bound once instead of being recomputed per node.
    const auto update_fct = std::bind(MapperUtilities::GetUpdateFunction<TVarType>(rMappingOptions),
                                      std::placeholders::_1,
                                      std::placeholders::_2,
                                      std::placeholders::_3,
                                      factor);

    const int num_local_nodes = rModelPart.GetCommunicator().LocalMesh().NumberOfNodes();
    const auto nodes_begin = rModelPart.GetCommunicator().LocalMesh().NodesBegin();

    // Entries are independent, so the write-back is parallelized over the nodes.
    #pragma omp parallel for
    for (int i=0; i<num_local_nodes; i++) {
        update_fct(*(nodes_begin + i), rVariable, rVector[i]);
    }
}
/**
* @brief Assigning INTERFACE_EQUATION_IDs to the nodes, with and without MPI
* This function assigns the INTERFACE_EQUATION_IDs to the nodes, which
* act as EquationIds for the MappingMatrix. This work with and without MPI,
* in MPI a ScanSum is performed with the local number of nodes
* @param rModelPartCommunicator The Modelpart-Communicator to be used
* @author Philipp Bucher
*/
void AssignInterfaceEquationIds(Communicator& rModelPartCommunicator);
/// Creates one TMapperLocalSystem per node of the local mesh, replacing the
/// contents of rLocalSystems. Aborts (KRATOS_ERROR) if, summed over all MPI
/// partitions, no local system was created.
template<class TMapperLocalSystem>
void CreateMapperLocalSystemsFromNodes(const Communicator& rModelPartCommunicator,
                                       std::vector<Kratos::unique_ptr<MapperLocalSystem>>& rLocalSystems)
{
    const std::size_t num_nodes = rModelPartCommunicator.LocalMesh().NumberOfNodes();
    const auto nodes_ptr_begin = rModelPartCommunicator.LocalMesh().Nodes().ptr_begin();

    if (rLocalSystems.size() != num_nodes) {
        rLocalSystems.resize(num_nodes);
    }

    // signed loop index for OpenMP compatibility
    #pragma omp parallel for
    for (int i = 0; i< static_cast<int>(num_nodes); ++i) {
        auto it_node = nodes_ptr_begin + i;
        rLocalSystems[i] = Kratos::make_unique<TMapperLocalSystem>((*it_node).get());
    }

    int num_local_systems = rModelPartCommunicator.GetDataCommunicator().SumAll((int)(rLocalSystems.size())); // int bcs of MPI

    KRATOS_ERROR_IF_NOT(num_local_systems > 0)
        << "No mapper local systems were created" << std::endl;
}
/// Returns the total number of nodes, summed over all MPI partitions.
inline int ComputeNumberOfNodes(ModelPart& rModelPart)
{
    int num_nodes = rModelPart.GetCommunicator().LocalMesh().NumberOfNodes();
    return rModelPart.GetCommunicator().GetDataCommunicator().SumAll(num_nodes); // Compute the sum among the partitions
}
/// Returns the total number of conditions, summed over all MPI partitions.
inline int ComputeNumberOfConditions(ModelPart& rModelPart)
{
    int num_conditions = rModelPart.GetCommunicator().LocalMesh().NumberOfConditions();
    return rModelPart.GetCommunicator().GetDataCommunicator().SumAll(num_conditions); // Compute the sum among the partitions
}
/// Returns the total number of elements, summed over all MPI partitions.
inline int ComputeNumberOfElements(ModelPart& rModelPart)
{
    int num_elements = rModelPart.GetCommunicator().LocalMesh().NumberOfElements();
    return rModelPart.GetCommunicator().GetDataCommunicator().SumAll(num_elements); // Compute the sum among the partitions
}
/// Euclidean distance between two 3D coordinate containers (anything
/// indexable with [0..2]).
/// Uses direct multiplication instead of std::pow(x, 2): pow is a general
/// transcendental function and is far slower for small integer exponents,
/// and this function sits on the hot path of the max-edge-length loops.
template <class T1, class T2>
inline double ComputeDistance(const T1& rCoords1,
                              const T2& rCoords2)
{
    const double dx = rCoords1[0] - rCoords2[0];
    const double dy = rCoords1[1] - rCoords2[1];
    const double dz = rCoords1[2] - rCoords2[2];
    return std::sqrt(dx*dx + dy*dy + dz*dz);
}
/// Returns the longest edge (node-pair distance) over all geometrical
/// entities in the container; 0.0 for an empty container.
template <typename T>
inline double ComputeMaxEdgeLengthLocal(const T& rEntityContainer)
{
    double max_element_size = 0.0;
    // Loop through each edge of a geometrical entity ONCE
    // (inner loops enumerate unordered node pairs i < j within one entity)
    for (const auto& r_entity : rEntityContainer) {
        for (std::size_t i = 0; i < (r_entity.GetGeometry().size() - 1); ++i) {
            for (std::size_t j = i + 1; j < r_entity.GetGeometry().size(); ++j) {
                double edge_length = ComputeDistance(r_entity.GetGeometry()[i].Coordinates(),
                                                     r_entity.GetGeometry()[j].Coordinates());
                max_element_size = std::max(max_element_size, edge_length);
            }
        }
    }
    return max_element_size;
}
/// Returns the largest pairwise distance between the given nodes;
/// 0.0 for an empty or single-node container.
/// Resolves the old TODO: each unordered node pair is now visited exactly
/// once (distance is symmetric and the self-distance is 0), halving the
/// work of the previous full double loop while producing the same result.
inline double ComputeMaxEdgeLengthLocal(const ModelPart::NodesContainerType& rNodes)
{
    double max_element_size = 0.0;
    for (auto it_1 = rNodes.begin(); it_1 != rNodes.end(); ++it_1) {
        auto it_2 = it_1;
        for (++it_2; it_2 != rNodes.end(); ++it_2) {
            const double edge_length = ComputeDistance(it_1->Coordinates(),
                                                       it_2->Coordinates());
            max_element_size = std::max(max_element_size, edge_length);
        }
    }
    return max_element_size;
}
double ComputeSearchRadius(ModelPart& rModelPart, int EchoLevel);
/// Returns the larger of the two model parts' search radii; logs the result
/// when EchoLevel > 0.
inline double ComputeSearchRadius(ModelPart& rModelPart1, ModelPart& rModelPart2, const int EchoLevel)
{
    double search_radius = std::max(ComputeSearchRadius(rModelPart1, EchoLevel),
                                    ComputeSearchRadius(rModelPart2, EchoLevel));
    KRATOS_INFO_IF("Mapper", EchoLevel > 0) << "Computed search-radius: "
        << search_radius << std::endl;
    return search_radius;
}
void CheckInterfaceModelParts(const int CommRank);
std::vector<double> ComputeLocalBoundingBox(ModelPart& rModelPart);
void ComputeBoundingBoxesWithTolerance(const std::vector<double>& rBoundingBoxes,
const double Tolerance,
std::vector<double>& rBoundingBoxesWithTolerance);
std::string BoundingBoxStringStream(const std::vector<double>& rBoundingBox);
bool PointIsInsideBoundingBox(const std::vector<double>& rBoundingBox,
const array_1d<double, 3>& rCoords);
void FillBufferBeforeLocalSearch(const MapperLocalSystemPointerVector& rMapperLocalSystems,
const std::vector<double>& rBoundingBoxes,
const SizeType BufferSizeEstimate,
std::vector<std::vector<double>>& rSendBuffer,
std::vector<int>& rSendSizes);
void CreateMapperInterfaceInfosFromBuffer(const std::vector<std::vector<double>>& rRecvBuffer,
const MapperInterfaceInfoUniquePointerType& rpRefInterfaceInfo,
const int CommRank,
MapperInterfaceInfoPointerVectorType& rMapperInterfaceInfosContainer);
void FillBufferAfterLocalSearch(MapperInterfaceInfoPointerVectorType& rMapperInterfaceInfosContainer,
const MapperInterfaceInfoUniquePointerType& rpRefInterfaceInfo,
const int CommRank,
std::vector<std::vector<char>>& rSendBuffer,
std::vector<int>& rSendSizes);
void AssignInterfaceInfosAfterRemoteSearch(const MapperInterfaceInfoPointerVectorType& rMapperInterfaceInfosContainer,
MapperLocalSystemPointerVectorPointer& rpMapperLocalSystems);
void DeserializeMapperInterfaceInfosFromBuffer(
const std::vector<std::vector<char>>& rSendBuffer,
const MapperInterfaceInfoUniquePointerType& rpRefInterfaceInfo,
const int CommRank,
MapperInterfaceInfoPointerVectorType& rMapperInterfaceInfosContainer);
/**
* @class MapperInterfaceInfoSerializer
* @ingroup MappingApplication
* @brief Helper class to serialize/deserialize a vector containing MapperInterfaceInfos
* @details This class serializes the vector containing the MapperInterfaceInfos (Shared Ptrs)
* The goal of this class is to have a more efficient/faster implementation than the
* one of the Serializer by avoiding the casting that is done in the serializer when pointers
* are serialized
* @TODO test the performance against the Serializer
* @author Philipp Bucher
*/
class KRATOS_API(MAPPING_APPLICATION) MapperInterfaceInfoSerializer
{
public:
    // Keeps a reference to the infos container and creates a fresh prototype
    // info (via Create()) that is used when deserializing.
    MapperInterfaceInfoSerializer(std::vector<MapperInterfaceInfoPointerType>& rMapperInterfaceInfosContainer,
                                  const MapperInterfaceInfoUniquePointerType& rpRefInterfaceInfo)
        : mrInterfaceInfos(rMapperInterfaceInfosContainer)
        , mrpRefInterfaceInfo(rpRefInterfaceInfo->Create())
    { }

private:
    std::vector<MapperInterfaceInfoPointerType>& mrInterfaceInfos;  // infos being (de)serialized; not owned
    MapperInterfaceInfoPointerType mrpRefInterfaceInfo;             // prototype used to create new infos on load

    friend class Kratos::Serializer; // Adding "Kratos::" is needed because of the "MapperUtilities"-namespace

    virtual void save(Kratos::Serializer& rSerializer) const;
    virtual void load(Kratos::Serializer& rSerializer);
};
} // namespace MapperUtilities.
} // namespace Kratos.
#endif // KRATOS_MAPPER_UTILITIES_H_INCLUDED defined
|
cc_pad2d.c | #include <stdio.h>
#include <string.h>
#include "cc_dtype.h"
#include "cc_fmap2d.h"
#include "cc_pad2d.h"
#include "cc_tsrmgr.h"
#define PAD_MEMCPY \
memcpy((pad->data) + p_ch_mem_size * c + \
(i + p - poffset) * p_row_mem_size + (p + j - poffset) * dtsize, \
inp->data + i_ch_mem_size * c + i * i_row_mem_size + j * dtsize, \
dtsize);
/*
 * Copies the CHW tensor `inp` into a larger tensor padded by `p` on each
 * side (one row/column less when `offset` is non-zero) and returns the
 * padded tensor. With AUTO_TSRMGR the destination is looked up by `name`
 * first and reused if it already exists.
 *
 * NOTE(review): `soffset` tests `offset != 0` while `poffset` tests
 * `offset > 0`, so a negative offset shrinks the output shape without
 * shifting the copy position — confirm this asymmetry is intended.
 */
cc_tensor_t *cc_pad2d(const cc_tensor_t *inp,
    cc_ssize p, cc_ssize offset, const char *name)
{
    cc_tensor_t *pad = NULL;
    cc_ssize shape[CC_CNN2D_SHAPE] = {0};
    cc_ssize soffset = offset ? 1 : 0;     /* shape-shrink flag */
    cc_ssize poffset = offset > 0 ? 1 : 0; /* copy-shift flag   */
    cc_ssize i, j, c, dtsize = cc_dtype_size(*inp->dtype);
    cc_ssize i_ch_size, i_ch_mem_size, i_row_mem_size,
        p_ch_size, p_ch_mem_size, p_row_mem_size;
    /* element count / byte sizes of one input channel and row */
    i_ch_size = inp->shape[CC_CNN2D_SHAPE_W] *
        inp->shape[CC_CNN2D_SHAPE_H];
    i_ch_mem_size = i_ch_size * dtsize;
    i_row_mem_size = inp->shape[CC_CNN2D_SHAPE_W] * dtsize;
#ifdef AUTO_TSRMGR
    pad = cc_tsrmgr_get(name);
#endif
    if (!pad) {
        /* padded shape: same channels, H/W grown by 2*p (minus soffset) */
        shape[CC_CNN2D_SHAPE_C] = inp->shape[CC_CNN2D_SHAPE_C];
        shape[CC_CNN2D_SHAPE_H] =
            inp->shape[CC_CNN2D_SHAPE_H] + p + p - soffset;
        shape[CC_CNN2D_SHAPE_W] =
            inp->shape[CC_CNN2D_SHAPE_W] + p + p - soffset;
        pad = cc_create(shape, *inp->dtype, name);
    }
    /* element count / byte sizes of one output channel and row */
    p_ch_size = pad->shape[CC_CNN2D_SHAPE_W] *
        pad->shape[CC_CNN2D_SHAPE_H];
    p_ch_mem_size = p_ch_size * dtsize;
    p_row_mem_size = pad->shape[CC_CNN2D_SHAPE_W] * dtsize;
#ifdef ENABLE_OPENMP
    #pragma omp parallel for private(c, i, j)
#endif
    for (c = 0; c < inp->shape[CC_CNN2D_SHAPE_C]; ++c) {
        for (i = 0; i < inp->shape[CC_CNN2D_SHAPE_H]; ++i) {
            /* PAD_MEMCPY copies one element to its shifted position
             * inside the padded tensor */
            for (j = 0; j < inp->shape[CC_CNN2D_SHAPE_W]; ++j)
                PAD_MEMCPY;
        }
    }
    return pad;
}
|
GB_unop__signum_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__signum_fc64_fc64)
// op(A') function: GB (_unop_tran__signum_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_csignum (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_csignum (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = GB_csignum (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_SIGNUM || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = signum (Ax [p]) for all entries; auto-generated — do not edit.
GrB_Info GB (_unop_apply__signum_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every position 0..anz-1 holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_csignum (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = GB_csignum (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = signum (A'): the transpose/apply logic lives in the included
// template; this wrapper only supplies the type/operator macros.
// Auto-generated — do not edit.
GrB_Info GB (_unop_tran__signum_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_binop__first_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__first_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__first_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__first_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__first_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__first_uint32)
// A*D function (colscale): GB (_AxD__first_uint32)
// D*A function (rowscale): GB (_DxB__first_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__first_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__first_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_uint32)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 1
// BinaryOp: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// true if values of B are not used
#define GB_B_IS_PATTERN \
1 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = x ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FIRST || GxB_NO_UINT32 || GxB_NO_FIRST_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Compiled out: the dense ewise3-accum kernel is only generated when the op
// is MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV — FIRST is not one
// of them, so this stub is disabled. Auto-generated — do not edit.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the loop lives in the
// included template. Auto-generated — do not edit.
void GB (_Cdense_ewise3_noaccum__first_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B (sparse accumulated into dense). The template include is disabled
// (#if 0) for this operator, so the function is a successful no-op unless
// regenerated. Auto-generated — do not edit.
GrB_Info GB (_Cdense_accumB__first_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b (scalar accumulated into dense). The template include is disabled
// (#if 0) for this operator, so the function is a successful no-op unless
// regenerated. Auto-generated — do not edit.
GrB_Info GB (_Cdense_accumb__first_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix; loop body comes from the
// included template. Auto-generated — do not edit.
GrB_Info GB (_AxD__first_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix; loop body comes from the
// included template. Auto-generated — do not edit.
GrB_Info GB (_DxB__first_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B; the add logic lives in the
// included template. Auto-generated — do not edit.
GrB_Info GB (_AaddB__first_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    // alpha/beta are only read for eWiseUnion (fill values for missing entries)
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) where C is
// sparse/hypersparse, FIRST_UINT32 operator.  All logic is in the
// shared emult meta-template.
GrB_Info GB (_AemultB_08__first_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full, FIRST_UINT32 operator.  GB_BINOP_FLIP selects at compile
// time whether flipxy must be handled by instantiating the template twice.
GrB_Info GB (_AemultB_02__first_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full, FIRST_UINT32 operator.
GrB_Info GB (_AemultB_04__first_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked) where the result C is held in
// bitmap form, FIRST_UINT32 operator.
GrB_Info GB (_AemultB_bitmap__first_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
// This bind1st kernel is disabled in this generated file (function name
// "(none)"); the code is kept for reference only.  With z = first(x,y),
// each output is simply the bound scalar x.
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip positions not present in the bitmap Bb
if (!GBB (Bb, p)) continue ;
; ;
// z = first (x, Bx [p]) == x
Cx [p] = x ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
// This bind2nd kernel is disabled in this generated file (function name
// "(none)"); kept for reference.  With z = first(x,y), each output is a
// copy of the corresponding A entry; the bound scalar y is unused.
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip positions not present in the bitmap Ab
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
// z = first (Ax [p], y) == Ax [p]
Cx [p] = aij ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// Disabled generated kernel: C = op (x, A') — transpose A and apply the
// binary operator with x bound to the first argument.  Kept for reference.
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = x ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this kernel
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// Disabled generated kernel: C = op (A', y) — transpose A and apply the
// binary operator with y bound to the second argument.  Kept for reference.
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = aij ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
full_verify.c | // A test case based on IS/is.c of npb3.2-omp
// to test the handling of #if #endif during OpenMP translation
//
// 6/9/2010, Liao
//
#include <stdio.h>
#define NUM_KEYS 1000
int key_array[NUM_KEYS], key_buff_ptr_global[NUM_KEYS];
// Verify that key_array is sorted, using one of two sorting strategies
// selected at preprocessing time.  This function exists to exercise an
// OpenMP translator's handling of #if/#endif around pragmas; the logic
// mirrors full_verify() from NPB3.2-OMP IS.
void full_verify()
{
int i, j;
int k;
int passed_verification =0;
/* Now, finally, sort the keys: */
#ifdef SERIAL_SORT
/* Copy keys into work array; keys in key_array will be reassigned. */
/* NOTE(review): key_buff2 is not declared anywhere in this file, so the
SERIAL_SORT branch does not compile as-is — it appears intentionally
dead for this preprocessor test case; confirm before enabling. */
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
for( i=0; i<NUM_KEYS; i++ )
key_buff2[i] = key_array[i];
/* This is actual sorting */
for( i=0; i<NUM_KEYS; i++ )
key_array[--key_buff_ptr_global[key_buff2[i]]] = key_buff2[i];
#else /*SERIAL_SORT*/
/* Memory sorting can be done directly: key_buff_ptr_global holds the
cumulative key counts, so slot k fills positions [count(k-1), count(k)). */
#ifdef _OPENMP
#pragma omp parallel for private(i,k)
#endif
for( k=0; k<NUM_KEYS; k++ ) {
i = (k==0)? 0 : key_buff_ptr_global[k-1];
while ( i<key_buff_ptr_global[k] )
key_array[i++] = k;
}
#endif /*SERIAL_SORT*/
/* Confirm keys correctly sorted: count incorrectly sorted keys, if any */
j = 0;
#ifdef _OPENMP
#pragma omp parallel for private(i) reduction(+:j)
#endif
for( i=1; i<NUM_KEYS; i++ )
if( key_array[i-1] > key_array[i] )
j++;
if( j != 0 )
printf( "Full_verify: number of keys out of sort: %d\n", j );
else
passed_verification++;
}
// This function is required to reproduce a bug
void rank ()
{
#ifdef _OPENMP
#pragma omp parallel
#endif
{
printf("nothing here");
}
}
|
ccl_tracers.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_integration.h>
#include "ccl.h"
// Allocate an empty tracer collection with a fixed-capacity pointer array.
// On allocation failure, *status is set to CCL_ERROR_MEMORY and NULL is
// returned.  NOTE(review): if *status is already nonzero on entry, the
// outer struct is still allocated but trc->ts is left unset — callers are
// expected to pass a clean status; confirm.
ccl_cl_tracer_collection_t *ccl_cl_tracer_collection_t_new(int *status) {
ccl_cl_tracer_collection_t *trc = NULL;
trc = malloc(sizeof(ccl_cl_tracer_collection_t));
if (trc == NULL)
*status = CCL_ERROR_MEMORY;
if (*status == 0) {
trc->n_tracers = 0;
// Currently CCL_MAX_TRACERS_PER_COLLECTION is hard-coded to 100.
// It should be enough for any practical application with minimal memory overhead
trc->ts = malloc(CCL_MAX_TRACERS_PER_COLLECTION*sizeof(ccl_cl_tracer_t *));
if (trc->ts == NULL) {
*status = CCL_ERROR_MEMORY;
free(trc);
trc = NULL;
}
}
return trc;
}
// Release a tracer collection allocated by ccl_cl_tracer_collection_t_new.
// Only the collection's own storage is freed; the tracers it points to are
// NOT freed here.  Passing NULL is a no-op.
void ccl_cl_tracer_collection_t_free(ccl_cl_tracer_collection_t *trc) {
  if (trc == NULL)
    return;
  if (trc->ts != NULL)
    free(trc->ts);
  free(trc);
}
// Append a tracer to a collection.  The backing array has a fixed capacity
// of CCL_MAX_TRACERS_PER_COLLECTION slots; when it is full, *status is set
// to CCL_ERROR_MEMORY and the tracer is not added.
void ccl_add_cl_tracer_to_collection(ccl_cl_tracer_collection_t *trc,
                                     ccl_cl_tracer_t *tr, int *status) {
  const int n = trc->n_tracers;
  if (n >= CCL_MAX_TRACERS_PER_COLLECTION) {
    *status = CCL_ERROR_MEMORY;
    return;
  }
  trc->ts[n] = tr;
  trc->n_tracers = n + 1;
}
//Integrand for N(z) integrator
// Integrand for N(z) integrator: evaluates the N(z) spline passed via pars.
// Signature matches gsl_function.function.
static double nz_integrand(double z, void *pars) {
ccl_f1d_t *nz_f = (ccl_f1d_t *)pars;
return ccl_f1d_t_eval(nz_f,z);
}
// Gets area of N(z) curve
// Gets area of N(z) curve: integrates the nz_f spline over [z0, zf] with
// GSL QAG.  On any failure, *status is set and the sentinel -1 is returned
// (callers only use the result when *status == 0).
// NOTE(review): on workspace-allocation failure this still calls
// gsl_integration_workspace_free(NULL) — confirm the installed GSL treats
// that as a no-op.
static double get_nz_norm(ccl_cosmology *cosmo, ccl_f1d_t *nz_f,
double z0, double zf, int *status) {
double nz_norm = -1, nz_enorm;
// Get N(z) norm
gsl_function F;
gsl_integration_workspace *w = NULL;
F.function = &nz_integrand;
F.params = nz_f;
w = gsl_integration_workspace_alloc(cosmo->gsl_params.N_ITERATION);
if (w == NULL) {
*status = CCL_ERROR_MEMORY;
ccl_cosmology_set_status_message(
cosmo, "ccl_tracers.c: get_nz_norm(): out of memory");
}
else {
int gslstatus = gsl_integration_qag(
&F, z0, zf, 0,
cosmo->gsl_params.INTEGRATION_EPSREL,
cosmo->gsl_params.N_ITERATION,
cosmo->gsl_params.INTEGRATION_GAUSS_KRONROD_POINTS,
w, &nz_norm, &nz_enorm);
if (gslstatus != GSL_SUCCESS) {
ccl_raise_gsl_warning(gslstatus, "ccl_tracers.c: get_nz_norm():");
*status = CCL_ERROR_INTEG;
ccl_cosmology_set_status_message(
cosmo,
"ccl_tracers.c: get_nz_norm(): "
"integration error when normalizing N(z)\n");
}
}
gsl_integration_workspace_free(w);
return nz_norm;
}
// Fill pchi_arr[0..nz-1] with the number-counts radial kernel
// dN/dchi = H(z) * dN/dz (optionally normalized to unit area in z).
//  z_arr/nz_arr : tabulated (unnormalized) dN/dz, nz points
//  normalize_nz : if nonzero, divide by the integral of dN/dz
//  pchi_arr     : output, evaluated at the same z samples
// On any failure *status is set and pchi_arr is left unfilled.
void ccl_get_number_counts_kernel(ccl_cosmology *cosmo,
int nz, double *z_arr, double *nz_arr,
int normalize_nz,
double *pchi_arr, int *status) {
// Returns dn/dchi normalized to unit area from an unnormalized dn/dz.
// Prepare N(z) spline
ccl_f1d_t *nz_f = NULL;
nz_f = ccl_f1d_t_new(nz, z_arr, nz_arr, 0, 0,
ccl_f1d_extrap_const,
ccl_f1d_extrap_const, status);
if (nz_f == NULL) {
*status = CCL_ERROR_SPLINE;
ccl_cosmology_set_status_message(
cosmo,
"ccl_tracers.c: ccl_get_number_counts_kernel(): "
"error initializing spline\n");
}
// Get N(z) normalization
double i_nz_norm = -1;
if (*status == 0) {
if (normalize_nz)
i_nz_norm = 1./get_nz_norm(cosmo, nz_f, z_arr[0], z_arr[nz-1], status);
else
i_nz_norm = 1;
}
if (*status == 0) {
// Populate arrays
for(int ichi=0; ichi < nz; ichi++) {
double a = 1./(1+z_arr[ichi]);
// H(z) in units of 1/Mpc: h * (H/H0) / (c/H0 in Mpc/h)
double h = cosmo->params.h*ccl_h_over_h0(cosmo,a,status)/ccl_constants.CLIGHT_HMPC;
// H(z) * dN/dz * 1/Ngal
pchi_arr[ichi] = h*nz_arr[ichi]*i_nz_norm;
}
}
ccl_f1d_t_free(nz_f);
}
//3 H0^2 Omega_M / 2
// 3 H0^2 Omega_M / 2 in natural distance units: 1.5 * (h/c[Mpc/h])^2 * Om.
static double get_lensing_prefactor(ccl_cosmology *cosmo, int *status) {
  const double h0_over_c = cosmo->params.h / ccl_constants.CLIGHT_HMPC;
  return 1.5 * h0_over_c * h0_over_c * cosmo->params.Omega_m;
}
// Parameter bundle threaded through the GSL lensing-kernel integration.
typedef struct {
ccl_cosmology *cosmo;
double z_max;      // upper integration limit in redshift
double z_end;      // lower integration limit (redshift of this kernel sample)
double chi_end;    // comoving distance corresponding to z_end
double i_nz_norm;  // 1 / integral of dN/dz (or 1 if not normalizing)
ccl_f1d_t *nz_f;   // dN/dz spline
ccl_f1d_t *sz_f;   // magnification-bias s(z) spline, or NULL if unused
int *status;       // error flag shared with the calling thread
} integ_lensing_pars;
// Integrand for lensing kernel.
// Returns N(z) * (1 - 5*s(z)/2) * (chi(z)-chi) / chi(z)
// Integrand for lensing kernel.
// Returns N(z) * (1 - 5*s(z)/2) * (chi(z)-chi) / chi(z)
// (with chi ratios generalized to sinn() to cover curved cosmologies).
// At z == 0 the distance ratio is taken to be 1 to avoid 0/0.
static double lensing_kernel_integrand(double z, void *pars) {
integ_lensing_pars *p = (integ_lensing_pars *)pars;
double pz = ccl_f1d_t_eval(p->nz_f, z);
double qz;
if (p->sz_f == NULL) // No magnification factor
qz = 1;
else // With magnification factor
qz = (1 - 2.5*ccl_f1d_t_eval(p->sz_f, z));
if (z == 0)
return pz * qz;
else {
double chi = ccl_comoving_radial_distance(p->cosmo, 1./(1+z), p->status);
return (
pz * qz *
ccl_sinn(p->cosmo, chi-p->chi_end, p->status) /
ccl_sinn(p->cosmo, chi, p->status));
}
}
// Returns
// Integral[ p(z) * (1-5s(z)/2) * chi_end * (chi(z)-chi_end)/chi(z) , {z',z_end,z_max} ]
// Returns
// chi_end/N * Integral[ p(z) * (1-5s(z)/2) * (chi(z)-chi_end)/chi(z), {z, z_end, z_max} ]
// evaluated with GSL QAG.  On failure, returns the sentinel -1 and makes
// sure *(pars->status) carries an error so the caller does not silently
// keep the sentinel as a kernel sample.
static double lensing_kernel_integrate(ccl_cosmology *cosmo,
integ_lensing_pars *pars,
gsl_integration_workspace *w) {
int gslstatus = 0;
double result, eresult;
gsl_function F;
F.function = &lensing_kernel_integrand;
F.params = pars;
gslstatus = gsl_integration_qag(
&F, pars->z_end, pars->z_max, 0,
cosmo->gsl_params.INTEGRATION_EPSREL,
cosmo->gsl_params.N_ITERATION,
cosmo->gsl_params.INTEGRATION_GAUSS_KRONROD_POINTS,
w, &result, &eresult);
if ((gslstatus != GSL_SUCCESS) || (*(pars->status))) {
ccl_raise_gsl_warning(gslstatus, "ccl_tracers.c: lensing_kernel_integrate():");
// BUGFIX: a pure GSL failure previously returned -1 without flagging any
// error, so the caller stored the sentinel as a valid kernel value.
if (*(pars->status) == 0)
*(pars->status) = CCL_ERROR_INTEG;
return -1;
}
return result * pars->i_nz_norm * pars->chi_end;
}
//Returns number of divisions on which
//the lensing kernel should be calculated
//Returns number of divisions on which
//the lensing kernel should be calculated:
//the number of steps of size dz (the mean spacing of z_arr)
//needed to reach z_arr[nz-1] starting from z=0.
//Returns 0 for degenerate input (fewer than two samples, or a
//non-positive redshift span), which previously divided by zero.
int ccl_get_nchi_lensing_kernel(int nz, double *z_arr, int *status) {
if (nz < 2)
return 0;
//Compute redshift step
double dz = (z_arr[nz-1]-z_arr[0])/(nz-1);
if (dz <= 0)
return 0;
//How many steps to z=0?
return (int)(z_arr[nz-1]/dz+0.5);
}
//Return array with the values of chi at
//the which the lensing kernel will be
//calculated.
// Fill chis[0..nchi-1] with the comoving radial distances at which the
// lensing kernel will be sampled: nchi equally spaced redshifts from ~0
// (offset by 1e-15 to stay strictly positive) up to z_max*(nchi-1)/nchi.
void ccl_get_chis_lensing_kernel(ccl_cosmology *cosmo,
                                 int nchi, double z_max,
                                 double *chis, int *status) {
  const double dz = z_max / nchi;
  for (int i = 0; i < nchi; i++) {
    const double z = dz * i + 1E-15;
    chis[i] = ccl_comoving_radial_distance(cosmo, 1. / (1 + z), status);
  }
}
//Returns array with lensing kernel:
//3 * H0^2 * Omega_M / 2 / a *
// Integral[ p(z) * (1-5s(z)/2) * chi_end * (chi(z)-chi_end)/chi(z) ,
// {z',z_end,z_max} ]
// Fill wL_arr[0..nchi-1] with the (magnification-corrected) lensing kernel
//   3 H0^2 Om / 2 * (1+z) * mgfac *
//   Integral[ p(z') (1-5s(z')/2) chi (chi(z')-chi)/chi(z'), {z', z, z_max} ]
// evaluated at the distances in chi_arr.  The per-sample integrals are
// computed in an OpenMP parallel region with one GSL workspace and one
// parameter struct per thread.  On any thread-local failure the remaining
// samples of that thread are set to NAN and *status is set to
// CCL_ERROR_INTEG (via an atomic write).
void ccl_get_lensing_mag_kernel(ccl_cosmology *cosmo,
int nz, double *z_arr, double *nz_arr,
int normalize_nz, double z_max,
int nz_s, double *zs_arr, double *sz_arr,
int nchi, double *chi_arr, double *wL_arr,
int *status) {
ccl_f1d_t *nz_f = NULL;
ccl_f1d_t *sz_f = NULL;
// Prepare N(z) spline
nz_f = ccl_f1d_t_new(nz, z_arr, nz_arr, 0, 0,
ccl_f1d_extrap_const,
ccl_f1d_extrap_const, status);
if (nz_f == NULL) {
*status = CCL_ERROR_SPLINE;
ccl_cosmology_set_status_message(
cosmo,
"ccl_tracers.c: get_lensing_mag_kernel(): error initializing spline\n");
}
// Get N(z) normalization
double i_nz_norm = -1;
if (*status == 0) {
if (normalize_nz)
i_nz_norm = 1./get_nz_norm(cosmo, nz_f, z_arr[0], z_arr[nz-1], status);
else
i_nz_norm = 1.;
}
// Prepare magnification bias spline if needed
if (*status == 0) {
if ((nz_s > 0) && (zs_arr != NULL) && (sz_arr != NULL)) {
sz_f = ccl_f1d_t_new(nz_s, zs_arr, sz_arr, sz_arr[0], sz_arr[nz_s-1],
ccl_f1d_extrap_const,
ccl_f1d_extrap_const, status);
if (sz_f == NULL) {
*status = CCL_ERROR_SPLINE;
ccl_cosmology_set_status_message(
cosmo,
"ccl_tracers.c: get_lensing_mag_kernel(): error initializing spline\n");
}
}
}
if(*status==0) {
#pragma omp parallel default(none) \
shared(cosmo, z_max, i_nz_norm, sz_f, nz_f, \
nchi, chi_arr, wL_arr, status)
{
double chi, a, z, mgfac, lens_prefac;
int ichi, local_status;
integ_lensing_pars *ipar = NULL;
gsl_integration_workspace *w = NULL;
// each thread works on a private error flag, merged at the end
local_status = *status;
lens_prefac = get_lensing_prefactor(cosmo, &local_status);
if (local_status == 0) {
ipar = malloc(sizeof(integ_lensing_pars));
w = gsl_integration_workspace_alloc(cosmo->gsl_params.N_ITERATION);
if ((ipar == NULL) || (w == NULL)) {
local_status = CCL_ERROR_MEMORY;
}
}
if (local_status == 0) {
ipar->cosmo = cosmo;
ipar->z_max = z_max;
ipar->i_nz_norm = i_nz_norm;
ipar->sz_f = sz_f;
ipar->nz_f = nz_f;
ipar->status = &local_status;
}
//Populate arrays
#pragma omp for
for (ichi=0; ichi < nchi; ichi++) {
if (local_status == 0) {
chi = chi_arr[ichi];
a = ccl_scale_factor_of_chi(cosmo, chi, &local_status);
z = 1./a-1;
// Add MG correction if needed
mgfac = 1.0;
if (fabs(cosmo->params.sigma_0))
mgfac += ccl_Sig_MG(cosmo, a, &local_status);
ipar->z_end = z;
ipar->chi_end = chi;
wL_arr[ichi] = lensing_kernel_integrate(cosmo, ipar, w)*(1+z)*lens_prefac*mgfac;
} else {
wL_arr[ichi] = NAN;
}
} //end omp for
gsl_integration_workspace_free(w);
free(ipar);
if (local_status) {
#pragma omp atomic write
*status = CCL_ERROR_INTEG;
}
} //end omp parallel
}
ccl_f1d_t_free(nz_f);
ccl_f1d_t_free(sz_f);
}
// Returns kernel for CMB lensing
// 3H0^2Om/2 * chi * (chi_s - chi) / chi_s / a
// Returns kernel for CMB lensing
// 3H0^2Om/2 * chi * (chi_s - chi) / chi_s / a
// (distance ratios via ccl_sinn to cover curved cosmologies), with an
// optional modified-gravity correction factor when sigma_0 != 0.
void ccl_get_kappa_kernel(ccl_cosmology *cosmo, double chi_source,
int nchi, double *chi_arr,
double *wchi, int *status) {
double lens_prefac = get_lensing_prefactor(cosmo, status) / ccl_sinn(cosmo, chi_source, status);
for (int ichi=0; ichi < nchi; ichi++) {
double chi = chi_arr[ichi];
double a = ccl_scale_factor_of_chi(cosmo, chi, status);
double mgfac = 1;
// Add MG correction if needed
if (fabs(cosmo->params.sigma_0))
mgfac += ccl_Sig_MG(cosmo, a, status);
wchi[ichi] = lens_prefac*(ccl_sinn(cosmo,chi_source-chi,status))*chi*mgfac/a;
}
}
// Build a C_ell tracer from an optional radial kernel W(chi) (chi_w/w_w)
// and an optional transfer function T(k,a) (fka_arr, or the factorizable
// fk_arr/fa_arr pair).  der_bessel in [-1,2] and der_angles in [0,2]
// select the Bessel-derivative order and the ell prefactor.  On failure,
// *status is set and a partially built (possibly NULL) tracer is returned;
// callers must check *status before using the result.
ccl_cl_tracer_t *ccl_cl_tracer_t_new(ccl_cosmology *cosmo,
int der_bessel,
int der_angles,
int n_w, double *chi_w, double *w_w,
int na_ka, double *a_ka,
int nk_ka, double *lk_ka,
double *fka_arr,
double *fk_arr,
double *fa_arr,
int is_fka_log,
int is_factorizable,
int extrap_order_lok,
int extrap_order_hik,
int *status) {
ccl_cl_tracer_t *tr = NULL;
// Check der_bessel and der_angles are sensible
if ((der_angles < 0) || (der_angles > 2)) {
*status = CCL_ERROR_INCONSISTENT;
ccl_cosmology_set_status_message(
cosmo,
"ccl_tracers.c: ccl_cl_tracer_new(): der_angles must be between 0 and 2\n");
}
if ((der_bessel < -1) || (der_bessel > 2)) {
*status = CCL_ERROR_INCONSISTENT;
ccl_cosmology_set_status_message(
cosmo,
"ccl_tracers.c: ccl_cl_tracer_new(): der_bessel must be between -1 and 2\n");
}
if (*status == 0) {
tr = malloc(sizeof(ccl_cl_tracer_t));
if (tr == NULL)
*status = CCL_ERROR_MEMORY;
}
// Initialize everythin
if (*status == 0) {
tr->der_angles = der_angles;
tr->der_bessel = der_bessel;
tr->kernel = NULL; // Initialize these to NULL
tr->transfer = NULL; // Initialize these to NULL
tr->chi_min = 0;
tr->chi_max = 1E15;
}
if (*status == 0) {
// Initialize radial kernel
if ((n_w > 0) && (chi_w != NULL) && (w_w != NULL)) {
tr->kernel = ccl_f1d_t_new(n_w,chi_w,w_w,0,0,
ccl_f1d_extrap_const,
ccl_f1d_extrap_const, status);
if (tr->kernel == NULL)
*status=CCL_ERROR_MEMORY;
}
}
// Find kernel edges
if (*status == 0) {
// If no radial kernel, set limits to zero and maximum distance
if (tr->kernel == NULL) {
tr->chi_min = 0;
tr->chi_max = ccl_comoving_radial_distance(cosmo, cosmo->spline_params.A_SPLINE_MIN, status);
}
else {
int ichi;
double w_max = fabs(w_w[0]);
// Find maximum of radial kernel
for (ichi=0; ichi < n_w; ichi++) {
if (fabs(w_w[ichi]) >= w_max)
w_max = fabs(w_w[ichi]);
}
// Multiply by fraction
// chi_min/chi_max bracket the region where |W| is at least
// CCL_FRAC_RELEVANT times its peak value
w_max *= CCL_FRAC_RELEVANT;
// Initialize as the original edges in case we don't find an interval
tr->chi_min = chi_w[0];
tr->chi_max = chi_w[n_w-1];
// Find minimum
for (ichi=0; ichi < n_w; ichi++) {
if (fabs(w_w[ichi]) >= w_max) {
tr->chi_min = chi_w[ichi];
break;
}
}
// Find maximum
for (ichi=n_w-1; ichi >= 0; ichi--) {
if (fabs(w_w[ichi]) >= w_max) {
tr->chi_max = chi_w[ichi];
break;
}
}
}
}
if (*status == 0) {
if ((fka_arr != NULL) || (fk_arr != NULL) || (fa_arr != NULL)) {
tr->transfer = ccl_f2d_t_new(
na_ka,a_ka, // na, a_arr
nk_ka,lk_ka, // nk, lk_arr
fka_arr, // fka_arr
fk_arr, // fk_arr
fa_arr, // fa_arr
is_factorizable, // is factorizable
extrap_order_lok, // extrap_order_lok
extrap_order_hik, // extrap_order_hik
ccl_f2d_constantgrowth, // extrap_linear_growth
is_fka_log, // is_fka_log
NULL, // growth (function)
1, // growth_factor_0 -> will assume constant transfer function
0, // growth_exponent
ccl_f2d_3, // interp_type
status);
if (tr->transfer == NULL)
*status=CCL_ERROR_MEMORY;
}
}
return tr;
}
// Free a tracer together with the transfer (f2d) and kernel (f1d) splines
// it owns.  Passing NULL is a no-op.
void ccl_cl_tracer_t_free(ccl_cl_tracer_t *tr) {
  if (tr == NULL)
    return;
  if (tr->transfer != NULL)
    ccl_f2d_t_free(tr->transfer);
  if (tr->kernel != NULL)
    ccl_f1d_t_free(tr->kernel);
  free(tr);
}
// Ell-dependent prefactor selected by the tracer's der_angles:
//   0 (default) -> 1
//   1           -> l(l+1)
//   2           -> sqrt((l+2)(l+1)l(l-1)), with cheap approximations
//                  (accurate to 5e-5 for l>10 and 1e-6 for l>1000).
// A NULL tracer yields 1.
double ccl_cl_tracer_t_get_f_ell(ccl_cl_tracer_t *tr, double ell, int *status) {
  if (tr == NULL)
    return 1;
  switch (tr->der_angles) {
  case 1:
    return ell * (ell + 1.);
  case 2: {
    if (ell <= 1)
      return 0;  // identically zero for l = 0, 1
    if (ell <= 10)
      return sqrt((ell + 2) * (ell + 1) * ell * (ell - 1));  // exact form
    double lp1h = ell + 0.5;
    double lp1h2 = lp1h * lp1h;
    if (ell <= 1000)
      return lp1h2 * (1 - 1.25 / lp1h2);  // accurate to 5E-5 for l > 10
    return lp1h2;  // accurate to 1E-6 for l > 1000
  }
  default:
    return 1;
  }
}
// Evaluate the tracer's radial kernel at comoving distance chi.  A NULL
// tracer or a tracer without a kernel spline counts as a unit kernel.
double ccl_cl_tracer_t_get_kernel(ccl_cl_tracer_t *tr, double chi, int *status) {
  if (tr == NULL || tr->kernel == NULL)
    return 1;
  return ccl_f1d_t_eval(tr->kernel, chi);
}
// Evaluate the tracer's transfer function at (log-k, scale factor).  A
// NULL tracer or a tracer without a transfer spline counts as unity.
double ccl_cl_tracer_t_get_transfer(ccl_cl_tracer_t *tr,
                                    double lk, double a, int *status) {
  if (tr == NULL || tr->transfer == NULL)
    return 1;
  return ccl_f2d_t_eval(tr->transfer, lk, a, NULL, status);
}
|
GB_unaryop__identity_uint8_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint8_uint8
// op(A') function: GB_tran__identity_uint8_uint8
// C type: uint8_t
// A type: uint8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
// Type and operator macros consumed by the kernels and templates below:
// both A and C hold uint8_t, and the IDENTITY operator copies the value.
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = identity (Ax [p]) for all anz entries, parallelized with a
// static OpenMP schedule.  Cx and Ax may alias (identity is in-place safe).
GrB_Info GB_unop__identity_uint8_uint8
(
uint8_t *Cx, // Cx and Ax may be aliased
uint8_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (A'): transpose with the identity operator applied to each
// entry.  The bucket-transpose loop lives in the shared template; this
// wrapper selects phase 2 (the numeric phase).
GrB_Info GB_tran__identity_uint8_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
priority.c | /* { dg-do run } */
/* { dg-set-target-env-var OMP_MAX_TASK_PRIORITY "10" } */
/* This test verifies that the "priority" clause of omp task works as
advertised.
Testing the OpenMP task scheduler is a bit tricky, especially when
trying to determine what ran first (without explicitly calling
time() and/or synchronizing between threads). What we do here is
run in single threaded mode which guarantees that we won't run into
data races while accessing the "prio" array.
We give each task a priority from 0..63, while setting
OMP_MAX_TASK_PRIORITY to 10, which basically gives us 10 lower
priority tasks, and the rest scheduled to run earlier. We verify
that the priority < 10 tasks run last. */
#include <omp.h>
#include <stdlib.h>
#define N 64
// Launch N tasks with priorities (i ^ 1) from a single-threaded parallel
// region, then verify the scheduler ran priority >= max_priority tasks
// first and the rest in decreasing-priority order.  The check is skipped
// unless saved_tsknum == 0, i.e. no task executed before all were created.
int main()
{
int tsknum=0, prio[N];
int max_priority = omp_get_max_task_priority ();
int saved_tsknum = -1;
int i;
#pragma omp parallel num_threads(1)
#pragma omp single private (i)
{
for (i = 0; i < N; i++)
#pragma omp task priority(i ^ 1)
{
int t;
// record this task's execution order and its priority
#pragma omp atomic capture seq_cst
t = tsknum++;
prio[t] = i ^ 1;
}
#pragma omp atomic read seq_cst
saved_tsknum = tsknum;
}
/* If any of the tasks have run before all tasks were created, don't
make any assumption on the task order. Otherwise, we should have
tasks with >= max_priority scheduled first in arbitrary order,
followed by the rest of tasks in decreasing priority order, as
there is only one thread that can schedule them. */
if (saved_tsknum == 0)
{
for (i = 0; i < N; i++)
if (i < N - max_priority)
{
if (prio[i] < max_priority)
abort ();
}
else if (i != N - prio[i] - 1)
abort ();
}
return 0;
}
|
server.c | #define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <gmp.h>
#ifdef HAVEOMP
#include <omp.h>
#endif
#include "globals.h"
#include "server.h"
#ifdef IR_CODE
#include "integer-reg.h"
#endif
#ifdef ALIGN
#include <malloc.h>
#endif
#ifndef BASE
#define BASE 10
#endif
#ifndef DUMPFILE
#define DUMPFILE "dump"
#endif
#ifdef IR_CODE
/**
* Converts each input number in inp to Montgomery representation, once.
*/
/* NOTE(review): "montgomerry" is a misspelling of Montgomery, but it is a
 * file-local name referenced by server(); keep as-is. */
#ifdef RESTRICT
static void montgomerry(uint *restrict inp, size_t inplen, const uint *restrict prime)
#else
static void montgomerry(uint *inp, size_t inplen, const uint *prime)
#endif
{
const size_t N = getN();
/* each number is converted 2*N times -- presumably one doubling step per
 * call of convert_to_mont; confirm against integer-reg.c */
const size_t mj = 2 * N;
size_t i, j;
#ifdef HAVEOMP
/* j must be private: it is the inner loop counter shared by all i's */
#pragma omp parallel for private(j) schedule(OMPSCHED)
#endif
for (i = 0; i < inplen; i++) {
uint *p = &inp[N * i];
#ifdef UNROLL
#pragma unroll
#endif
for (j = 0; j < mj; j++)
convert_to_mont(p, prime);
}
}
/* Compute out[i] = product of all inp[j] mod prime, for each of the outlen
 * output slots, entirely in Montgomery representation.  Each accumulator
 * starts from the Montgomery form of 1 and is converted back at the end. */
#ifdef RESTRICT
static void multiply(uint *restrict inp, size_t inplen,
uint *restrict out, size_t outlen,
const uint *restrict prime, size_t minvp)
#else
static void multiply(uint *inp, size_t inplen,
uint *out, size_t outlen,
const uint *prime, size_t minvp)
#endif
{
uint *m1 = one_to_mont(prime);
#ifdef ALIGN
__assume_aligned(m1, ALIGNBOUNDARY);
#endif
const size_t N = getN();
size_t i, j;
debug_IR("Computed once: ", m1);
#ifdef HAVEOMP
#pragma omp parallel for private(j) schedule(OMPSCHED)
#endif
for (i = 0; i < outlen; i++) {
uint *p = &out[N * i];
/* set accumulator/out to Montgomery representation of 1 */
#ifdef ALIGN
__assume_aligned(p, ALIGNBOUNDARY);
__assume_aligned(m1, ALIGNBOUNDARY);
#pragma vector aligned
#endif
for (j = 0; j < N; j++)
p[j] = m1[j];
/* multiply into out */
#ifdef UNROLL
#pragma unroll
#endif
for (j = 0; j < inplen; j++) {
uint *q = &inp[N * j];
debug_IR("to multiply: ", q);
mul_full(p, q, prime, minvp);
debug_IR("now: ", p);
}
/* convert out back from Montgomery */
convert_from_mont(p, prime, minvp);
debug_IR("final result: ", p);
}
#ifdef ALIGN
_mm_free(m1);
#else
free(m1);
#endif
}
#else
#ifdef LLIMPL
/* GMP mpn-level product: out[i] = (out[i] * inp[0] * ... * inp[inplen-1])
 * mod prime, using a full-width multiply followed by a truncating division
 * for the reduction.  scratch holds the 2*numlen-limb product, quot the
 * (discarded) quotient.
 * NOTE(review): the two calloc results are not checked for NULL. */
static void low_level_work_kernel(const mp_limb_t *prime, mp_size_t numlen,
size_t minvp, size_t inplen, const mp_limb_t * const * inp,
size_t outlen, mp_limb_t **out)
{
size_t i, j, sz = 2 * numlen;
mp_limb_t* scratch = calloc(sz, sizeof(scratch[0]));
mp_limb_t* quot = calloc(sz, sizeof(scratch[0]));
(void) minvp;
for (i = 0; i < outlen; i++) {
for (j = 0; j < inplen; j++) {
mpn_mul_n(scratch, out[i], inp[j], numlen);
mpn_tdiv_qr(quot, out[i], 0, scratch, sz, prime, numlen);
}
}
free(scratch);
free(quot);
}
/* Adapter from the mpz interface to the mpn work kernel: exposes the limb
 * arrays of the inputs and outputs, runs the kernel, then normalizes each
 * output with mpz_limbs_finish so its size/sign fields match the limbs.
 * NOTE(review): out[i] must already be initialized and hold at least sz
 * limbs worth of value for mpz_limbs_write/finish to be meaningful here;
 * confirm against the caller's setup. */
static void low_level_impl(const mpz_t prime, size_t minvp,
size_t inplen, const mpz_t * const inp,
size_t outlen, mpz_t *out)
{
size_t i, sz = mpz_size(prime);
const mp_limb_t** inputs = calloc(inplen, sizeof(inputs[0]));
for (i = 0; i< inplen; i++)
inputs[i] = mpz_limbs_read(inp[i]);
mp_limb_t** outputs = calloc(outlen, sizeof(outputs[0]));
for (i = 0; i < outlen; i++)
outputs[i] = mpz_limbs_write(out[i], sz);
low_level_work_kernel(mpz_limbs_read(prime), sz,
minvp, inplen, inputs,
outlen, outputs);
for (i = 0; i < outlen; i++)
mpz_limbs_finish(out[i], sz);
free(outputs);
free(inputs);
}
#else
/* Straightforward mpz implementation: out[i] = product of all inp[j] mod
 * prime.  Each out[i] is (re)initialized to 1 before accumulating. */
static void naive_impl(const mpz_t prime, size_t minvp,
size_t inplen, const mpz_t * const inp,
size_t outlen, mpz_t *out)
{
size_t i, j;
(void) minvp;
#ifdef HAVEOMP
/* BUGFIX: j was shared across threads (unlike the private(j) used by the
 * other parallel loops in this file), racing the inner loop counter and
 * corrupting results under OpenMP. */
#pragma omp parallel for private(j)
#endif
for (i = 0; i < outlen; i++) {
mpz_init_set_ui(out[i], 1);
for (j = 0; j < inplen; j++) {
mpz_mul(out[i], out[i], inp[j]);
mpz_mod(out[i], out[i], prime);
}
}
}
#endif
#endif
/* Run the modular-product workload with whichever implementation was
 * selected at build time (integer-register Montgomery code, GMP mpn, or
 * naive mpz), and report timing statistics:
 *   dbsize  - total number of multiplications, for the time/multp figure
 *   inplen  - number of inputs multiplied into each output
 *   outlen  - number of output accumulators ("rounds")
 * mmps = millions of modular multiplications per second. */
#ifdef IR_CODE
void server(size_t dbsize, const uint *prime, size_t minvp,
size_t inplen, uint *inp,
size_t outlen, uint *out)
#else
void server(size_t dbsize, const mpz_t prime, size_t minvp,
size_t inplen, const mpz_t * const inp,
size_t outlen, mpz_t *out)
#endif
{
double total_time, time_per_mul, time_per_round, mmps;
struct timespec st, en;
clock_gettime(CLOCK_MONOTONIC, &st);
/* BUGFIX: was "#if IR_CODE" while every other conditional in this file
 * (including the signature selection above) uses "#ifdef IR_CODE".  With
 * IR_CODE defined to an empty token, "#if IR_CODE" is a preprocessing
 * error; the dispatch must match the signature selection. */
#ifdef IR_CODE
montgomerry(inp, inplen, prime);
multiply(inp, inplen, out, outlen, prime, minvp);
#else
#ifdef LLIMPL
low_level_impl(prime, minvp, inplen, inp, outlen, out);
#else
naive_impl(prime, minvp, inplen, inp, outlen, out);
#endif
#endif
clock_gettime(CLOCK_MONOTONIC, &en);
total_time = 1000 * time_diff(&st, &en); /* in ms */
time_per_mul = total_time / dbsize;
time_per_round = total_time / outlen;
mmps = 0.001 / time_per_mul; /* in mmps */
printf("Total time: %7.3lf ms\n", total_time);
printf("Time/multp: %7.3lf ms\n", time_per_mul);
printf("Time/round: %7.3lf ms\n", time_per_round);
printf("Ops/second: %7.3lf mmps\n", mmps);
}
/* Write the outlen results to DUMPFILE, one number per line in base BASE.
 * On fopen failure, report via perror and return without writing. */
void dump_results(size_t outlen, const mpz_t * const out)
{
size_t i;
FILE *f = fopen(DUMPFILE, "w");
if (f == NULL) {
perror("fopen");
return;
}
for (i = 0; i < outlen; i++) {
mpz_out_str(f, BASE, out[i]);
fputc('\n', f);
}
fclose(f);
}
|
openmp_reduction2.c | ///TAFFO_TEST_ARGS -fopenmp
#include <stdio.h>
#define NUM_THREADS (10)
// TAFFO test: each of the NUM_THREADS threads adds 500.0 into an OpenMP
// sum reduction; the annotate attribute declares the value range (0,5000)
// for the precision-tuning pass, so 10 * 500.0 = 5000.0 sits at the top of
// the declared range.
int main(int argc, char *argv[])
{
float result __attribute__((annotate("scalar(range(0,5000))"))) = 0.0;
#pragma omp parallel reduction(+:result) num_threads(NUM_THREADS)
result += 500.0;
printf("result: %f\n", result);
}
|
test_nest_lock.c | #include <stdio.h>
#include <omp.h>
omp_nest_lock_t nestable_lock;
// Exercise OpenMP nestable locks: each of 4 threads spins until it
// acquires the lock, optionally re-acquires it (nesting depth 2), then
// releases once per successful acquisition.
int main() {
omp_init_nest_lock(&nestable_lock);
#pragma omp parallel num_threads(4)
{
int tid = omp_get_thread_num();
// busy-wait: omp_test_nest_lock returns the nesting count on success, 0 on failure
while (!omp_test_nest_lock(&nestable_lock))
printf("Thread %d - failed to acquire nestable_lock\n",
tid);
printf("Thread %d - acquired nestable_lock\n", tid);
// a nestable lock may be re-acquired by the thread that holds it
if (omp_test_nest_lock(&nestable_lock)) {
printf("Thread %d - acquired nestable_lock again\n",
tid);
printf("Thread %d - released nestable_lock\n",
tid);
omp_unset_nest_lock(&nestable_lock);
}
printf("Thread %d - released nestable_lock\n", tid);
omp_unset_nest_lock(&nestable_lock);
}
omp_destroy_nest_lock(&nestable_lock);
}
|
J2OrbitalSoA.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
// Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
// Ye Luo, yeluo@anl.gov, Argonne National Laboratory
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H
#define QMCPLUSPLUS_TWOBODYJASTROW_OPTIMIZED_SOA_H
#include <map>
#include <numeric>
#include "Configuration.h"
#if !defined(QMC_BUILD_SANDBOX_ONLY)
#include "QMCWaveFunctions/WaveFunctionComponent.h"
#include "QMCWaveFunctions/Jastrow/DiffTwoBodyJastrowOrbital.h"
#endif
#include "Particle/DistanceTableData.h"
#include "LongRange/StructFact.h"
#include "CPU/SIMD/aligned_allocator.hpp"
#include "CPU/SIMD/algorithm.hpp"
namespace qmcplusplus
{
// helper class to activate KEcorr during optimizing Jastrow
// helper class to activate KEcorr during optimizing Jastrow
// Computes the Chiesa kinetic-energy finite-size correction from the
// long-wavelength limit of the two-body Jastrow (requires structure factor).
template<typename RT, class FT>
class J2KECorrection
{
  size_t num_groups_;
  std::vector<size_t> num_elec_in_groups_;
  RT num_elecs_;
  RT vol;
  ///magnitude of the smallest k vector; only meaningful when SK_enabled
  RT G0mag = RT(0);
  ///pair functors, held by reference: reflects later updates by the owner
  const std::vector<FT*>& F_;
  bool SK_enabled;

public:
  J2KECorrection(const ParticleSet& targetPtcl, const std::vector<FT*>& F)
      : num_groups_(targetPtcl.groups()),
        num_elecs_(targetPtcl.getTotalNum()),
        vol(targetPtcl.Lattice.Volume),
        F_(F),
        SK_enabled(targetPtcl.SK != nullptr)
  {
    // compute num_elec_in_groups_
    num_elec_in_groups_.reserve(num_groups_); // was hard-coded 3; size to actual group count
    for (int i = 0; i < num_groups_; i++)
      num_elec_in_groups_.push_back(targetPtcl.last(i) - targetPtcl.first(i));

    if (SK_enabled)
      G0mag = std::sqrt(targetPtcl.SK->KLists.ksq[0]);
  }

  /** Chiesa KE correction; returns 0 when no structure factor is available. */
  RT computeKEcorr()
  {
    if (!SK_enabled)
      return 0;

    const int numPoints = 1000;
    RT uk = 0.0;
    RT a  = 1.0;

    // Integrate u(r) against sin(kr)/k for each pair type (trapezoid-like sum).
    for (int i = 0; i < num_groups_; i++)
    {
      int Ni = num_elec_in_groups_[i];
      for (int j = 0; j < num_groups_; j++)
      {
        int Nj = num_elec_in_groups_[j];
        if (F_[i * num_groups_ + j])
        {
          FT& ufunc = *(F_[i * num_groups_ + j]);
          RT radius = ufunc.cutoff_radius;
          RT k      = G0mag;
          RT dr     = radius / (RT)(numPoints - 1);
          for (int ir = 0; ir < numPoints; ir++)
          {
            RT r = dr * (RT)ir;
            RT u = ufunc.evaluate(r);
            uk += 0.5 * 4.0 * M_PI * r * std::sin(k * r) / k * u * dr * (RT)Nj / (RT)(Ni + Nj);
          }
        }
      }
    }
    // Fixed-point iteration for the RPA-like parameter a.
    for (int iter = 0; iter < 20; iter++)
      a = uk / (4.0 * M_PI * (1.0 / (G0mag * G0mag) - 1.0 / (G0mag * G0mag + 1.0 / a)));
    return 4.0 * M_PI * a / (4.0 * vol) * num_elecs_;
  }
};
/** @ingroup WaveFunctionComponent
* @brief Specialization for two-body Jastrow function using multiple functors
*
* Each pair-type can have distinct function \f$u(r_{ij})\f$.
* For electrons, distinct pair correlation functions are used
* for spins up-up/down-down and up-down/down-up.
*
* Based on J2OrbitalSoA.h with these considerations
* - DistanceTableData using SoA containers
* - support mixed precision: FT::real_type != OHMMS_PRECISION
 * - loops over the groups: eliminated PairID
* - support simd function
* - double the loop counts
* - Memory use is O(N).
*/
template<class FT>
class J2OrbitalSoA : public WaveFunctionComponent
{
public:
  ///alias FuncType
  using FuncType = FT;
  ///type of each component U, dU, d2U;
  using valT = typename FT::real_type;
  ///element position type
  using posT = TinyVector<valT, OHMMS_DIM>;
  ///use the same container
  using DistRow  = DistanceTableData::DistRow;
  using DisplRow = DistanceTableData::DisplRow;
  using gContainer_type = VectorSoaContainer<valT, OHMMS_DIM>;
  // Ye: leaving this public is bad but currently used by unit tests.
  ///Container for \f$F[ig*NumGroups+jg]\f$.
  std::vector<FT*> F;

protected:
  ///number of particles
  size_t N;
  ///number of particles + padded
  size_t N_padded;
  ///number of groups of the target particleset
  size_t NumGroups;
  ///diff value
  RealType DiffVal;
  ///Correction
  RealType KEcorr;
  ///\f$Uat[i] = sum_(j) u_{i,j}\f$
  Vector<valT> Uat;
  ///\f$dUat[i] = sum_(j) du_{i,j}\f$
  gContainer_type dUat;
  ///\f$d2Uat[i] = sum_(j) d2u_{i,j}\f$
  Vector<valT> d2Uat;
  ///value of the proposed move, saved by ratio()/ratioGrad() for acceptMove()
  valT cur_Uat;
  ///scratch rows for the proposed particle position
  aligned_vector<valT> cur_u, cur_du, cur_d2u;
  ///scratch rows for the pre-move particle position
  aligned_vector<valT> old_u, old_du, old_d2u;
  ///scratch buffers used by the functor SIMD kernels
  aligned_vector<valT> DistCompressed;
  aligned_vector<int> DistIndice;
  ///Unique J2 set for cleanup
  std::map<std::string, FT*> J2Unique;
  /// e-e table ID
  const int my_table_ID_;
  // helper for compute J2 Chiesa KE correction
  J2KECorrection<RealType, FT> j2_ke_corr_helper;

public:
  J2OrbitalSoA(const std::string& obj_name, ParticleSet& p, int tid);
  J2OrbitalSoA(const J2OrbitalSoA& rhs) = delete;
  ~J2OrbitalSoA();

  /* initialize storage */
  void init(ParticleSet& p);
  /** add functor for (ia,ib) pair */
  void addFunc(int ia, int ib, FT* j);

  /** check in an optimizable parameter
   * @param o a super set of optimizable variables
   */
  void checkInVariables(opt_variables_type& active)
  {
    // register every unique functor's parameters with both the global set
    // and this component's local myVars
    myVars.clear();
    typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
    while (it != it_end)
    {
      (*it).second->checkInVariables(active);
      (*it).second->checkInVariables(myVars);
      ++it;
    }
  }

  /** check out optimizable variables
   */
  void checkOutVariables(const opt_variables_type& active)
  {
    myVars.getIndex(active);
    Optimizable = myVars.is_optimizable();
    typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
    while (it != it_end)
    {
      (*it).second->checkOutVariables(active);
      ++it;
    }
    if (dPsi)
      dPsi->checkOutVariables(active);
  }

  ///reset the value of all the unique Two-Body Jastrow functions
  void resetParameters(const opt_variables_type& active)
  {
    if (!Optimizable)
      return;
    typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
    while (it != it_end)
    {
      (*it).second->resetParameters(active);
      ++it;
    }
    if (dPsi)
      dPsi->resetParameters(active);
    // copy the accepted optimizer values back into the local view
    for (int i = 0; i < myVars.size(); ++i)
    {
      int ii = myVars.Index[i];
      if (ii >= 0)
        myVars[i] = active[ii];
    }
  }

  ///recompute the Chiesa KE correction once optimization is done
  void finalizeOptimization() { KEcorr = j2_ke_corr_helper.computeKEcorr(); }

  /** print the state, e.g., optimizables */
  void reportStatus(std::ostream& os)
  {
    typename std::map<std::string, FT*>::iterator it(J2Unique.begin()), it_end(J2Unique.end());
    while (it != it_end)
    {
      (*it).second->myVars.print(os);
      ++it;
    }
  }

  WaveFunctionComponentPtr makeClone(ParticleSet& tqp) const;

  LogValueType evaluateLog(const ParticleSet& P, ParticleSet::ParticleGradient_t& G, ParticleSet::ParticleLaplacian_t& L);
  void evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi);

  /** recompute internal data assuming distance table is fully ready */
  void recompute(const ParticleSet& P);

  PsiValueType ratio(ParticleSet& P, int iat);

  ///ratios for all virtual moves of the reference particle VP.refPtcl
  void evaluateRatios(const VirtualParticleSet& VP, std::vector<ValueType>& ratios)
  {
    for (int k = 0; k < ratios.size(); ++k)
      ratios[k] =
          std::exp(Uat[VP.refPtcl] - computeU(VP.refPS, VP.refPtcl, VP.getDistTable(my_table_ID_).getDistRow(k)));
  }
  void evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios);

  GradType evalGrad(ParticleSet& P, int iat);

  PsiValueType ratioGrad(ParticleSet& P, int iat, GradType& grad_iat);

  void acceptMove(ParticleSet& P, int iat, bool safe_to_delay = false);
  inline void restore(int iat) {}

  /** compute G and L after the sweep
   */
  LogValueType evaluateGL(const ParticleSet& P,
                          ParticleSet::ParticleGradient_t& G,
                          ParticleSet::ParticleLaplacian_t& L,
                          bool fromscratch = false);

  ///move Uat/dUat/d2Uat storage into the walker buffer on first call
  inline void registerData(ParticleSet& P, WFBufferType& buf)
  {
    if (Bytes_in_WFBuffer == 0)
    {
      Bytes_in_WFBuffer = buf.current();
      buf.add(Uat.begin(), Uat.end());
      buf.add(dUat.data(), dUat.end());
      buf.add(d2Uat.begin(), d2Uat.end());
      Bytes_in_WFBuffer = buf.current() - Bytes_in_WFBuffer;
      // free local space
      Uat.free();
      dUat.free();
      d2Uat.free();
    }
    else
    {
      buf.forward(Bytes_in_WFBuffer);
    }
  }

  ///re-attach Uat/dUat/d2Uat to the walker buffer storage registered above
  inline void copyFromBuffer(ParticleSet& P, WFBufferType& buf)
  {
    Uat.attachReference(buf.lendReference<valT>(N), N);
    dUat.attachReference(N, N_padded, buf.lendReference<valT>(N_padded * OHMMS_DIM));
    d2Uat.attachReference(buf.lendReference<valT>(N), N);
  }

  LogValueType updateBuffer(ParticleSet& P, WFBufferType& buf, bool fromscratch = false)
  {
    evaluateGL(P, P.G, P.L, false);
    buf.forward(Bytes_in_WFBuffer);
    return LogValue;
  }

  /*@{ internal compute engines*/
  ///sum of u(r_ij) over all j for particle iat given a distance row
  inline valT computeU(const ParticleSet& P, int iat, const DistRow& dist)
  {
    valT curUat(0);
    const int igt = P.GroupID[iat] * NumGroups;
    for (int jg = 0; jg < NumGroups; ++jg)
    {
      const FuncType& f2(*F[igt + jg]);
      int iStart = P.first(jg);
      int iEnd   = P.last(jg);
      curUat += f2.evaluateV(iat, iStart, iEnd, dist.data(), DistCompressed.data());
    }
    return curUat;
  }

  inline void computeU3(const ParticleSet& P,
                        int iat,
                        const DistRow& dist,
                        RealType* restrict u,
                        RealType* restrict du,
                        RealType* restrict d2u,
                        bool triangle = false);

  /** compute gradient
   */
  inline posT accumulateG(const valT* restrict du, const DisplRow& displ) const
  {
    posT grad;
    for (int idim = 0; idim < OHMMS_DIM; ++idim)
    {
      const valT* restrict dX = displ.data(idim);
      valT s                  = valT();

#pragma omp simd reduction(+ : s) aligned(du, dX: QMC_SIMD_ALIGNMENT)
      for (int jat = 0; jat < N; ++jat)
        s += du[jat] * dX[jat];
      grad[idim] = s;
    }
    return grad;
  }
  /**@} */

  RealType ChiesaKEcorrection() { return KEcorr = j2_ke_corr_helper.computeKEcorr(); }

  RealType KECorrection() { return KEcorr; }
};
template<typename FT>
J2OrbitalSoA<FT>::J2OrbitalSoA(const std::string& obj_name, ParticleSet& p, int tid)
    // addTable(p) registers the e-e distance table and returns its ID.
    // Note: j2_ke_corr_helper holds F by reference, so constructing it with
    // the still-empty F is fine; functors added later are visible to it.
    : WaveFunctionComponent("J2OrbitalSoA", obj_name), my_table_ID_(p.addTable(p)), j2_ke_corr_helper(p, F)
{
  if (myName.empty())
    throw std::runtime_error("J2OrbitalSoA object name cannot be empty!");
  init(p);
  KEcorr = 0.0;
}
template<typename FT>
J2OrbitalSoA<FT>::~J2OrbitalSoA()
{
  // J2Unique owns one instance of each distinct functor; release them here.
  for (auto& entry : J2Unique)
    delete entry.second;
}
template<typename FT>
void J2OrbitalSoA<FT>::init(ParticleSet& p)
{
  // Size all per-particle accumulators and scratch buffers for N particles.
  N        = p.getTotalNum();
  N_padded = getAlignedSize<valT>(N); // padded length for SIMD-aligned SoA storage
  NumGroups = p.groups();

  Uat.resize(N);
  dUat.resize(N);
  d2Uat.resize(N);
  cur_u.resize(N);
  cur_du.resize(N);
  cur_d2u.resize(N);
  old_u.resize(N);
  old_du.resize(N);
  old_d2u.resize(N);
  // one functor slot per ordered group pair; filled by addFunc
  F.resize(NumGroups * NumGroups, nullptr);
  DistCompressed.resize(N);
  DistIndice.resize(N);
}
template<typename FT>
void J2OrbitalSoA<FT>::addFunc(int ia, int ib, FT* j)
{
  // Install functor j for the (ia,ib) group pair. Ownership is recorded in
  // J2Unique (keyed by the pair digits) for cleanup in the destructor.
  if (ia == ib)
  {
    if (ia == 0) //first time, assign everything
    {
      int ij = 0;
      for (int ig = 0; ig < NumGroups; ++ig)
        for (int jg = 0; jg < NumGroups; ++jg, ++ij)
          if (F[ij] == nullptr)
            F[ij] = j;
    }
    else
      F[ia * NumGroups + ib] = j;
  }
  else
  {
    if (N == 2)
    {
      // a very special case, 1 up + 1 down
      // uu/dd was prevented by the builder
      for (int ig = 0; ig < NumGroups; ++ig)
        for (int jg = 0; jg < NumGroups; ++jg)
          F[ig * NumGroups + jg] = j;
    }
    else
    {
      // generic case: unlike-pair functor fills both symmetric slots
      F[ia * NumGroups + ib] = j;
      F[ib * NumGroups + ia] = j;
    }
  }
  std::stringstream aname;
  aname << ia << ib;
  J2Unique[aname.str()] = j;
}
/** Deep-copy this Jastrow for particle set tqp.
 * Each distinct functor is cloned exactly once; addFunc re-installs the clone
 * into all symmetric (ig,jg)/(jg,ig) slots, mirroring the original layout.
 */
template<typename FT>
WaveFunctionComponentPtr J2OrbitalSoA<FT>::makeClone(ParticleSet& tqp) const
{
  J2OrbitalSoA<FT>* j2copy = new J2OrbitalSoA<FT>(myName, tqp, -1);
  if (dPsi)
    j2copy->dPsi = dPsi->makeClone(tqp);
  // map original functor -> its clone so shared functors stay shared
  std::map<const FT*, FT*> fcmap;
  for (int ig = 0; ig < NumGroups; ++ig)
    for (int jg = ig; jg < NumGroups; ++jg)
    {
      const int ij = ig * NumGroups + jg;
      if (F[ij] == nullptr)
        continue;
      auto fit = fcmap.find(F[ij]);
      if (fit == fcmap.end())
      {
        FT* fc = new FT(*F[ij]);
        j2copy->addFunc(ig, jg, fc);
        //if (dPsi) (j2copy->dPsi)->addFunc(aname.str(),ig,jg,fc);
        fcmap[F[ij]] = fc;
      }
    }
  j2copy->KEcorr      = KEcorr;
  j2copy->Optimizable = Optimizable;
  return j2copy;
}
/** internal function to compute \f$\sum_j u(r_j), du/dr, d2u/dr2\f$
* @param P particleset
* @param iat particle index
* @param dist starting distance
* @param u starting value
* @param du starting first deriv
* @param d2u starting second deriv
*/
template<typename FT>
inline void J2OrbitalSoA<FT>::computeU3(const ParticleSet& P,
                                        int iat,
                                        const DistRow& dist,
                                        RealType* restrict u,
                                        RealType* restrict du,
                                        RealType* restrict d2u,
                                        bool triangle)
{
  // With triangle=true only neighbors j < iat are evaluated (lower-triangle
  // sweep used by recompute); otherwise all N entries are filled.
  const int jelmax = triangle ? iat : N;
  constexpr valT czero(0);
  std::fill_n(u, jelmax, czero);
  std::fill_n(du, jelmax, czero);
  std::fill_n(d2u, jelmax, czero);

  const int igt = P.GroupID[iat] * NumGroups;
  for (int jg = 0; jg < NumGroups; ++jg)
  {
    const FuncType& f2(*F[igt + jg]);
    int iStart = P.first(jg);
    int iEnd   = std::min(jelmax, P.last(jg));
    f2.evaluateVGL(iat, iStart, iEnd, dist.data(), u, du, d2u, DistCompressed.data(), DistIndice.data());
  }
  //u[iat]=czero;
  //du[iat]=czero;
  //d2u[iat]=czero;
}
template<typename FT>
typename J2OrbitalSoA<FT>::PsiValueType J2OrbitalSoA<FT>::ratio(ParticleSet& P, int iat)
{
  // Ratio-only move: cache the proposed value so a later acceptMove can
  // finish the derivative update.
  UpdateMode = ORB_PBYP_RATIO;

  const auto& proposed_dists = P.getDistTable(my_table_ID_).getTempDists();
  cur_Uat                    = computeU(P, iat, proposed_dists);

  const PsiValueType log_ratio = static_cast<PsiValueType>(Uat[iat] - cur_Uat);
  return std::exp(log_ratio);
}
template<typename FT>
inline void J2OrbitalSoA<FT>::evaluateRatiosAlltoOne(ParticleSet& P, std::vector<ValueType>& ratios)
{
  // Ratios for moving each particle i to the single temporary position held
  // in the distance table's temp row.
  const auto& d_table = P.getDistTable(my_table_ID_);
  const auto& dist    = d_table.getTempDists();

  for (int ig = 0; ig < NumGroups; ++ig)
  {
    const int igt = ig * NumGroups;
    valT sumU(0);
    // total u between the temp position and all particles, per source group
    for (int jg = 0; jg < NumGroups; ++jg)
    {
      const FuncType& f2(*F[igt + jg]);
      int iStart = P.first(jg);
      int iEnd   = P.last(jg);
      sumU += f2.evaluateV(-1, iStart, iEnd, dist.data(), DistCompressed.data());
    }

    for (int i = P.first(ig); i < P.last(ig); ++i)
    {
      // remove self-interaction
      const valT Uself = F[igt + ig]->evaluate(dist[i]);
      ratios[i]        = std::exp(Uat[i] + Uself - sumU);
    }
  }
}
template<typename FT>
typename J2OrbitalSoA<FT>::GradType J2OrbitalSoA<FT>::evalGrad(ParticleSet& P, int iat)
{
  // The per-particle gradient is kept up to date in dUat; just wrap it.
  GradType cached_grad(dUat[iat]);
  return cached_grad;
}
template<typename FT>
typename J2OrbitalSoA<FT>::PsiValueType J2OrbitalSoA<FT>::ratioGrad(ParticleSet& P, int iat, GradType& grad_iat)
{
  // Partial update: compute value and derivatives at the proposed position;
  // cur_* buffers are reused by acceptMove if the move is accepted.
  UpdateMode = ORB_PBYP_PARTIAL;

  computeU3(P, iat, P.getDistTable(my_table_ID_).getTempDists(), cur_u.data(), cur_du.data(), cur_d2u.data());
  cur_Uat = simd::accumulate_n(cur_u.data(), N, valT());
  DiffVal = Uat[iat] - cur_Uat;
  grad_iat += accumulateG(cur_du.data(), P.getDistTable(my_table_ID_).getTempDispls());
  return std::exp(static_cast<PsiValueType>(DiffVal));
}
template<typename FT>
void J2OrbitalSoA<FT>::acceptMove(ParticleSet& P, int iat, bool safe_to_delay)
{
  // get the old u, du, d2u
  const auto& d_table = P.getDistTable(my_table_ID_);
  computeU3(P, iat, d_table.getOldDists(), old_u.data(), old_du.data(), old_d2u.data());
  if (UpdateMode == ORB_PBYP_RATIO)
  { //ratio-only during the move; need to compute derivatives
    const auto& dist = d_table.getTempDists();
    computeU3(P, iat, dist, cur_u.data(), cur_du.data(), cur_d2u.data());
  }

  // Update the per-particle sums of all neighbors j by the delta between the
  // new and old contribution of particle iat.
  valT cur_d2Uat(0);
  const auto& new_dr    = d_table.getTempDispls();
  const auto& old_dr    = d_table.getOldDispls();
  // Laplacian factor (D-1) from the radial form of the Laplacian in D dims.
  constexpr valT lapfac = OHMMS_DIM - RealType(1);
#pragma omp simd reduction(+ : cur_d2Uat)
  for (int jat = 0; jat < N; jat++)
  {
    const valT du   = cur_u[jat] - old_u[jat];
    const valT newl = cur_d2u[jat] + lapfac * cur_du[jat];
    const valT dl   = old_d2u[jat] + lapfac * old_du[jat] - newl;
    Uat[jat] += du;
    d2Uat[jat] += dl;
    cur_d2Uat -= newl;
  }

  // Update gradients: neighbors get the delta, iat accumulates the new total.
  posT cur_dUat;
  for (int idim = 0; idim < OHMMS_DIM; ++idim)
  {
    const valT* restrict new_dX    = new_dr.data(idim);
    const valT* restrict old_dX    = old_dr.data(idim);
    const valT* restrict cur_du_pt = cur_du.data();
    const valT* restrict old_du_pt = old_du.data();
    valT* restrict save_g          = dUat.data(idim);
    valT cur_g                     = cur_dUat[idim];
#pragma omp simd reduction(+ : cur_g) aligned(old_dX, new_dX, save_g, cur_du_pt, old_du_pt: QMC_SIMD_ALIGNMENT)
    for (int jat = 0; jat < N; jat++)
    {
      const valT newg = cur_du_pt[jat] * new_dX[jat];
      const valT dg   = newg - old_du_pt[jat] * old_dX[jat];
      save_g[jat] -= dg;
      cur_g += newg;
    }
    cur_dUat[idim] = cur_g;
  }

  // Finally commit particle iat's own value, gradient, and Laplacian.
  LogValue += Uat[iat] - cur_Uat;
  Uat[iat]   = cur_Uat;
  dUat(iat)  = cur_dUat;
  d2Uat[iat] = cur_d2Uat;
}
template<typename FT>
void J2OrbitalSoA<FT>::recompute(const ParticleSet& P)
{
  // Rebuild Uat/dUat/d2Uat from scratch with a lower-triangle sweep:
  // for each iat only pairs (jat < iat) are evaluated once, and both
  // particles of each pair receive their contribution.
  const auto& d_table = P.getDistTable(my_table_ID_);
  for (int ig = 0; ig < NumGroups; ++ig)
  {
    for (int iat = P.first(ig), last = P.last(ig); iat < last; ++iat)
    {
      computeU3(P, iat, d_table.getDistRow(iat), cur_u.data(), cur_du.data(), cur_d2u.data(), true);
      Uat[iat] = simd::accumulate_n(cur_u.data(), iat, valT());
      posT grad;
      valT lap(0);
      const valT* restrict u    = cur_u.data();
      const valT* restrict du   = cur_du.data();
      const valT* restrict d2u  = cur_d2u.data();
      const auto& displ         = d_table.getDisplRow(iat);
      // Laplacian factor (D-1) from the radial Laplacian in D dimensions.
      constexpr valT lapfac     = OHMMS_DIM - RealType(1);
#pragma omp simd reduction(+ : lap) aligned(du, d2u: QMC_SIMD_ALIGNMENT)
      for (int jat = 0; jat < iat; ++jat)
        lap += d2u[jat] + lapfac * du[jat];
      for (int idim = 0; idim < OHMMS_DIM; ++idim)
      {
        const valT* restrict dX = displ.data(idim);
        valT s                  = valT();
#pragma omp simd reduction(+ : s) aligned(du, dX: QMC_SIMD_ALIGNMENT)
        for (int jat = 0; jat < iat; ++jat)
          s += du[jat] * dX[jat];
        grad[idim] = s;
      }
      dUat(iat)  = grad;
      d2Uat[iat] = -lap;

      // add the contribution from the upper triangle
#pragma omp simd aligned(u, du, d2u: QMC_SIMD_ALIGNMENT)
      for (int jat = 0; jat < iat; jat++)
      {
        Uat[jat] += u[jat];
        d2Uat[jat] -= d2u[jat] + lapfac * du[jat];
      }
      for (int idim = 0; idim < OHMMS_DIM; ++idim)
      {
        valT* restrict save_g   = dUat.data(idim);
        const valT* restrict dX = displ.data(idim);
#pragma omp simd aligned(save_g, du, dX: QMC_SIMD_ALIGNMENT)
        for (int jat = 0; jat < iat; jat++)
          save_g[jat] -= du[jat] * dX[jat];
      }
    }
  }
}
template<typename FT>
typename J2OrbitalSoA<FT>::LogValueType J2OrbitalSoA<FT>::evaluateLog(const ParticleSet& P,
                                                                      ParticleSet::ParticleGradient_t& G,
                                                                      ParticleSet::ParticleLaplacian_t& L)
{
  // Full evaluation is just evaluateGL with a from-scratch recompute.
  const bool from_scratch = true;
  return evaluateGL(P, G, L, from_scratch);
}
template<typename FT>
WaveFunctionComponent::LogValueType J2OrbitalSoA<FT>::evaluateGL(const ParticleSet& P,
                                                                 ParticleSet::ParticleGradient_t& G,
                                                                 ParticleSet::ParticleLaplacian_t& L,
                                                                 bool fromscratch)
{
  // Accumulate this component's gradient and Laplacian into G and L and
  // return the log value. The 0.5 factor compensates for the double counting
  // of each pair in the per-particle sums Uat.
  if (fromscratch)
    recompute(P);
  LogValue = valT(0);
  for (int iat = 0; iat < N; ++iat)
  {
    LogValue += Uat[iat];
    G[iat] += dUat[iat];
    L[iat] += d2Uat[iat];
  }
  return LogValue = -LogValue * 0.5;
}
template<typename FT>
void J2OrbitalSoA<FT>::evaluateHessian(ParticleSet& P, HessVector_t& grad_grad_psi)
{
  // Accumulate the pair-wise Hessian of log(psi) for every particle over the
  // lower triangle; each pair contributes symmetrically to both particles.
  LogValue = 0.0;
  const DistanceTableData& d_ee(P.getDistTable(my_table_ID_));
  valT dudr, d2udr2;

  Tensor<valT, DIM> ident;
  grad_grad_psi = 0.0;
  ident.diagonal(1.0);

  for (int i = 1; i < N; ++i)
  {
    const auto& dist  = d_ee.getDistRow(i);
    const auto& displ = d_ee.getDisplRow(i);
    auto ig           = P.GroupID[i];
    const int igt     = ig * NumGroups;
    for (int j = 0; j < i; ++j)
    {
      auto r    = dist[j];
      auto rinv = 1.0 / r;
      auto dr   = displ[j];
      auto jg   = P.GroupID[j];
      auto uij  = F[igt + jg]->evaluate(r, dudr, d2udr2);
      LogValue -= uij;
      // radial + angular parts of the Hessian of u(r_ij)
      auto hess = rinv * rinv * outerProduct(dr, dr) * (d2udr2 - dudr * rinv) + ident * dudr * rinv;
      grad_grad_psi[i] -= hess;
      grad_grad_psi[j] -= hess;
    }
  }
}
} // namespace qmcplusplus
#endif
|
spmm_blocking_libxsmm.h | /*!
* Copyright (c) 2021 Intel Corporation
* \file array/cpu/spmm.h
* \brief SPMM CPU kernel function header.
* \author Sanchit Misra <sanchit.misra@intel.com>,
* Ramanarayan Mohanty <ramanarayan.mohanty@intel.com>,
* Vasimuddin Md <vasimuddin.md@intel.com>,
* Sasikanth Avancha <sasikanth.avancha@intel.com>
*/
#ifndef DGL_ARRAY_CPU_SPMM_BLOCKING_LIBXSMM_H_
#define DGL_ARRAY_CPU_SPMM_BLOCKING_LIBXSMM_H_
#include <dgl/array.h>
#include <dgl/bcast.h>
#include <dmlc/logging.h>
#include <algorithm>
#if !defined(_WIN32)
#ifdef USE_AVX
#ifdef USE_LIBXSMM
#include <unistd.h>
#include <libxsmm.h>
#ifdef DEBUG
#include <x86intrin.h>
#endif // DEBUG
#include <dmlc/omp.h>
#define NUM_BLOCKS_PER_THREAD 20
#define BLOCKING_HEURISTIC_PARAM 500
namespace dgl {
namespace aten {
namespace cpu {
// Lightweight non-owning view of a CSR block used during tiling.
// Pointers alias buffers owned elsewhere (the original CSRMatrix or the
// slabs allocated in SpMMCreateBlocks).
template <typename IdType, typename DType>
struct CSRMatrixInternal {
  IdType num_rows;   // rows in this block
  IdType num_cols;   // columns in this block
  IdType *indptr;    // row pointer array, length num_rows + 1
  IdType *indices;   // column indices of nonzeros
  DType *data;       // edge IDs (or edge data indices) of nonzeros
};
// Size of the last-level (L3) cache in bytes; falls back to the
// compile-time default when the OS cannot report it.
int32_t GetLLCSize() {
  int32_t llc_bytes = sysconf(_SC_LEVEL3_CACHE_SIZE);
  if (llc_bytes < 0)
    llc_bytes = DGL_CPU_LLC_SIZE;  // sysconf reports -1 when unknown
  return llc_bytes;
}
/*!
* \brief Tile the CSR matrix to roughly make sure that the column tiles and
* corresponding neighbor features fit into LLC and the row tiles
* are assigned to OMP threads.
* \param csr The Csr matrix.
* \param block_csr_array The array containing csr matrices of all blocks.
* \param num_M_blocks Number of blocks to create along the rows of adjacency matrix.
* \param num_K_blocks Number of blocks to create along the columns of adjacency matrix.
* \param M_block_size block size along the rows of adjacency matrix.
* \param K_block_size block size along the columns of adjacency matrix.
* \param use_lhs Whether to use lhs.
* \param use_rhs Whether to use rhs.
*/
template <typename IdType>
inline void SpMMCreateBlocks(
    const CSRMatrix& csr,
    CSRMatrixInternal<IdType, IdType> *block_csr_array,
    IdType num_M_blocks,
    IdType num_K_blocks,
    IdType M_block_size,
    IdType K_block_size,
    bool use_lhs, bool use_rhs) {
  const IdType M = csr.num_rows;
  const IdType K = csr.num_cols;
  IdType* indptr = csr.indptr.Ptr<IdType>();
  IdType* indices = csr.indices.Ptr<IdType>();
  IdType* edges = csr.data.Ptr<IdType>();
  CHECK_NOTNULL(indptr);
  if (use_lhs)
    CHECK_NOTNULL(indices);
  if (use_rhs)
    CHECK_NOTNULL(edges);
  if (num_K_blocks > 1) {
    // Single slabs shared by all blocks; freed via block 0 in SpMMFreeBlocks.
    // NOTE(review): aligned_alloc formally requires size to be a multiple of
    // the alignment; glibc tolerates this — confirm for other libcs.
    IdType *indptr_block_buf = reinterpret_cast<IdType *>(aligned_alloc(64,
        (M_block_size + 1) * num_M_blocks *
        num_K_blocks * sizeof(IdType)));
    IdType *indices_block_buf = reinterpret_cast<IdType *>(aligned_alloc(64,
        indptr[M] * sizeof(IdType)));
    IdType *edges_block_buf = reinterpret_cast<IdType *>(aligned_alloc(64,
        indptr[M] * sizeof(IdType)));
#pragma omp parallel
    {
      // Per-row cursor: [current position, row end] for each row of the block.
      IdType *my_cur_col_id = reinterpret_cast<IdType *>(aligned_alloc(64, 2 * M_block_size *
          sizeof(IdType)));
#pragma omp for
      for (IdType m = 0; m < num_M_blocks; m++) {
        const IdType M_start = m * M_block_size;
        const IdType M_end = std::min((m + 1) * M_block_size, M);
        const IdType nnz = indptr[M_end] - indptr[M_start];
        IdType cur_indices_id = 0;
        IdType *my_indices_block_buf, *my_edges_block_buf;
        if (use_lhs)
          my_indices_block_buf = indices_block_buf + indptr[M_start];
        if (use_rhs)
          my_edges_block_buf = edges_block_buf + indptr[M_start];
        for (IdType i = M_start; i < M_end; i++) {
          my_cur_col_id[(i - M_start) * 2] = indptr[i];
          my_cur_col_id[(i - M_start) * 2 + 1] = indptr[i + 1];
        }
        // Walk K tiles in order; rows are pre-sorted by column so each pass
        // consumes a contiguous run of each row.
        for (IdType k = 0; k < num_K_blocks; k++) {
          const IdType K_start = k * K_block_size;
          const IdType K_end = std::min((k + 1) * K_block_size, K);
          CSRMatrixInternal<IdType, IdType> cur_csr;
          cur_csr.num_rows = M_end - M_start;
          cur_csr.num_cols = K_end - K_start;
          // Create csr_ij
          IdType *cur_csr_indptr = indptr_block_buf + (m * num_K_blocks + k) * (M_block_size + 1);
          IdType *cur_csr_indices = nullptr, *cur_csr_edges = nullptr;
          if (use_lhs)
            cur_csr_indices = my_indices_block_buf + cur_indices_id;
          if (use_rhs)
            cur_csr_edges = my_edges_block_buf + cur_indices_id;
          IdType cur_nnz = 0;
          for (IdType i = M_start; i < M_end; i++) {
            const IdType row_start = my_cur_col_id[(i - M_start) * 2];
            const IdType row_end = my_cur_col_id[(i - M_start) * 2 + 1];
            cur_csr_indptr[i - M_start] = cur_nnz;
            IdType eid;
            for (eid = row_start; eid < row_end; eid++) {
              const IdType src = indices[eid];
              if (src >= K_end) {
                break;
              }
              CHECK_LT(cur_indices_id + cur_nnz, nnz);
              if (use_lhs)
                cur_csr_indices[cur_nnz] = src;
              if (use_rhs)
                // Read edges only when edge data is used; edges may be null
                // otherwise (CHECK_NOTNULL above only covers use_rhs).
                cur_csr_edges[cur_nnz] = edges[eid];
              cur_nnz++;
            }
            my_cur_col_id[(i - M_start) * 2] = eid;
          }
          cur_csr_indptr[cur_csr.num_rows] = cur_nnz;
          cur_indices_id += cur_nnz;
          cur_csr.indptr = cur_csr_indptr;
          if (use_lhs)
            cur_csr.indices = cur_csr_indices;
          if (use_rhs)
            cur_csr.data = cur_csr_edges;
          block_csr_array[m * num_K_blocks + k] = cur_csr;
        }
        CHECK_EQ(nnz, cur_indices_id);
      }
      free(my_cur_col_id);
    }
  } else {
    // No K tiling: blocks alias the original arrays, nothing is copied.
    // (was an orphaned "#pragma omp for" outside any parallel region,
    //  which ran sequentially)
#pragma omp parallel for
    for (IdType m = 0; m < num_M_blocks; m++) {
      const IdType M_start = m * M_block_size;
      const IdType M_end = std::min((m + 1) * M_block_size, M);
      CSRMatrixInternal<IdType, IdType> cur_csr;
      cur_csr.num_rows = M_end - M_start;
      cur_csr.num_cols = K;
      cur_csr.indptr = indptr + M_start;
      cur_csr.indices = indices;
      cur_csr.data = edges;
      block_csr_array[m] = cur_csr;
    }
  }
}
/*!
* \brief Create libxsmm kernel.
* \param has_idx For the edge features, are there indices available.
* \param N Feature size.
* \param redop_flag Flag specifying the reduction operation.
* \param is_cmp Is the reduction operation a compare operation.
* \note libxsmm_dispatch_meltw_opreduce_vecs_idx creates a JIT'ed kernel.
* Given a node u, the kernel performs an elementwise "Op" on the
* features of the neighbors and/or the edges incident on u.
* Subsequently, it performs an elementwise "Redop" on all such
* features created and stores into the feature of node u.
* It uses a SIMD and a cache efficient design and also provides
* support to enable software prefetching if needed. For IdType,
* it supports INT32 and INT64. For DType, it supports BF16 and FP32.
* It supports all the "Ops" and "Redops" supported by DGL. Once a
* kernel is generated by libxsmm_dispatch_meltw_opreduce_vecs_idx,
* it is cached for the entire duration of the execution of a program
* so that subsequently if the kernel is needed again, it just returns
* the cached copy.
*/
template <typename IdType, typename DType, typename Op>
inline libxsmm_meltwfunction_opreduce_vecs_idx SpMMCreateLibxsmmKernel(
    bool has_idx,
    IdType N,
    libxsmm_meltw_opreduce_vecs_flags redop_flag,
    bool is_cmp) {
  int _ld = N;
  // Initialize to a valid value so the flag variable is never read
  // uninitialized (UB) if Op matches none of the branches below.
  libxsmm_meltw_opreduce_vecs_flags opredop_flags = LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OP_COPY;
  // First, set the Op in the opredop_flags
  if (std::is_same<Op, op::Add<DType>>::value) {
    opredop_flags = LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OP_ADD;
  } else if (std::is_same<Op, op::Sub<DType>>::value) {
    opredop_flags = LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OP_SUB;
  } else if (std::is_same<Op, op::Mul<DType>>::value) {
    opredop_flags = LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OP_MUL;
  } else if (std::is_same<Op, op::Div<DType>>::value) {
    opredop_flags = LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OP_DIV;
  } else if (std::is_same<Op, op::CopyLhs<DType>>::value) {
    opredop_flags = LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OP_COPY;
  } else if (std::is_same<Op, op::CopyRhs<DType>>::value) {
    opredop_flags = LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OP_COPY;
  } else {
    LOG(FATAL) << "Unsupported Op for the libxsmm SpMM kernel!";
  }
  // Second, set which of lhs or rhs is considered first and second operand.
  // This is needed since libxsmm assumes that the copy operation always copies the first operand.
  // So, if we need to copy rhs, we need to set that as the first operand.
  // For rhs, we also set whether to use implicit indices or provided indices.
  if (std::is_same<Op, op::CopyLhs<DType>>::value) {
    opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags |
        LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OPORDER_VECIDX_VECIN);
  } else if (std::is_same<Op, op::CopyRhs<DType>>::value) {
    opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags |
        LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OPORDER_VECIN_VECIDX);
    if (!has_idx) {
      opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags |
          LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_IMPLICIT_INDEXED_VECIDX);
    }
  } else {
    opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags |
        LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OPORDER_VECIDX_VECIN);
    if (has_idx) {
      opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags |
          LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_INDEXED_VEC);
    } else {
      opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags |
          LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_IMPLICIT_INDEXED_VEC);
    }
  }
  // Third, we set the Redop in the opredop_flags
  opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags | redop_flag);
  // Fourth, in case of Cmp Redop, set whether to record argmax/argmin for lhs/rhs
  if (is_cmp) {
    if (Op::use_lhs) {
      opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags |
          LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_RECORD_ARGOP_OFF_VEC_0);
    }
    if (Op::use_rhs) {
      opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags |
          LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_RECORD_ARGOP_OFF_VEC_1);
    }
  }
  libxsmm_meltwfunction_opreduce_vecs_idx kernel = nullptr;
  // NOTE(review): only FP32 is dispatched here; the header comment mentions
  // BF16 support as well — confirm whether BF16 dispatch lives elsewhere.
  if (std::is_same<DType, float>::value) {
    kernel = libxsmm_dispatch_meltw_opreduce_vecs_idx(
        N, &_ld, &_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32,
        (sizeof(IdType) == 8) ? LIBXSMM_DATATYPE_I64 : LIBXSMM_DATATYPE_I32, opredop_flags);
  }
  if (kernel == nullptr) {
    LOG(FATAL) << "Failed to generate libxsmm kernel for the SpMM operation!";
  }
  return kernel;
}
/*!
* \brief Use libxsmm to perform SpMM-Sum on all blocks.
* \param block_csr_array The array containing csr matrices of all blocks.
* \param B The feature on source nodes.
* \param E The feature on edges.
* \param C The result feature on destination nodes.
* \param has_idx For the edge features, are there indices available.
* \param N Feature size.
* \param num_M_blocks Number of blocks to create along the rows of adjacency matrix.
* \param num_K_blocks Number of blocks to create along the columns of adjacency matrix.
* \param M_block_size block size along the rows of adjacency matrix.
* \param kernel The libxsmm kernel.
*/
/* Apply the JIT'ed op-reduce kernel row by row over all tiles, accumulating
 * into C. K tiles are processed in order (each adds into the same output
 * rows); M tiles are distributed dynamically across threads. */
template <typename IdType, typename DType>
inline void SpMMBlockwiseOpSum(
    CSRMatrixInternal<IdType, IdType> *block_csr_array,
    const DType *B, const DType *E, DType *C, bool has_idx, IdType N,
    IdType num_M_blocks, IdType num_K_blocks, IdType M_block_size,
    libxsmm_meltwfunction_opreduce_vecs_idx kernel) {
  // View flat buffers as [rows][N] (VLA-typed pointers, GNU extension).
  DType (*in_matrix1)[N] = (DType (*)[N])B;
  DType (*in_matrix2)[N] = (DType (*)[N])E;
  DType (*output)[N] = (DType (*)[N])C;

#pragma omp parallel
  {
    for (IdType k = 0; k < num_K_blocks; k++) {
#pragma omp for schedule(dynamic)
      for (IdType m = 0; m < num_M_blocks; m++) {
        CSRMatrixInternal<IdType, IdType> cur_csr = block_csr_array[m * num_K_blocks + k];
        const IdType M_start = m * M_block_size;
        for (IdType i = 0; i < cur_csr.num_rows; i++) {
          const IdType row_start = cur_csr.indptr[i];
          const IdType row_end = cur_csr.indptr[i + 1];
          const IdType dst = i + M_start;
          libxsmm_meltw_opreduce_vecs_idx_param params;
          params.n = row_end - row_start;
          params.indices = &cur_csr.indices[row_start];
          params.in_matrix = in_matrix1;
          params.out_vec = &output[dst][0];
          params.scale_vals = nullptr;
          if (has_idx) {
            params.in_matrix2 = in_matrix2;
            params.indices2 = &cur_csr.data[row_start];
          } else {
            // edges are contiguous: feature row r corresponds to nnz r
            params.in_matrix2 = &in_matrix2[row_start];
          }
          kernel(&params);  // was mojibake "kernel(¶ms);" — would not compile
        }
      }
    }
  }
}
/*!
* \brief Use libxsmm to perform SpMM-Max/Min on all blocks.
* \param block_csr_array The array containing csr matrices of all blocks.
* \param B The feature on source nodes.
* \param E The feature on edges.
* \param C The result feature on destination nodes.
* \param argB Arg-Min/Max on source nodes.
* \param argE Arg-Min/Max on edges.
* \param has_idx For the edge features, are there indices available.
* \param N Feature size.
* \param num_M_blocks Number of blocks to create along the rows of adjacency matrix.
* \param num_K_blocks Number of blocks to create along the columns of adjacency matrix.
* \param M_block_size block size along the rows of adjacency matrix.
* \param kernel The libxsmm kernel.
*/
/* Same tiled traversal as SpMMBlockwiseOpSum, but for Max/Min reductions:
 * the kernel additionally records arg-positions into argB/argE. */
template <typename IdType, typename DType, typename Op, typename Cmp>
inline void SpMMBlockwiseOpCmp(
    CSRMatrixInternal<IdType, IdType> *block_csr_array,
    const DType *B, const DType *E, DType *C, IdType *argB, IdType *argE,
    bool has_idx, IdType N,
    IdType num_M_blocks, IdType num_K_blocks, IdType M_block_size,
    libxsmm_meltwfunction_opreduce_vecs_idx kernel) {
  // View flat buffers as [rows][N] (VLA-typed pointers, GNU extension).
  DType (*in_matrix1)[N] = (DType (*)[N])B;
  DType (*in_matrix2)[N] = (DType (*)[N])E;
  DType (*output)[N] = (DType (*)[N])C;
  IdType (*out_matrix1)[N] = (IdType (*)[N])argB;
  IdType (*out_matrix2)[N] = (IdType (*)[N])argE;

#pragma omp parallel
  {
    for (IdType k = 0; k < num_K_blocks; k++) {
#pragma omp for schedule(dynamic)
      for (IdType m = 0; m < num_M_blocks; m++) {
        CSRMatrixInternal<IdType, IdType> cur_csr = block_csr_array[m * num_K_blocks + k];
        const IdType M_start = m * M_block_size;
        for (IdType i = 0; i < cur_csr.num_rows; i++) {
          const IdType row_start = cur_csr.indptr[i];
          const IdType row_end = cur_csr.indptr[i + 1];
          const IdType dst = i + M_start;
          libxsmm_meltw_opreduce_vecs_idx_param params;
          params.n = row_end - row_start;
          params.indices = &cur_csr.indices[row_start];
          params.in_matrix = in_matrix1;
          params.out_vec = &output[dst][0];
          params.argop_off_vec_0 = &out_matrix1[dst][0];  // arg positions for lhs
          params.argop_off_vec_1 = &out_matrix2[dst][0];  // arg positions for rhs
          params.scale_vals = nullptr;
          if (has_idx) {
            params.in_matrix2 = in_matrix2;
            params.indices2 = &cur_csr.data[row_start];
          } else {
            // edges are contiguous: feature row r corresponds to nnz r
            params.in_matrix2 = &in_matrix2[row_start];
          }
          kernel(&params);  // was mojibake "kernel(¶ms);" — would not compile
        }
      }
    }
  }
}
/*!
* \brief Free the tiled CSR matrix data.
* \param block_csr_array The array containing csr matrices of all blocks.
* \param num_M_blocks Number of blocks to create along the rows of adjacency matrix.
* \param num_K_blocks Number of blocks to create along the columns of adjacency matrix.
* \param use_lhs Whether to use lhs.
* \param use_rhs Whether to use rhs.
*/
template <typename IdType>
inline void SpMMFreeBlocks(
    CSRMatrixInternal<IdType, IdType> *block_csr_array,
    IdType num_M_blocks, IdType num_K_blocks,
    bool use_lhs, bool use_rhs) {
  // When the matrix was tiled along K (num_K_blocks > 1), the per-block
  // index arrays were presumably carved out of shared allocations anchored
  // at block 0 (TODO confirm against SpMMCreateBlocks); free those once.
  // With a single K block the blocks alias CSR arrays owned elsewhere.
  const bool owns_block_arrays = (num_K_blocks > 1);
  if (owns_block_arrays) {
    CSRMatrixInternal<IdType, IdType> &first = block_csr_array[0];
    free(first.indptr);
    if (use_lhs)
      free(first.indices);
    if (use_rhs)
      free(first.data);
  }
  // The block descriptor array itself is always heap-allocated by the caller.
  free(block_csr_array);
}
/*!
* \brief Optimized CPU kernel of SpMM-Sum/Max/Min on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \param argu Arg-Min/Max on source nodes.
* \param arge Arg-Min/Max on edges.
* \note it uses libxsmm, blocking and dynamic thread scheduling.
*/
template <typename IdType, typename DType, typename Op, typename Redop>
void SpMMRedopCsrOpt(
    const BcastOff& bcast,
    const CSRMatrix& csr,
    NDArray ufeat, NDArray efeat,
    NDArray out,
    NDArray argu, NDArray arge) {
  // Last-level-cache size drives the K-blocking heuristic below.
  int32_t llc_size = GetLLCSize();
#ifdef DEBUG
  uint64_t startTick, endTick;
  startTick = __rdtsc();
#endif  // DEBUG
  // csr.data (edge-id mapping) may be absent; has_idx selects how edge
  // features are addressed in the blockwise kernels.
  const bool has_idx = !IsNullArray(csr.data);
  DType* C = out.Ptr<DType>();
  const DType* B = ufeat.Ptr<DType>();
  const DType* E = efeat.Ptr<DType>();
  // argB/argE are only assigned -- and only read further down -- on the
  // Min/Max path; the Add path never touches them.
  IdType *argB, *argE;
  if (std::is_same<Redop, op::Max<DType>>::value || std::is_same<Redop, op::Min<DType>>::value) {
    argB = argu.Ptr<IdType>();
    argE = arge.Ptr<IdType>();
  }
  const int nthreads = omp_get_max_threads();
  const IdType M = csr.num_rows;
  const IdType N = bcast.out_len;
  const IdType K = csr.num_cols;
  const IdType* indptr = csr.indptr.Ptr<IdType>();
  CHECK_NOTNULL(indptr);
  const IdType total_nnz = indptr[M];
  // Nothing to compute for an empty matrix or empty feature dimension.
  if (M <= 0 || K <= 0 || N <= 0 || total_nnz <= 0) return;
  const double avg_degree = total_nnz * 1.0 / M;
  const double nnz_prob = avg_degree / K;
  // Choose the K tile so the source rows expected to be touched roughly fit
  // in the LLC; choose the M tile so each thread gets several blocks for the
  // dynamic schedule to balance.
  IdType K_block_size = std::min((int64_t)K, (int64_t)(llc_size / (N * sizeof(DType) *
      nnz_prob * BLOCKING_HEURISTIC_PARAM)));
  IdType M_block_size = M / (nthreads * NUM_BLOCKS_PER_THREAD);
  if (M_block_size == 0) M_block_size = 1;
  if (K_block_size == 0) K_block_size = 1;
  IdType num_M_blocks = (M + M_block_size - 1) / M_block_size;
  IdType num_K_blocks = (K + K_block_size - 1) / K_block_size;
  // NOTE(review): aligned_alloc result is not checked for NULL here.
  CSRMatrixInternal<IdType, IdType> *block_csr_array =
      (CSRMatrixInternal<IdType, IdType> *)aligned_alloc(64,
      sizeof(CSRMatrixInternal<IdType, IdType>) * num_M_blocks * num_K_blocks);
#ifdef DEBUG
  endTick = __rdtsc();
  if (std::is_same<Redop, op::Max<DType>>::value) {
    LOG(INFO) << "Redop = Max";
  } else if (std::is_same<Redop, op::Min<DType>>::value) {
    LOG(INFO) << "Redop = Min";
  } else if (std::is_same<Redop, op::Add<DType>>::value) {
    LOG(INFO) << "Redop = Add";
  }
  LOG(INFO) << "nthreads = " << nthreads << ", llc_size = " << llc_size;
  LOG(INFO) << "M = " << M << ", K = " << K << ", N = " << N;
  LOG(INFO) << "use_lhs = " << Op::use_lhs << ", use_rhs = " << Op::use_rhs;
  LOG(INFO) << "total_nnz = " << total_nnz << ", avg_degree = " << avg_degree;
  LOG(INFO) << "has_idx = " << has_idx;
  LOG(INFO) << "nnz_prob = " << nnz_prob;
  LOG(INFO) << "K_block_size = " << K_block_size << ", M_block_size = " << M_block_size;
  LOG(INFO) << "num_K_blocks = " << num_K_blocks << ", num_M_blocks = " << num_M_blocks;
  LOG(INFO) << "stage0 ticks = " << (endTick - startTick);
  startTick = __rdtsc();
#endif  // DEBUG
  // Stage 1: tile the CSR matrix into num_M_blocks x num_K_blocks pieces.
  SpMMCreateBlocks(csr, block_csr_array, num_M_blocks, num_K_blocks, M_block_size, K_block_size,
      Op::use_lhs, Op::use_rhs);
#ifdef DEBUG
  endTick = __rdtsc();
  LOG(INFO) << "stage1 ticks = " << (endTick - startTick);
  startTick = __rdtsc();
#endif  // DEBUG
  // Stage 2: JIT a libxsmm op-reduce kernel matching the reduction operator.
  // The final boolean requests arg-index tracking (Min/Max only).
  libxsmm_meltwfunction_opreduce_vecs_idx kernel = nullptr;
  if (std::is_same<Redop, op::Max<DType>>::value) {
    kernel = SpMMCreateLibxsmmKernel<IdType, DType, Op>(has_idx, N,
        LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_REDOP_MAX,
        true);
  } else if (std::is_same<Redop, op::Min<DType>>::value) {
    kernel = SpMMCreateLibxsmmKernel<IdType, DType, Op>(has_idx, N,
        LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_REDOP_MIN,
        true);
  } else if (std::is_same<Redop, op::Add<DType>>::value) {
    kernel = SpMMCreateLibxsmmKernel<IdType, DType, Op>(has_idx, N,
        LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_REDOP_SUM,
        false);
  }
#ifdef DEBUG
  endTick = __rdtsc();
  LOG(INFO) << "stage2 ticks = " << (endTick - startTick);
  startTick = __rdtsc();
#endif  // DEBUG
  // Stage 3: run the blockwise SpMM with the JITed kernel.
  if (std::is_same<Redop, op::Max<DType>>::value || std::is_same<Redop, op::Min<DType>>::value) {
    SpMMBlockwiseOpCmp<IdType, DType, Op, Redop>(block_csr_array, B, E, C, argB, argE, has_idx, N,
        num_M_blocks, num_K_blocks, M_block_size, kernel);
  } else {
    SpMMBlockwiseOpSum(block_csr_array, B, E, C, has_idx, N, num_M_blocks, num_K_blocks,
        M_block_size, kernel);
  }
#ifdef DEBUG
  endTick = __rdtsc();
  LOG(INFO) << "stage3 ticks = " << (endTick - startTick);
  startTick = __rdtsc();
#endif  // DEBUG
  // Stage 4: release the tiled copies.
  SpMMFreeBlocks(block_csr_array, num_M_blocks, num_K_blocks, Op::use_lhs, Op::use_rhs);
#ifdef DEBUG
  endTick = __rdtsc();
  LOG(INFO) << "stage4 ticks = " << (endTick - startTick);
#endif  // DEBUG
}
/*!
* \brief Optimized CPU kernel of SpMM-Sum on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \note it uses libxsmm, blocking and dynamic thread scheduling.
*/
template <typename IdType, typename DType, typename Op>
void SpMMSumCsrLibxsmm(const BcastOff& bcast, const CSRMatrix& csr,
NDArray ufeat, NDArray efeat, NDArray out) {
NDArray dummy;
SpMMRedopCsrOpt<IdType, DType, Op, op::Add<DType>>(bcast, csr, ufeat, efeat, out, dummy, dummy);
}
/*!
* \brief Optimized CPU kernel of SpMM-Min/Max on Csr format.
* \param bcast Broadcast information.
* \param csr The Csr matrix.
* \param ufeat The feature on source nodes.
* \param efeat The feature on edges.
* \param out The result feature on destination nodes.
* \param argu Arg-Min/Max on source nodes.
* \param arge Arg-Min/Max on edges.
* \note it uses libxsmm, blocking and dynamic thread scheduling.
*/
template <typename IdType, typename DType, typename Op, typename Cmp>
void SpMMCmpCsrLibxsmm(const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat,
    NDArray efeat, NDArray out, NDArray argu, NDArray arge) {
  // Thin forwarder: the comparison reduction (op::Max/op::Min) is passed
  // through as the Redop template parameter of the shared implementation,
  // which also fills argu/arge with the winning source/edge indices.
  SpMMRedopCsrOpt<IdType, DType, Op, Cmp>(bcast, csr, ufeat, efeat, out, argu, arge);
}
} // namespace cpu
} // namespace aten
} // namespace dgl
#endif // USE_LIBXSMM
#endif // USE_AVX
#endif // _WIN32
#endif // DGL_ARRAY_CPU_SPMM_BLOCKING_LIBXSMM_H_
|
arrsched.h | #pragma omp parallel for collapse(2)
for (long tk = GZ; tk < N + GZ; tk += TILEK)
for (long tj = GZ; tj < N + GZ; tj += TILEJ)
for (long ti = GZ; ti < N + GZ; ti += TILEI)
for (long k = tk; k < tk + TILEK; ++k)
for (long j = tj; j < tj + TILEJ; ++j)
#pragma omp simd
for (long i = ti; i < ti + TILEI; ++i)
|
AllSimplePaths.h | /*
* AllSimplePaths.h
*
* Created on: 23.06.2017
* Author: Eugenio Angriman
*/
#ifndef AllSimplePaths_H_
#define AllSimplePaths_H_
#include "../graph/Graph.h"
#include "../base/Algorithm.h"
namespace NetworKit {
/**
* @ingroup distance
* Determines all the possible simple paths from a given source node to a target node of a directed unweighted graph. It also accepts a cutoff value i.e. the maximum length of paths.
*/
class AllSimplePaths : public Algorithm {
public:
    /**
     * Creates the AllSimplePaths class for @a G, source @a s and target @a t.
     *
     * @param G The graph (directed, unweighted).
     * @param source The source node.
     * @param target The target node.
     * @param cutoff The maximum length of the paths (optional; none = no limit).
     */
    AllSimplePaths(const Graph& G, node source, node target, count cutoff = none);
    ~AllSimplePaths() = default;
    /**
     * This method computes all possible paths from a given source node to a target node.
     */
    void run() override;
    /**
     * This method returns the number of simple paths from the source node to the target node.
     */
    count numberOfSimplePaths();
    /*
     * This method returns a vector that contains all the simple paths from a source node to a target node represented by vectors. Each path contains the source node as the first element and the target node as the last element.
     */
    std::vector<std::vector<node>> getAllSimplePaths();
    /*
     * This method iterates over all the simple paths and it is far more efficient than calling getAllSimplePaths() (no copy of the path list is made).
     */
    template<typename L> void forAllSimplePaths(L handle);
    /*
     * This method iterates in parallel over all the simple paths and it is far more efficient than calling getAllSimplePaths(). The handle must be thread-safe.
     */
    template<typename L> void parallelForAllSimplePaths(L handle);
protected:
    // This method computes all the paths after a reverse BFS from the target node and a normal BFS from the source node.
    void computePaths();
    // This method returns a queue that contains all the nodes that could be part of a path from the source to the target that crosses @s.
    std::vector<node>* getAvailableSources(node s, count pathLength = 0);
    // The graph
    const Graph &G;
    // The source node
    node source;
    // The target node
    node target;
    // The cutoff i.e. maximum length of paths from source to target. It is optional.
    count cutoff;
    // This vector contains the distance from each node to the target node.
    std::vector<count> distanceToTarget;
    // This vector contains the distance from the source node to each node.
    std::vector<count> distanceFromSource;
    // This vector contains all the possible paths from source to target.
    std::vector<std::vector<node>> paths;
};
inline count AllSimplePaths::numberOfSimplePaths() {
    // Valid only after run(); assureFinished() throws otherwise.
    assureFinished();
    return paths.size();
}
inline std::vector<std::vector<node>> AllSimplePaths::getAllSimplePaths() {
    // Valid only after run(); returns the stored paths by value (full copy).
    // Prefer forAllSimplePaths()/parallelForAllSimplePaths() to avoid the copy.
    assureFinished();
    return paths;
}
template<typename L>
void AllSimplePaths::forAllSimplePaths(L handle) {
assureFinished();
for (std::vector<std::vector<node>>::iterator it = paths.begin() ; it != paths.end(); ++it) {
handle(*it);
}
}
template<typename L>
void AllSimplePaths::parallelForAllSimplePaths(L handle) {
    // Visit the stored paths concurrently; handle must be thread-safe.
    // Valid only after run(); assureFinished() throws otherwise.
    assureFinished();
#pragma omp parallel for schedule(guided)
    for (omp_index i = 0; i < static_cast<omp_index>(paths.size()); ++i) {
        handle(paths[i]);
    }
}
} /* namespace NetworKit */
#endif /* AllSimplePaths_H_ */
|
plm.c | /*
* plmc
* Copyright (c) 2016, John Ingraham
* john.ingraham@gmail.com
*/
#include <stdlib.h>
#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>
#include <assert.h>
#include <string.h>
/* Optionally include OpenMP with the -fopenmp flag */
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "include/plm.h"
#include "include/inference.h"
/* Usage pattern */
const char *usage =
"plmc\n"
"\n"
"Usage:\n"
" plm [options] alignmentfile\n"
" plm -c couplingsfile alignmentfile\n"
" plm -o paramfile -c couplingsfile alignmentfile\n"
" plm [-h | --help]\n"
" \n"
" Required input:\n"
" alignmentfile Multiple sequence alignment in FASTA format\n"
"\n"
" Options, output:\n"
" -c --couplings couplingsfile Save coupling scores to file (text)\n"
" -o --output paramfile Save estimated parameters to file (binary)\n"
"\n"
" Options, alignment processing:\n"
" -s --scale <value> Sequence weights: neighborhood weight [s > 0]\n"
" -t --theta <value> Sequence weights: neighborhood divergence [0 < t < 1]\n"
"\n"
" Options, Maximum a posteriori estimation (L-BFGS, default):\n"
" -lh --lambdah <value> Set L2 lambda for fields (h_i)\n"
" -le --lambdae <value> Set L2 lambda for couplings (e_ij)\n"
" -lg --lambdag <value> Set group L1 lambda for couplings (e_ij)\n"
"\n"
" Options, general:\n"
" -a --alphabet alphabet Alternative character set to use for analysis\n"
" -f --focus identifier Select only uppercase, non-gapped sites from a focus sequence\n"
" -g --gapignore Model sequence likelihoods only by coding, non-gapped portions\n"
" -m --maxiter Maximum number of iterations\n"
" -n --ncores [<number>|max] Maximum number of threads to use in OpenMP\n"
" -h --help Usage\n\n";
/* Internal functions to MSARead */
void MSAReadSeq(char *seq, FILE *fpAli);
letter_t MSAReadCode(char c, char *alphabet, int nCodes);
/* Global verbosity & profiling options */
int verbose = 2;
/* Reference amino acid indexing: gap character first, then the 20
   standard residues (21 codes total) */
const char *codesAA = "-ACDEFGHIKLMNPQRSTVWY";
/* Regularization default parameters */
const numeric_t REGULARIZATION_LAMBDA_H = 0.01;    /* L2 on fields h_i */
const numeric_t REGULARIZATION_LAMBDA_E = 100.0;   /* L2 on couplings e_ij */
const numeric_t REGULARIZATION_LAMBDA_GROUP = 0.0; /* group-L1 on couplings */
const numeric_t REWEIGHTING_THETA = 0.20;          /* neighborhood divergence */
const numeric_t REWEIGHTING_SCALE = 1.0;           /* samples per neighborhood */
const int ZERO_APC_PRIORS = 0;
int main(int argc, char **argv) {
    /* Entry point: parse command-line options, read and preprocess the
       alignment, fit the pairwise model, and write requested outputs.
       The LAST argument is always taken as the alignment file. */
    char *alignFile = NULL;
    char *outputFile = NULL;
    char *couplingsFile = NULL;
    /* Default options */
    options_t *options = (options_t *) malloc(sizeof(options_t));
    options->theta = REWEIGHTING_THETA;
    options->lambdaH = REGULARIZATION_LAMBDA_H;
    options->lambdaE = REGULARIZATION_LAMBDA_E;
    options->lambdaGroup = REGULARIZATION_LAMBDA_GROUP;
    options->scale = REWEIGHTING_SCALE;
    options->zeroAPC = 0;
    options->maxIter = 0;
    options->usePairs = 1;
    options->estimator = INFER_MAP;
    options->estimatorMAP = INFER_MAP_PLM;
    options->target = NULL;
    options->alphabet = (char *) codesAA;
    /* Print usage if no arguments */
    if (argc == 1) {
        fprintf(stderr, "%s", usage);
        exit(1);
    }
    /* Parse command line arguments.
       NOTE(review): every option, including value-less flags such as
       -g/-i/-ee, is guarded by (arg < argc-1); this works because the
       alignment file must be the final argument, so flags never appear
       last. A flag given as the very last argument is silently ignored. */
    for (int arg = 1; arg < argc; arg++) {
        if ((arg < argc-1) && (strcmp(argv[arg], "--output") == 0
            || strcmp(argv[arg], "-o") == 0)) {
            outputFile = argv[++arg];
        } else if ((arg < argc-1) && (strcmp(argv[arg], "--alphabet") == 0
            || strcmp(argv[arg], "-a") == 0)) {
            options->alphabet = argv[++arg];
        } else if ((arg < argc-1) && (strcmp(argv[arg], "--couplings") == 0
            || strcmp(argv[arg], "-c") == 0)) {
            couplingsFile = argv[++arg];
        } else if ((arg < argc-1) && (strcmp(argv[arg], "--lambdah") == 0
            || strcmp(argv[arg], "-lh") == 0)) {
            options->lambdaH = atof(argv[++arg]);
        } else if ((arg < argc-1) && (strcmp(argv[arg], "--lambdae") == 0
            || strcmp(argv[arg], "-le") == 0)) {
            options->lambdaE = atof(argv[++arg]);
        } else if ((arg < argc-1) && (strcmp(argv[arg], "--lambdag") == 0
            || strcmp(argv[arg], "-lg") == 0)) {
            options->lambdaGroup = atof(argv[++arg]);
        } else if ((arg < argc-1) && (strcmp(argv[arg], "--theta") == 0
            || strcmp(argv[arg], "-t") == 0)) {
            options->theta = atof(argv[++arg]);
        } else if ((arg < argc-1) && (strcmp(argv[arg], "--scale") == 0
            || strcmp(argv[arg], "-s") == 0)) {
            options->scale = atof(argv[++arg]);
        } else if ((arg < argc-1) && (strcmp(argv[arg], "--maxiter") == 0
            || strcmp(argv[arg], "-m") == 0)) {
            options->maxIter = atoi(argv[++arg]);
        } else if ((arg < argc-1) && (strcmp(argv[arg], "--independent") == 0
            || strcmp(argv[arg], "-i") == 0)) {
            options->usePairs = 0;
            /* NOTE(review): exits with status 0 even though the feature is
               unimplemented; a nonzero status may be intended */
            fprintf(stderr, "Independent model not yet implemented\n");
            exit(0);
        } else if ((arg < argc-1) && (strcmp(argv[arg], "--gapreduce") == 0
            || strcmp(argv[arg], "-g") == 0)) {
            options->estimatorMAP = INFER_MAP_PLM_GAPREDUCE;
        } else if ((arg < argc-1) && (strcmp(argv[arg], "--estimatele") == 0
            || strcmp(argv[arg], "-ee") == 0)) {
            options->zeroAPC = 1;
        } else if ((arg < argc-1) && (strcmp(argv[arg], "--focus") == 0
            || strcmp(argv[arg], "-f") == 0)) {
            options->target = argv[++arg];
        } else if ((arg < argc-1) && (strcmp(argv[arg], "--ncores") == 0
            || strcmp(argv[arg], "-n") == 0)) {
#if defined(_OPENMP)
            /* Clamp the requested thread count to [1, omp_get_max_threads] */
            if (strcmp(argv[arg + 1], "max") == 0) {
                int maxThreads = omp_get_max_threads();
                /* Redundant, but serves as sanity check */
                omp_set_num_threads(maxThreads);
                fprintf(stderr, "OpenMP: Using %d of %d threads\n",
                    maxThreads, maxThreads);
            } else {
                int numThreads = atoi(argv[arg + 1]);
                int maxThreads = omp_get_max_threads();
                if (numThreads >= 1 && numThreads <= maxThreads) {
                    omp_set_num_threads(numThreads);
                    fprintf(stderr, "OpenMP: Using %d of %d threads\n",
                        numThreads, maxThreads);
                } else if (numThreads > maxThreads) {
                    omp_set_num_threads(maxThreads);
                    fprintf(stderr, "OpenMP: More threads requested than "
                                    "available. Using %d of %d threads instead.\n",
                        maxThreads, maxThreads);
                } else {
                    /* atoi failure or nonpositive request: fall back to 1 */
                    omp_set_num_threads(1);
                    fprintf(stderr, "OpenMP: Using 1 of %d threads\n",
                        maxThreads);
                }
            }
            arg++;
#else
            fprintf(stderr, "Error (-n/--ncores) only available when "
                            "compiled with OpenMP\n");
            exit(1);
#endif
        } else if (strcmp(argv[arg], "--help") == 0
            || strcmp(argv[arg], "-h") == 0) {
            fprintf(stderr, "%s", usage);
            exit(1);
        }
    }
    alignFile = argv[argc - 1];
    /* Read multiple seqence alignment */
    alignment_t *ali = MSARead(alignFile, options);
    /* Reweight sequences by inverse neighborhood density */
    MSAReweightSequences(ali, options->theta, options->scale);
    /* Compute sitewise and pairwise marginal distributions */
    MSACountMarginals(ali, options);
    /* Infer model parameters */
    numeric_t *x = InferPairModel(ali, options);
    /* (Optionally) Output estimated parameters and coupling scores */
    if (outputFile != NULL)
        OutputParametersFull(outputFile, x, ali, options);
    if (couplingsFile != NULL)
        OutputCouplingScores(couplingsFile, x, ali, options);
    /* Free alignment and options */
    MSAFree(ali, options);
    /* main implicitly returns 0 under C99 and later */
}
alignment_t *MSARead(char *alignFile, options_t *options) {
    /* Read FASTA-formatted alignment.
       Two passes over the file: pass 1 validates record structure and
       determines dimensions; pass 2 encodes sequences into ali->sequences.
       Afterwards: locate the optional focus sequence, drop rows with
       out-of-alphabet characters, (focus mode) drop non-focus columns,
       parse the NAME/START-END region for residue offsets, and initialize
       uniform sequence weights. Exits on any format error. */
    FILE *fpAli = NULL;
    if (alignFile != NULL) {
        fpAli = fopen(alignFile, "r");
    } else {
        fprintf(stderr, "Must specify alignment file: -a ALIGN_FILE\n");
        exit(1);
    }
    if (fpAli == NULL) {
        fprintf(stderr, "Error opening alignment file\n");
        exit(1);
    }
    /* Allocate alignment */
    alignment_t *ali = (alignment_t *) malloc(sizeof(alignment_t));
    ali->nSeqs = ali->nSites = ali->nCodes = 0;
    ali->alphabet = options->alphabet;
    ali->names = NULL;
    ali->sequences = NULL;
    ali->target = -1;
    ali->offsets = NULL;
    ali->nEff = 0;
    ali->weights = ali->fi = ali->fij = NULL;
    ali->nParams = 0;
    /* Verify alignment dimensions and structure (first pass through file).
       NOTE: the local buffer `seq` coexists with the function-like macro
       seq(s, i); the macro only expands when followed by '(' so both uses
       below are unambiguous. */
    char name[BUFFER_SIZE];
    char seq[BUFFER_SIZE];
    /* Read first line as name */
    fgetstr(name, fpAli);
    if (*name == '>') {
        MSAReadSeq(seq, fpAli);
    } else {
        fprintf(stderr, "Error reading alignment:"
                        " First line should start with >\n");
        exit(1);
    }
    ali->nCodes = strlen(ali->alphabet);
    ali->nSites = strlen(seq);
    ali->nSeqs = 1;
    /* MSAReadSeq's one-character lookahead sets the EOF indicator after the
       final record, so this loop terminates without a spurious error */
    while (!feof(fpAli)) {
        char c = fgetc(fpAli);
        if (c == '>') {
            /* Read name and sequence pair */
            fgetstr(name, fpAli);
            MSAReadSeq(seq, fpAli);
        } else {
            fprintf(stderr, "Error reading alignment:"
                            " sequence records should start with >\n");
            exit(1);
        }
        /* Validate sequence length */
        if (strlen(seq) != ali->nSites) {
            fprintf(stderr,
                "Incompatible sequence length (%lu should be %d) for %s:\n%s\n",
                strlen(seq), ali->nSites, name, seq);
            exit(1);
        }
        ali->nSeqs++;
    }
    /* Encode full alignment block (second pass through file) */
    ali->sequences = (letter_t *)
        malloc(ali->nSites * ali->nSeqs * sizeof(letter_t));
    ali->names = (char **) malloc(ali->nSeqs * sizeof(char *));
    for (int s = 0; s < ali->nSeqs; s++)
        for (int i = 0; i < ali->nSites; i++) seq(s, i) = 0;
    for (int s = 0; s < ali->nSeqs; s++) ali->names[s] = NULL;
    rewind(fpAli);
    for (int s = 0; s < ali->nSeqs; s++) {
        /* >Name */
        getc(fpAli);
        fgetstr(name, fpAli);
        ali->names[s] = (char *) malloc((strlen(name) + 1) * sizeof(char));
        strcpy(ali->names[s], name);
        /* Sequence */
        MSAReadSeq(seq, fpAli);
        for (int i = 0; i < ali->nSites; i++)
            seq(s, i) = MSAReadCode(seq[i], ali->alphabet, ali->nCodes);
    }
    /* --------------------------------_DEBUG_--------------------------------*/
    /* Alignment to stderr */
    // for (int s = 0; s < 10; s++) {
    // for (int s = 0; s < ali->nSeqs; s++) {
    //     for (int i = 0; i < ali->nSites; i++)
    //         if (seq(s, i) >= 0 && seq(s, i) < ali->nCodes) {
    //             fprintf(stderr, "%c", ali->alphabet[seq(s, i)]);
    //         } else if (seq(s, i) >= -ali->nCodes && seq(s, i) < 0) {
    //             fprintf(stderr, "%c",
    //                 tolower(ali->alphabet[seq(s, i) + ali->nCodes]));
    //         } else {
    //             fprintf(stderr, "*%d*", seq(s, i));
    //         }
    //     fprintf(stderr, "\n");
    // }
    // exit(0);
    /* --------------------------------^DEBUG^--------------------------------*/
    /* Focus mode: If a focus sequence (target) is provided, locate it by
       name prefix; on multiple matches the FIRST match is kept */
    if (options->target != NULL) {
        for (int s = 0; s < ali->nSeqs; s++)
            if (strncmp(options->target, ali->names[s],
                strlen(options->target)) == 0) {
                if (ali->target >= 0) {
                    fprintf(stderr,
                        "Multiple sequences start with %s, picking sequence %d\n",
                        options->target, s + 1);
                } else {
                    ali->target = s;
                }
            }
        if (ali->target >= 0) {
            fprintf(stderr, "Found focus %s as sequence %d\n", options->target,
                ali->target + 1);
        } else {
            fprintf(stderr,
                "Could not find %s, proceeding without focus sequence\n",
                options->target);
        }
    }
    /* Always discard any sequences (rows) with out-of-alphabet characters.
       Valid codes after encoding lie in [-nCodes, nCodes). */
    int* seqValid = (int *) malloc(ali->nSeqs * sizeof(int));
    for (int s = 0; s < ali->nSeqs; s++) seqValid[s] = 0;
    for (int s = 0; s < ali->nSeqs; s++)
        for (int i = 0; i < ali->nSites; i++)
            if ((seq(s, i) >= -ali->nCodes) && (seq(s, i) < ali->nCodes))
                seqValid[s]++;
    int nValidSeqs = 0;
    for (int s = 0; s < ali->nSeqs; s++)
        if (seqValid[s] == ali->nSites) nValidSeqs++;
    fprintf(stderr, "%d valid sequences out of %d \n", nValidSeqs, ali->nSeqs);
    /* Record indices of skipped sequences */
    ali->nSkippedSeqs = ali->nSeqs - nValidSeqs;
    ali->skippedSeqs = (int *) malloc(ali->nSkippedSeqs * sizeof(int));
    for (int s = 0, skipIndex = 0; s < ali->nSeqs; s++)
        if (seqValid[s] != ali->nSites) ali->skippedSeqs[skipIndex++] = s;
    /* Focus mode: select only focus columns (criteria below) */
    int nValidSites = ali->nSites;
    int* siteValid = (int *) malloc(ali->nSites * sizeof(int));
    for (int i = 0; i < ali->nSites; i++) siteValid[i] = 1;
    if (ali->target >= 0) {
        for (int i = 0; i < ali->nSites; i++) {
            /* For proteins, remove lower case and gap columns */
            if ((ali->alphabet == codesAA)
                && (seq(ali->target, i) < 0))
                siteValid[i] = 0;
            /* Discard gaps (code 0 is the gap character) */
            if ((ali->alphabet == codesAA)
                || (options->estimatorMAP == INFER_MAP_PLM_GAPREDUCE))
                if (seq(ali->target, i) == 0) siteValid[i] = 0;
        }
        nValidSites = 0;
        for (int i = 0; i < ali->nSites; i++)
            if (siteValid[i] == 1) nValidSites++;
        fprintf(stderr,
            "%d sites out of %d\n", nValidSites, ali->nSites);
    } else {
        fprintf(stderr,
            "%d sites\n", ali->nSites);
    }
    /* Focus mode: parse region (NAME/START_IX-END_IX) and map offsets */
    int leftOffset = 0;
    if (ali->target >= 0) {
        char *focusName = ali->names[ali->target];
        /* Name should be immediately followed by '/' */
        if (strlen(focusName) > strlen(options->target) + 1
            && focusName[strlen(options->target)] == '/') {
            /* Attempt to read integer region start (manual decimal parse) */
            int regLeft = strlen(options->target) + 1;
            int ix = 0;
            if (isdigit(focusName[regLeft])) {
                while (regLeft + ix < strlen(focusName)
                       && isdigit(focusName[regLeft + ix + 1])) ix++;
                int tens = 1;
                leftOffset = -1;
                for (int i = ix; i >= 0; i--) {
                    leftOffset += tens * (focusName[regLeft + i] - '0');
                    tens *= 10;
                }
                fprintf(stderr, "Region starts at %d\n", leftOffset + 1);
            } else {
                fprintf(stderr, "Error parsing region, assuming start at 1");
            }
        }
        /* Map the offsets: offsets[ix] is the 1-based position of reduced
           column ix within the (optionally region-shifted) full sequence */
        ali->offsets = (int *) malloc(nValidSites * sizeof(int));
        for (int i = 0; i < nValidSites; i++) ali->offsets[i] = i + 1;
        int ix = 0;
        for (int i = 0; i < ali->nSites; i++)
            if (siteValid[i] == 1) {
                ali->offsets[ix] = i + 1 + leftOffset;
                ix++;
            }
        /* Reposition the target for reduced alignment */
        int targetShift = -1;
        for (int i = 0; i <= ali->target; i++)
            if (seqValid[i] == ali->nSites) targetShift++;
        ali->target = targetShift;
    }
    /* Copy only selected rows and columns */
    if (nValidSeqs < ali->nSeqs || nValidSites < ali->nSites) {
        letter_t *seqsReduced = (letter_t *)
            malloc(nValidSites * nValidSeqs * sizeof(letter_t));
        for (int i = 0; i < nValidSites * nValidSeqs; i++) seqsReduced[i] = 0;
        int sx = 0;
        for (int s = 0; s < ali->nSeqs; s++)
            if (seqValid[s] == ali->nSites) {
                int ix = 0;
                for (int i = 0; i < ali->nSites; i++) {
                    if (siteValid[i] == 1) {
                        seqsReduced[ix + sx * nValidSites] = seq(s, i);
                        ix++;
                    }
                }
                sx++;
            }
        /* Reallocate alignment with reduced dimensions.
           NOTE(review): ali->names keeps one entry per ORIGINAL sequence
           even after nSeqs shrinks; see MSAFree. */
        free(ali->sequences);
        ali->nSeqs = nValidSeqs;
        ali->nSites = nValidSites;
        ali->sequences = (letter_t *)
            malloc(nValidSites * nValidSeqs * sizeof(letter_t));
        for (int i = 0; i < nValidSites * nValidSeqs; i++)
            ali->sequences[i] = 0;
        for (int s = 0; s < nValidSeqs; s++)
            for (int i = 0; i < nValidSites; i++)
                seq(s, i) = seqsReduced[i + s * nValidSites];
        free(seqsReduced);
    }
    /* Shift any lowercase codes back to uppercase */
    for (int s = 0; s < ali->nSeqs; s++)
        for (int i = 0; i < ali->nSites; i++)
            if (seq(s, i) < 0) seq(s, i) += ali->nCodes;
    /* Intialize weights to 1.0 */
    ali->weights = (numeric_t *) malloc(ali->nSeqs * sizeof(numeric_t));
    for (int s = 0; s < ali->nSeqs; s++) ali->weights[s] = 1.0;
    ali->nEff = (numeric_t) ali->nSeqs;
    /* NOTE(review): fpAli is never fclose()d and seqValid/siteValid are
       never freed in this function */
    /* --------------------------------_DEBUG_--------------------------------*/
    /* Display offset map */
    // for (int i = 0; i < ali->nSites; i++) {
    //     fprintf(stderr, "%d : %d : %c\n", i + 1, ali->offsets[i],
    //         ali->alphabet[seq(ali->target, i)]);
    // }
    // exit(0);
    /* Display target */
    // for (int i = 0; i < ali->nSites; i++) {
    //     fprintf(stderr, "%c", ali->alphabet[seq(ali->target, i)]);
    // }
    // fprintf(stderr, "\n");
    // exit(0);
    /* --------------------------------^DEBUG^--------------------------------*/
    /* --------------------------------_DEBUG_--------------------------------*/
    // for (int s = 0; s < ali->nSeqs; s++) {
    //     fprintf(stderr, ">%s\n", ali->names[s]);
    //     for (int i = 0; i < ali->nSites; i++)
    //         fprintf(stderr, "%c", ali->alphabet[seq(s, i)]);
    //     fprintf(stderr, "\n");
    // }
    /* --------------------------------^DEBUG^--------------------------------*/
    return ali;
}
void MSAReadSeq(char *seq, FILE *fpAli) {
    /* Read one (possibly multi-line) sequence from the current position.
       Lines are concatenated into seq until the next record ('>') or EOF.
       NOTE: seq must be large enough (BUFFER_SIZE) to hold the whole
       concatenated record; strcat performs no bounds checking. */
    char buf[BUFFER_SIZE];
    /* Look ahead one character. BUGFIX: fgetc returns int, and storing it
       in a plain char conflates EOF (-1) with a valid 0xFF byte (and makes
       the comparison against '>' implementation-defined for high bytes);
       use int for the lookahead. */
    int c = fgetc(fpAli);
    ungetc(c, fpAli);
    seq[0] = '\0';
    while (c != '>' && !feof(fpAli)) {
        fgetstr(buf, fpAli);
        strcat(seq, buf);
        /* Look ahead one character */
        c = fgetc(fpAli);
        ungetc(c, fpAli);
    }
}
letter_t MSAReadCode(char c, char *alphabet, int nCodes) {
    /* Encode a character as an integer between -nCodes and +nCodes
           In alphabet:                     store index [0, nCodes - 1]
           Lowercase version of alphabet:   downshift by nCodes [-nCodes, -1]
           Out of alphabet:                 store nCodes [nCodes]
     */
    letter_t i = 0;
    /* Protein-specific treatment of '.': pointer identity test against the
       default alphabet (not a content comparison), so a user-supplied copy
       of the protein alphabet does NOT get this treatment */
    if (alphabet == codesAA) if (c == '.') c = '-';
    /* Scan for the uppercase match; stops at nCodes - 1 even on mismatch,
       which the two checks below disambiguate */
    while ((i < nCodes - 1) && toupper(c) != alphabet[i]) i++;
    /* Store lowercase characters as down-shifted by nCodes */
    if (c != alphabet[i] && toupper(c) == alphabet[i]) i -= nCodes;
    /* Encode out-of-alphabet characters by [nCodes]; the i > 0 guard also
       short-circuits any negative (lowercase) index before alphabet[i] */
    if (i > 0 && toupper(c) != alphabet[i]) i = nCodes;
    return i;
}
void MSAReweightSequences(alignment_t *ali, numeric_t theta, numeric_t scale) {
    /* Reweight sequences by their inverse neighborhood size. Each sequence's
       weight is the inverse of the number of neighboring sequences with less
       than THETA percent divergence
     */
    for (int i = 0; i < ali->nSeqs; i++) ali->weights[i] = 1.0;
    /* Only apply reweighting if theta is on [0,1] */
    if (theta >= 0 && theta <= 1) {
        /* The neighborhood size of each sequence is the number of sequences
           in the alignment within theta percent divergence */
#if defined(_OPENMP)
        /* Naive parallelization is faster ignoring symmetry: each thread
           owns a distinct s, so weights[s] is written race-free */
#pragma omp parallel for
        for (int s = 0; s < ali->nSeqs; s++)
            for (int t = 0; t < ali->nSeqs; t++)
                if (s != t) {
                    int id = 0;
                    for (int n = 0; n < ali->nSites; n++)
                        id += (seq(s, n) == seq(t, n));
                    if (id >= ((1 - theta) * ali->nSites))
                        ali->weights[s] += 1.0;
                }
#else
        /* For a single core, take advantage of symmetry: each pair (s, t)
           is compared once and credited to both neighborhoods */
        for (int s = 0; s < ali->nSeqs - 1; s++)
            for (int t = s + 1; t < ali->nSeqs; t++) {
                int id = 0;
                for (int n = 0; n < ali->nSites; n++)
                    id += (seq(s, n) == seq(t, n));
                if (id >= ((1 - theta) * ali->nSites)) {
                    ali->weights[s] += 1.0;
                    ali->weights[t] += 1.0;
                }
            }
#endif
        /* Reweight sequences by the inverse of the neighborhood size
           (the count includes the sequence itself, so weights <= 1) */
        for (int i = 0; i < ali->nSeqs; i++)
            ali->weights[i] = 1.0 / ali->weights[i];
    }
    /* Scale sets the effective number of samples per neighborhood */
    for (int i = 0; i < ali->nSeqs; i++)
        ali->weights[i] *= scale;
    /* The effective number of sequences is then the sum of the weights */
    ali->nEff = 0;
    for (int i = 0; i < ali->nSeqs; i++) ali->nEff += ali->weights[i];
    if (theta >= 0 && theta <= 1) {
        fprintf(stderr,
            "Effective number of samples: %.1f\t(%.0f%% identical neighborhood = %.3f samples)\n",
            ali->nEff, 100 * (1 - theta), scale);
    } else {
        fprintf(stderr,
            "Theta not between 0 and 1, no sequence reweighting applied\n");
    }
}
void MSACountMarginals(alignment_t *ali, options_t *options) {
    /* Compute first and second order marginal distributions, according to the
       sequence weights
     */
    if (options->estimatorMAP == INFER_MAP_PLM_GAPREDUCE) {
        /* Condition the marginals on ungapped: drop the gap code (code 0)
           from the alphabet and count only non-gap letters, shifted by -1 */
        ali->nCodes = strlen(ali->alphabet) - 1;
        /* First-order marginals P_i(Ai) */
        int nFi = ali->nSites * ali->nCodes;
        ali->fi = (numeric_t *) malloc(nFi * sizeof(numeric_t));
        for (int i = 0; i < nFi; i++) ali->fi[i] = 0.0;
        for (int s = 0; s < ali->nSeqs; s++)
            for (int i = 0; i < ali->nSites; i++)
                if (seq(s, i) > 0)
                    fi(i, seq(s, i) - 1) += ali->weights[s];
        /* Second-order marginals P_ij(Ai, Aj), upper triangle i < j only */
        int nFij = ali->nSites * (ali->nSites - 1) / 2 * ali->nCodes * ali->nCodes;
        ali->fij = (numeric_t *) malloc(nFij * sizeof(numeric_t));
        for (int i = 0; i < nFij; i++) ali->fij[i] = 0.0;
        for (int s = 0; s < ali->nSeqs; s++)
            for (int i = 0; i < ali->nSites - 1; i++)
                for (int j = i + 1; j < ali->nSites; j++)
                    if (seq(s, i) > 0) if(seq(s, j) > 0)
                        fij(i, j, seq(s, i) - 1, seq(s, j) - 1)
                            += ali->weights[s];
        /* Normalize conditional distributions (per site / per site pair,
           since gapped rows were excluded the sums differ between sites) */
        for (int i = 0; i < ali->nSites; i++) {
            double fsum = 0.0;
            for (int ai = 0; ai < ali->nCodes; ai++)
                fsum += fi(i, ai);
            if (fsum != 0) {
                double fsumInv = 1.0 / fsum;
                for (int ai = 0; ai < ali->nCodes; ai++)
                    fi(i, ai) *= fsumInv;
            } else {
                /* Handle empty columns: fall back to a flat distribution */
                numeric_t flatF = 1.0 / ((numeric_t) ali->nCodes);
                for (int ai = 0; ai < ali->nCodes; ai++)
                    fi(i, ai) = flatF;
            }
        }
        for (int i = 0; i < ali->nSites - 1; i++)
            for (int j = i + 1; j < ali->nSites; j++) {
                double fsum = 0.0;
                for (int ai = 0; ai < ali->nCodes; ai++)
                    for (int aj = 0; aj < ali->nCodes; aj++)
                        fsum += fij(i, j, ai, aj);
                if (fsum != 0) {
                    double fsumInv = 1.0 / fsum;
                    for (int ai = 0; ai < ali->nCodes; ai++)
                        for (int aj = 0; aj < ali->nCodes; aj++)
                            fij(i, j, ai, aj) *= fsumInv;
                } else {
                    /* Handle pairs of empty columns */
                    numeric_t flatF = 1.0 / ((numeric_t) (ali->nCodes * ali->nCodes));
                    for (int ai = 0; ai < ali->nCodes; ai++)
                        for (int aj = 0; aj < ali->nCodes; aj++)
                            fij(i, j, ai, aj) = flatF;
                }
            }
    } else {
        /* Compute regular marginals, normalized globally by nEff */
        numeric_t Zinv = 1.0 / ali->nEff;
        /* First-order marginals P_i(Ai) */
        int nFi = ali->nSites * ali->nCodes;
        ali->fi = (numeric_t *) malloc(nFi * sizeof(numeric_t));
        for (int i = 0; i < nFi; i++) ali->fi[i] = 0.0;
        for (int s = 0; s < ali->nSeqs; s++)
            for (int i = 0; i < ali->nSites; i++)
                fi(i, seq(s, i)) += ali->weights[s] * Zinv;
        /* Second-order marginals P_ij(Ai, Aj), upper triangle i < j only */
        int nFij = ali->nSites * (ali->nSites - 1) / 2 * ali->nCodes * ali->nCodes;
        ali->fij = (numeric_t *) malloc(nFij * sizeof(numeric_t));
        for (int i = 0; i < nFij; i++) ali->fij[i] = 0.0;
        for (int s = 0; s < ali->nSeqs; s++)
            for (int i = 0; i < ali->nSites - 1; i++)
                for (int j = i + 1; j < ali->nSites; j++)
                    fij(i, j, seq(s, i), seq(s, j)) += ali->weights[s] * Zinv;
    }
}
void MSAFree(alignment_t *ali, options_t *options) {
    /* Free alignment and options */
    if (ali->names) {
        /* BUGFIX(leak): names has one entry per sequence in the ORIGINAL
           file; MSARead shrinks nSeqs after row filtering but leaves the
           names array untouched, so free nSeqs + nSkippedSeqs entries.
           Entries are NULL-initialized, so free is safe even if MSARead
           exited early (free(NULL) is a no-op). */
        int nNamesTotal = ali->nSeqs + ali->nSkippedSeqs;
        for (int i = 0; i < nNamesTotal; i++) free(ali->names[i]);
    }
    free(ali->names);
    free(ali->sequences);
    free(ali->weights);
    free(ali->fi);
    free(ali->fij);
    /* BUGFIX(leak): allocated by MSARead but never released. offsets is
       NULL outside focus mode, which free handles. */
    free(ali->skippedSeqs);
    free(ali->offsets);
    /* Note: options->target and options->alphabet are never allocated */
    free(options);
    /* BUGFIX(leak): the alignment struct itself is heap-allocated */
    free(ali);
}
#define OUTPUT_PRECISION float
void OutputParametersSite(char *outputFile, const numeric_t *x,
    alignment_t *ali) {
    /* Write the sitewise (fields-only) binary parameter file:
       nSites, target sequence (or alphabet[0] fill), offset map,
       sitewise marginals fi twice, then the field parameters hi.
       NOTE(review): the file is opened with mode "w", not "wb"; on
       platforms with text-mode translation this would corrupt the binary
       output -- confirm the intended platforms before changing. */
    FILE *fpOutput = NULL;
    fpOutput = fopen(outputFile, "w");
    if (fpOutput != NULL) {
        /* 1: nSites */
        fwrite(&(ali->nSites), sizeof(ali->nSites), 1, fpOutput);
        /* 2: (Focus mode only) target sequence */
        if (ali->target >= 0) {
            for (int i = 0; i < ali->nSites; i++) {
                char c = (char) ali->alphabet[seq(ali->target, i)];
                fwrite(&c, sizeof(char), 1, fpOutput);
            }
        } else {
            char c = ali->alphabet[0];
            for (int i = 0; i < ali->nSites; i++)
                fwrite(&c, sizeof(c), 1, fpOutput);
        }
        /* 3: (Focus mode only) offset map */
        if (ali->target >= 0) {
            for (int i = 0; i < ali->nSites; i++) {
                int ix = ali->offsets[i];
                fwrite(&ix, sizeof(ix), 1, fpOutput);
            }
        } else {
            for (int i = 0; i < ali->nSites; i++) {
                int ix = i + 1;
                fwrite(&ix, sizeof(ix), 1, fpOutput);
            }
        }
        /* 4,5: sitewise marginals fi, twice.
           BUGFIX: the repetition counter was named `x`, shadowing the
           parameter `const numeric_t *x` used by the xHi macro below;
           renamed to `rep` to remove the shadowing hazard. */
        for (int rep = 0; rep < 2; rep++)
            for (int i = 0; i < ali->nSites; i++)
                for (int ai = 0; ai < ali->nCodes; ai++) {
                    OUTPUT_PRECISION f = (OUTPUT_PRECISION) fi(i, ai);
                    fwrite(&f, sizeof(f), 1, fpOutput);
                }
        /* 6: sitewise parameters hi */
        for (int i = 0; i < ali->nSites; i++)
            for (int ai = 0; ai < ali->nCodes; ai++) {
                OUTPUT_PRECISION h = (OUTPUT_PRECISION) xHi(i, ai);
                fwrite(&h, sizeof(h), 1, fpOutput);
            }
        fclose(fpOutput);
    } else {
        fprintf(stderr, "Error writing parameters\n");
        exit(1);
    }
}
/* Write the full model (header, weights, marginals, fields, couplings)
 * to a binary file with fixed-width types (int32_t / OUTPUT_PRECISION).
 * x: packed parameter vector (read through the xHi / xEij macros).
 * Exits the process on failure to open the output file. */
void OutputParametersFull(char *outputFile, const numeric_t *x,
    alignment_t *ali, options_t *options) {
    /* File format */
    FILE *fpOutput = NULL;
    /* "wb": the payload is raw fwrite data; text mode would corrupt it
       on platforms that translate line endings (e.g. Windows) */
    fpOutput = fopen(outputFile, "wb");
    if (fpOutput != NULL) {
        /* 1: nSites */
        int32_t nSites = (int32_t) ali->nSites;
        fwrite(&nSites, sizeof(nSites), 1, fpOutput);
        /* 2: nCodes */
        int32_t nCodes = (int32_t) ali->nCodes;
        fwrite(&nCodes, sizeof(nCodes), 1, fpOutput);
        /* 3: nSeqs */
        int32_t nSeqs = (int32_t) ali->nSeqs;
        fwrite(&nSeqs, sizeof(nSeqs), 1, fpOutput);
        /* 4: nSkippedSeqs */
        int32_t nSkippedSeqs = (int32_t) ali->nSkippedSeqs;
        fwrite(&nSkippedSeqs, sizeof(nSkippedSeqs), 1, fpOutput);
        /* 5: number of iterations */
        int32_t maxIter = (int32_t) options->maxIter;
        fwrite(&maxIter, sizeof(maxIter), 1, fpOutput);
        /* 6: theta */
        OUTPUT_PRECISION theta = (OUTPUT_PRECISION) options->theta;
        fwrite(&theta, sizeof(theta), 1, fpOutput);
        /* 7: lambda for fields (lh) */
        OUTPUT_PRECISION lh = (OUTPUT_PRECISION) options->lambdaH;
        fwrite(&lh, sizeof(lh), 1, fpOutput);
        /* 8: lambda for couplings (le) */
        OUTPUT_PRECISION le = (OUTPUT_PRECISION) options->lambdaE;
        fwrite(&le, sizeof(le), 1, fpOutput);
        /* 9: group lambda for couplings (lg) */
        OUTPUT_PRECISION lg = (OUTPUT_PRECISION) options->lambdaGroup;
        fwrite(&lg, sizeof(lg), 1, fpOutput);
        /* 10: effective sample size (nEff) */
        OUTPUT_PRECISION nEff = (OUTPUT_PRECISION) ali->nEff;
        fwrite(&nEff, sizeof(nEff), 1, fpOutput);
        /* 11: alphabet (skip the gap character when gaps were reduced out) */
        int isGapped = (options->estimatorMAP == INFER_MAP_PLM_GAPREDUCE);
        for (int i = 0; i < ali->nCodes; i++) {
            int8_t letter = (int8_t) ali->alphabet[i + isGapped];
            fwrite(&letter, sizeof(letter), 1, fpOutput);
        }
        /* 12: sequence number of neighbors (self included).
           Skipped sequences get weight 0; kept sequences get the inverse of
           their (scaled) reweighting factor. */
        int skipix = 0, reducedix = 0;
        for (int s = 0; s < ali->nSeqs + ali->nSkippedSeqs; s++) {
            if (skipix < ali->nSkippedSeqs && s == ali->skippedSeqs[skipix]) {
                /* Skip skipped sequences */
                OUTPUT_PRECISION w = (OUTPUT_PRECISION) 0;
                fwrite(&w, sizeof(w), 1, fpOutput);
                skipix++;
            } else {
                numeric_t nNeighbors = ali->weights[reducedix];
                nNeighbors = 1.0 / (nNeighbors * options->scale);
                OUTPUT_PRECISION w = (OUTPUT_PRECISION) nNeighbors;
                fwrite(&w, sizeof(w), 1, fpOutput);
                reducedix++;
            }
        }
        /* 13: (Focus mode) target sequence */
        if (ali->target >= 0) {
            for (int i = 0; i < ali->nSites; i++) {
                int8_t c = (int8_t) ali->alphabet[seq(ali->target, i)];
                fwrite(&c, sizeof(c), 1, fpOutput);
            }
        } else {
            /* no focus sequence: pad with the first alphabet letter */
            int8_t c = (int8_t) ali->alphabet[0];
            for (int i = 0; i < ali->nSites; i++)
                fwrite(&c, sizeof(c), 1, fpOutput);
        }
        /* 14: (Focus mode) offset map; default is 1-based site indices */
        if (ali->target >= 0) {
            for (int i = 0; i < ali->nSites; i++) {
                int32_t ix = (int32_t) ali->offsets[i];
                fwrite(&ix, sizeof(ix), 1, fpOutput);
            }
        } else {
            for (int i = 0; i < ali->nSites; i++) {
                int32_t ix = (int32_t) i + 1;
                fwrite(&ix, sizeof(ix), 1, fpOutput);
            }
        }
        /* 15: sitewise marginals fi */
        for (int i = 0; i < ali->nSites; i++)
            for (int ai = 0; ai < ali->nCodes; ai++) {
                OUTPUT_PRECISION f = (OUTPUT_PRECISION) fi(i, ai);
                fwrite(&f, sizeof(f), 1, fpOutput);
            }
        /* 16: sitewise parameters hi */
        for (int i = 0; i < ali->nSites; i++)
            for (int ai = 0; ai < ali->nCodes; ai++) {
                OUTPUT_PRECISION h = (OUTPUT_PRECISION) xHi(i, ai);
                fwrite(&h, sizeof(h), 1, fpOutput);
            }
        /* 17: pairwise marginals fij, upper triangle (i < j) */
        for (int i = 0; i < ali->nSites - 1; i++)
            for (int j = i + 1; j < ali->nSites; j++)
                for (int ai = 0; ai < ali->nCodes; ai++)
                    for (int aj = 0; aj < ali->nCodes; aj++) {
                        OUTPUT_PRECISION f =
                            (OUTPUT_PRECISION) fij(i, j, ai, aj);
                        fwrite(&f, sizeof(f), 1, fpOutput);
                    }
        /* 18: couplings eij, upper triangle (i < j) */
        for (int i = 0; i < ali->nSites - 1; i++)
            for (int j = i + 1; j < ali->nSites; j++)
                for (int ai = 0; ai < ali->nCodes; ai++)
                    for (int aj = 0; aj < ali->nCodes; aj++) {
                        OUTPUT_PRECISION e =
                            (OUTPUT_PRECISION) xEij(i, j, ai, aj);
                        fwrite(&e, sizeof(e), 1, fpOutput);
                    }
        fclose(fpOutput);
    } else {
        fprintf(stderr, "Error writing parameters\n");
        exit(1);
    }
}
#undef OUTPUT_PRECISION
/* Compute pairwise coupling scores (Frobenius norm of each eij block,
 * optionally Average-Product-Corrected) and write them as text, one
 * "i ai j aj 0 score" line per site pair.
 * Exits the process on failure to open the output file.
 * Fix: the `couplings` and `C_pos_avg` work buffers were leaked. */
void OutputCouplingScores(char *couplingsFile, const numeric_t *x,
    alignment_t *ali, options_t *options) {
    FILE *fpOutput = NULL;
    fpOutput = fopen(couplingsFile, "w");
    if (fpOutput != NULL) {
        /* Compute the norm of the coupling parameters between each pair */
        numeric_t *couplings =
            (numeric_t *) malloc((ali->nSites * (ali->nSites - 1) / 2)
                * sizeof(numeric_t));
        for (int i = 0; i < ali->nSites * (ali->nSites - 1) / 2;
            i++) couplings[i] = 0;
        for (int i = 0; i < ali->nSites - 1; i++)
            for (int j = i + 1; j < ali->nSites; j++) {
                /* Norm(eij) over ai, aj */
                numeric_t norm = 0.0;
                for (int ai = 0; ai < ali->nCodes; ai++)
                    for (int aj = 0; aj < ali->nCodes; aj++)
                        norm += xEij(i, j, ai, aj) * xEij(i, j, ai, aj);
                norm = sqrt(norm);
                coupling(i, j) = norm;
            }
        numeric_t nPairs =
            ((numeric_t) ((ali->nSites) * (ali->nSites - 1))) / 2.0;
        /* Remove first component of the norms (Average Product Correction) */
        if (!options->zeroAPC) {
            /* Determine the site-wise statistics of the norms */
            numeric_t C_avg = 0.0;
            numeric_t *C_pos_avg =
                (numeric_t *) malloc(ali->nSites * sizeof(numeric_t));
            for (int i = 0; i < ali->nSites; i++) {
                C_pos_avg[i] = 0.0;
            }
            for (int i = 0; i < ali->nSites - 1; i++) {
                for (int j = i + 1; j < ali->nSites; j++) {
                    C_pos_avg[i] +=
                        coupling(i, j) / (numeric_t) (ali->nSites - 1);
                    C_pos_avg[j] +=
                        coupling(i, j) / (numeric_t) (ali->nSites - 1);
                    C_avg += coupling(i, j) / nPairs;
                }
            }
            /* Remove the first component: APC term E[Ci] E[Cj] / E[C] */
            for (int i = 0; i < ali->nSites - 1; i++)
                for (int j = i + 1; j < ali->nSites; j++)
                    coupling(i, j) =
                        coupling(i, j) - C_pos_avg[i] * C_pos_avg[j] / C_avg;
            free(C_pos_avg);  /* was leaked */
        }
        /* Output scores */
        if (ali->target >= 0) {
            /* Focus mode: report residue letters and original numbering */
            for (int i = 0; i < ali->nSites - 1; i++)
                for (int j = i + 1; j < ali->nSites; j++) {
                    char ai = (char) ali->alphabet[seq(ali->target, i)];
                    char aj = (char) ali->alphabet[seq(ali->target, j)];
                    fprintf(fpOutput, "%d %c %d %c 0 %f\n",
                        ali->offsets[i], ai, ali->offsets[j], aj,
                        coupling(i, j));
                }
        } else {
            for (int i = 0; i < ali->nSites - 1; i++)
                for (int j = i + 1; j < ali->nSites; j++)
                    fprintf(fpOutput, "%d - %d - 0 %f\n", i + 1, j + 1,
                        coupling(i, j));
        }
        free(couplings);  /* was leaked */
        fclose(fpOutput);
    } else {
        fprintf(stderr, "Error writing coupling scores\n");
        exit(1);
    }
}
factorial.c | #include<stdio.h>
#include<omp.h>
#include<stdlib.h>
/* Compute n! iteratively.
 * Uses double so results stay representable (approximately) up to ~170!.
 * Fix: the loop previously ran i < n, which computed (n-1)! instead of n!. */
double fact(double n)
{
    double f = 1;
    for (int i = 1; i <= n; i++)
    {
        f *= i;
    }
    return (f);
}
/* Evaluate two factorials in parallel and print their sum.
 * Fix: a bare "#pragma omp sections" outside any parallel region binds to a
 * team of one thread and runs serially; "parallel sections" actually runs
 * the two sections concurrently. y1/y2 are declared before the region and
 * are therefore shared, each written by exactly one section. */
int main(int argc, char* argv[])
{
    double y1, y2, y;
    #pragma omp parallel sections
    {
        #pragma omp section
        y1 = fact(1);
        #pragma omp section
        y2 = fact(4);
    }
    y = y1 + y2;
    printf("%f\n", y);
    return 0;
}
GB_unop__identity_uint64_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint64_fc32)
// op(A') function: GB (_unop_tran__identity_uint64_fc32)
// C type: uint64_t
// A type: GxB_FC32_t
// cast: uint64_t cij = GB_cast_to_uint64_t ((double) crealf (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint64_t z = GB_cast_to_uint64_t ((double) crealf (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint64_t z = GB_cast_to_uint64_t ((double) crealf (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the identity operator with typecast: Cx [p] = (uint64_t) creal (Ax [p]).
// The cast keeps only the real part of each single-complex entry.
// Auto-generated; left byte-identical apart from comments.
GrB_Info GB (_unop_apply__identity_uint64_fc32)
(
uint64_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz, // number of entries (or bitmap size) to process
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
// full/sparse case: every entry in Ax [0..anz-1] is present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity with no typecast: a bulk memcpy suffices (dead here; the
// uint64 <- fc32 cast means GB_OP_IS_IDENTITY_WITH_NO_TYPECAST is 0)
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
// cast: take the real part, widen to double, then saturate to uint64
uint64_t z = GB_cast_to_uint64_t ((double) crealf (aij)) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
uint64_t z = GB_cast_to_uint64_t ((double) crealf (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// Transpose A while applying the identity-with-typecast operator; the
// whole algorithm body lives in the shared template GB_unop_transpose.c,
// which is specialized by the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_uint64_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces, // per-thread workspaces for the transpose
const int64_t *restrict A_slice, // how A is partitioned across tasks
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
c_jacobi01.c | /* ***********************************************************************
This program is part of the
OpenMP Source Code Repository
http://www.pcg.ull.es/ompscr/
e-mail: ompscr@etsii.ull.es
Copyright (c) 2004, OmpSCR Group
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of La Laguna nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
OF SUCH DAMAGE.
FILE: c_jacobi01.c
VERSION: 1.1
DATE: Oct 2004
AUTHORS: Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
This version: Dieter an Mey, Aachen University (RWTH), 1999 - 2003
anmey@rz.rwth-aachen.de
http://www.rwth-aachen.de/People/D.an.Mey.html
COMMENTS TO: ompscr@etsii.ull.es
DESCRIPTION: program to solve a finite difference discretization of Helmholtz equation :
(d2/dx2)u + (d2/dy2)u - alpha u = f using Jacobi iterative method.
COMMENTS: OpenMP version 1: two parallel regions with one parallel loop each, the naive approach.
Directives are used in this code to achieve parallelism.
All do loops are parallized with default 'static' scheduling.
REFERENCES: http://www.rz.rwth-aachen.de/computing/hpc/prog/par/openmp/jacobi.html
BASIC PRAGMAS: parallel for
USAGE: ./c_jacobi01.par 5000 5000 0.8 1.0 1000
INPUT: n - grid dimension in x direction
m - grid dimension in y direction
alpha - Helmholtz constant (always greater than 0.0)
tol - error tolerance for iterative solver
relax - Successive over-relaxation parameter
mits - Maximum iterations for iterative solver
OUTPUT: Residual and error
u(n,m) - Dependent variable (solutions)
f(n,m) - Right hand side function
FILE FORMATS: -
RESTRICTIONS: -
REVISION HISTORY:
**************************************************************************/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include "OmpSCR.h"
#define U(i,j) u[(i)*n+(j)]
#define F(i,j) f[(i)*n+(j)]
#define NUM_ARGS 6
#define NUM_TIMERS 1
int n, m, mits;
double tol, relax, alpha;
void jacobi (int n, int m, double dx, double dy,
double alpha, double omega,
double *u, double *f,
double tol, int maxit );
/******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
/* Initialize the grid: zero initial guess in u, and the right-hand side f
 * corresponding to the exact solution u(x,y) = (1-x^2)*(1-y^2).
 * dx/dy (out): grid spacing in x and y.
 * Fix: xx and yy were declared int, so the fractional grid coordinates
 * -1.0 + dx*(i-1) were truncated toward zero, corrupting f. They must be
 * double (error_check already uses double for the same quantities). */
void initialize(
int n,
int m,
double alpha,
double *dx,
double *dy,
double *u,
double *f)
{
    int i, j;
    double xx, yy;
    *dx = 2.0 / (n-1);
    *dy = 2.0 / (m-1);
    /* Initialize initial condition and RHS */
    for (j = 0; j < m; j++) {
        for (i = 0; i < n; i++) {
            /* NOTE(review): the (i-1)/(j-1) offset is kept from the original
               code -- it shifts the grid by one spacing; confirm intended */
            xx = -1.0 + *dx * (i-1);
            yy = -1.0 + *dy * (j-1);
            U(j,i) = 0.0;
            F(j,i) = -alpha * (1.0 - xx*xx) * (1.0 - yy*yy)
                - 2.0 * (1.0 - xx*xx) - 2.0 * (1.0 - yy*yy);
        }
    }
}
/************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
/* Report the RMS error between the numerical solution in u and the exact
 * solution u(x,y) = (1-x^2)*(1-y^2).
 * Fix: dy was recomputed as 2.0/(n-2), which is wrong on non-square grids
 * and inconsistent with initialize(); it must be 2.0/(m-1). */
void error_check(
int n,
int m,
double alpha,
double dx,
double dy,
double *u,
double *f)
{
    int i, j;
    double xx, yy, temp, error;
    /* recompute spacings locally (shadows the passed-in values) */
    dx = 2.0 / (n-1);
    dy = 2.0 / (m-1);
    error = 0.0;
    for (j = 0; j < m; j++) {
        for (i = 0; i < n; i++) {
            xx = -1.0 + dx * (i-1);
            yy = -1.0 + dy * (j-1);
            temp = U(j,i) - (1.0 - xx*xx) * (1.0 - yy*yy);
            error += temp*temp;
        }
    }
    error = sqrt(error)/(n*m);
    printf("Solution Error : %g\n", error);
}
/* Driver: parse the six solver arguments via the OmpSCR harness, allocate
 * the grids, run the Jacobi solver under a timer, and report timing,
 * MFlop rate, and solution error. */
int main(int argc, char **argv){
    double *grid_u, *grid_f;
    double dx, dy;
    double elapsed, mflop_rate;
    int numThreads;
    char *PARAM_NAMES[NUM_ARGS] = {"Grid dimension: X dir =", "Grid dimension: Y dir =", "Helmhotlz constant =",
        "Successive over-relaxation parameter =",
        "error tolerance for iterative solver =", "Maximum iterations for solver ="};
    char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time"};
    char *DEFAULT_VALUES[NUM_ARGS] = {"5000", "5000", "0.8", "1.0", "1e-7", "1000"};
    /* Register the benchmark with the OmpSCR framework */
    numThreads = omp_get_max_threads();
    OSCR_init (numThreads, "Jacobi Solver v1", "Use 'jacobi01' <n> <m> <alpha> <relax> <tol> <mits>", NUM_ARGS,
        PARAM_NAMES, DEFAULT_VALUES , NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES,
        argc, argv);
    /* Fetch the parsed arguments into the file-level globals */
    n     = OSCR_getarg_int(1);
    m     = OSCR_getarg_int(2);
    alpha = OSCR_getarg_double(3);
    relax = OSCR_getarg_double(4);
    tol   = OSCR_getarg_double(5);
    mits  = OSCR_getarg_int(6);
    printf("-> %d, %d, %g, %g, %g, %d\n",
        n, m, alpha, relax, tol, mits);
    /* Allocate and initialize the solution and RHS grids */
    grid_u = (double *) OSCR_malloc(n*m*sizeof(double));
    grid_f = (double *) OSCR_malloc(n*m*sizeof(double));
    initialize(n, m, alpha, &dx, &dy, grid_u, grid_f);
    /* Solve the Helmholtz equation and time the solve only */
    OSCR_timer_start(0);
    jacobi(n, m, dx, dy, alpha, relax, grid_u, grid_f, tol, mits);
    OSCR_timer_stop(0);
    elapsed = OSCR_timer_read(0);
    printf(" elapsed time : %12.6f\n", elapsed);
    /* 13 flops per interior point per iteration */
    mflop_rate = (0.000001*mits*(m-2)*(n-2)*13) / elapsed;
    printf(" MFlops : %12.6g (%d, %d, %d, %g)\n", mflop_rate, mits, m, n, elapsed);
    error_check(n, m, alpha, dx, dy, grid_u, grid_f);
    OSCR_report(1, TIMERS_NAMES);
    return 0;
}
/*
subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlect boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************
*/
/* Jacobi iteration for the Helmholtz equation on an n-by-m grid with
 * Dirichlet boundaries. Iterates until the residual norm drops below tol
 * or maxit sweeps have been performed; u holds the solution on return.
 * (Rows and columns are swapped relative to the mathematical layout, i.e.
 * the mirrored problem is solved -- see the original German note.) */
void jacobi ( const int n, const int m, double dx, double dy, double alpha,
double omega, double *u, double *f, double tol, int maxit )
{
    int col, row, iter;
    double err, res;
    double coef_x, coef_y, coef_c;
    double *u_old;
    /* scratch copy of the previous iterate */
    u_old = (double *)OSCR_malloc(sizeof(double) * n *m);
    coef_x = 1.0/(dx * dx);                       /* X-direction coef */
    coef_y = 1.0/(dy*dy);                         /* Y-direction coef */
    coef_c = -2.0/(dx*dx)-2.0/(dy*dy) - alpha;    /* central coef */
    err = 10.0 * tol;      /* force at least one sweep */
    iter = 1;
    while (iter <= maxit && err > tol) {
        err = 0.0;
        /* snapshot the current solution */
        #pragma omp parallel for private(col) schedule(dynamic)
        for (row = 0; row < m; row++)
            for (col = 0; col < n; col++)
                u_old[col + m*row] = u[col + m*row];
        /* apply the 5-point stencil, accumulate the squared residual */
        #pragma omp parallel for reduction(+:err) private(col,res) schedule(dynamic)
        for (row = 1; row < m-1; row++)
            for (col = 1; col < n-1; col++){
                res = (
                    coef_x * (u_old[col-1 + m*row] + u_old[col+1 + m*row])
                    + coef_y * (u_old[col + m*(row-1)] + u_old[col + m*(row+1)])
                    + coef_c * u_old[col + m*row] - f[col + m*row]
                    ) / coef_c;
                /* relaxed Jacobi update */
                u[col + m*row] = u_old[col + m*row] - omega * res;
                err = err + res*res;
            }
        iter++;
        err = sqrt(err) /(n*m);   /* RMS residual */
    }
    printf("Total Number of Iterations %d\n", iter);
    printf("Residual %.15f\n\n", err);
    free(u_old);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.